/*-
 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
 * Copyright 2009-2012 Konstantin Belousov <kib@FreeBSD.ORG>.
 * Copyright 2012 John Marino <draco@marino.st>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#ifndef __GNUC__
#error "GCC is needed to compile this file"
#endif

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/ktrace.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"
#include "libmap.h"
#include "rtld_tls.h"
#include "rtld_printf.h"
#include "notes.h"

#ifndef COMPAT_32BIT
#define PATH_RTLD "/libexec/ld-elf.so.1"
#else
#define PATH_RTLD "/libexec/ld-elf32.so.1"
#endif

/* Types. */
typedef void (*func_ptr_type)();
typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);

/*
 * Function declarations.
 */
static const char *basename(const char *);
static void die(void) __dead2;
static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
    const Elf_Dyn **, const Elf_Dyn **);
static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
    const Elf_Dyn *);
static void digest_dynamic(Obj_Entry *, int);
static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
static Obj_Entry *dlcheck(void *);
static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
    int lo_flags, int mode, RtldLockState *lockstate);
static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
static bool donelist_check(DoneList *, const Obj_Entry *);
static void errmsg_restore(char *);
static char *errmsg_save(void);
static void *fill_search_info(const char *, size_t, void *);
static char *find_library(const char *, const Obj_Entry *);
static const char *gethints(bool);
static void init_dag(Obj_Entry *);
static void init_pagesizes(Elf_Auxinfo **aux_info);
static void init_rtld(caddr_t, Elf_Auxinfo **);
static void initlist_add_neededs(Needed_Entry *, Objlist *);
static void initlist_add_objects(Obj_Entry *, Obj_Entry **, Objlist *);
static void linkmap_add(Obj_Entry *);
static void linkmap_delete(Obj_Entry *);
static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
static void unload_filtees(Obj_Entry *);
static int load_needed_objects(Obj_Entry *, int);
static int load_preload_objects(void);
static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
static void map_stacks_exec(RtldLockState *);
static Obj_Entry *obj_from_addr(const void *);
static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
static void objlist_call_init(Objlist *, RtldLockState *);
static void objlist_clear(Objlist *);
static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
static void objlist_init(Objlist *);
static void objlist_push_head(Objlist *, Obj_Entry *);
static void objlist_push_tail(Objlist *, Obj_Entry *);
static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
static void objlist_remove(Objlist *, Obj_Entry *);
static void *path_enumerate(const char *, path_enum_proc, void *);
static int relocate_object_dag(Obj_Entry *root, bool bind_now,
    Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
    int flags, RtldLockState *lockstate);
static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
    RtldLockState *);
static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
    int flags, RtldLockState *lockstate);
static int rtld_dirname(const char *, char *);
static int rtld_dirname_abs(const char *, char *);
static void *rtld_dlopen(const char *name, int fd, int mode);
static void rtld_exit(void);
static char *search_library_path(const char *, const char *);
static const void **get_program_var_addr(const char *, RtldLockState *);
static void set_program_var(const char *, const void *);
static int symlook_default(SymLook *, const Obj_Entry *refobj);
static int symlook_global(SymLook *, DoneList *);
static void symlook_init_from_req(SymLook *, const SymLook *);
static int symlook_list(SymLook *, const Objlist *, DoneList *);
static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
static void trace_loaded_objects(Obj_Entry *);
static void unlink_object(Obj_Entry *);
static void unload_object(Obj_Entry *);
static void unref_dag(Obj_Entry *);
static void ref_dag(Obj_Entry *);
static char *origin_subst_one(char *, const char *, const char *, bool);
static char *origin_subst(char *, const char *);
static void preinit_main(void);
static int rtld_verify_versions(const Objlist *);
static int rtld_verify_object_versions(Obj_Entry *);
static void object_add_name(Obj_Entry *, const char *);
static int object_match_name(const Obj_Entry *, const char *);
static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
    struct dl_phdr_info *phdr_info);
static uint32_t gnu_hash(const char *);
static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
    const unsigned long);

void r_debug_state(struct r_debug *, struct link_map *) __noinline;

/*
 * Data declarations.
 */
static char *error_message;     /* Message for dlerror(), or NULL */
struct r_debug r_debug;         /* for GDB; */
static bool libmap_disable;     /* Disable libmap */
static bool ld_loadfltr;        /* Immediate filters processing */
static char *libmap_override;   /* Maps to use in addition to libmap.conf */
static bool trust;              /* False for setuid and setgid programs */
static bool dangerous_ld_env;   /* True if environment variables have been
                                   used to affect the libraries loaded */
static char *ld_bind_now;       /* Environment variable for immediate binding */
static char *ld_debug;          /* Environment variable for debugging */
static char *ld_library_path;   /* Environment variable for search path */
static char *ld_preload;        /* Environment variable for libraries to
                                   load first */
static char *ld_elf_hints_path; /* Environment variable for alternative hints path */
static char *ld_tracing;        /* Called from ldd to print libs */
static char *ld_utrace;         /* Use utrace() to log events. */
static Obj_Entry *obj_list;     /* Head of linked list of shared objects */
static Obj_Entry **obj_tail;    /* Link field of last object in list */
static Obj_Entry *obj_main;     /* The main program shared object */
static Obj_Entry obj_rtld;      /* The dynamic linker shared object */
static unsigned int obj_count;  /* Number of objects in obj_list */
static unsigned int obj_loads;  /* Number of object loads performed */

static Objlist list_global =    /* Objects dlopened with RTLD_GLOBAL */
    STAILQ_HEAD_INITIALIZER(list_global);
static Objlist list_main =      /* Objects loaded at program startup */
    STAILQ_HEAD_INITIALIZER(list_main);
static Objlist list_fini =      /* Objects needing fini() calls */
    STAILQ_HEAD_INITIALIZER(list_fini);

Elf_Sym sym_zero;               /* For resolving undefined weak refs. */

#define GDB_STATE(s,m)  r_debug.r_state = s; r_debug_state(&r_debug,m);

extern Elf_Dyn _DYNAMIC;
#pragma weak _DYNAMIC
#ifndef RTLD_IS_DYNAMIC
#define RTLD_IS_DYNAMIC()       (&_DYNAMIC != NULL)
#endif

int npagesizes, osreldate;
size_t *pagesizes;

long __stack_chk_guard[8] = {0, 0, 0, 0, 0, 0, 0, 0};

static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
static int max_stack_flags;

/*
 * Global declarations normally provided by crt1.  The dynamic linker is
 * not built with crt1, so we have to provide them ourselves.
 */
char *__progname;
char **environ;

/*
 * Used to pass argc, argv to init functions.
 */
int main_argc;
char **main_argv;

/*
 * Globals to control TLS allocation.
 */
size_t tls_last_offset;         /* Static TLS offset of last module */
size_t tls_last_size;           /* Static TLS size of last module */
size_t tls_static_space;        /* Static TLS space allocated */
size_t tls_static_max_align;
int tls_dtv_generation = 1;     /* Used to detect when dtv size changes */
int tls_max_index = 1;          /* Largest module index allocated */

bool ld_library_path_rpath = false;

/*
 * Fill in a DoneList with an allocation large enough to hold all of
 * the currently-loaded objects.  Keep this as a macro since it calls
 * alloca and we want that to occur within the scope of the caller.
 */
#define donelist_init(dlp)                                      \
    ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]),   \
    assert((dlp)->objs != NULL),                                \
    (dlp)->num_alloc = obj_count,                               \
    (dlp)->num_used = 0)

#define UTRACE_DLOPEN_START     1
#define UTRACE_DLOPEN_STOP      2
#define UTRACE_DLCLOSE_START    3
#define UTRACE_DLCLOSE_STOP     4
#define UTRACE_LOAD_OBJECT      5
#define UTRACE_UNLOAD_OBJECT    6
#define UTRACE_ADD_RUNDEP       7
#define UTRACE_PRELOAD_FINISHED 8
#define UTRACE_INIT_CALL        9
#define UTRACE_FINI_CALL        10

struct utrace_rtld {
    char sig[4];                /* 'RTLD' */
    int event;
    void *handle;
    void *mapbase;              /* Used for 'parent' and 'init/fini' */
    size_t mapsize;
    int refcnt;                 /* Used for 'mode' */
    char name[MAXPATHLEN];
};

#define LD_UTRACE(e, h, mb, ms, r, n) do {                      \
        if (ld_utrace != NULL)                                  \
            ld_utrace_log(e, h, mb, ms, r, n);                  \
} while (0)

static void
ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
    int refcnt, const char *name)
{
    struct utrace_rtld ut;

    ut.sig[0] = 'R';
    ut.sig[1] = 'T';
    ut.sig[2] = 'L';
    ut.sig[3] = 'D';
    ut.event = event;
    ut.handle = handle;
    ut.mapbase = mapbase;
    ut.mapsize = mapsize;
    ut.refcnt = refcnt;
    bzero(ut.name, sizeof(ut.name));
    if (name)
        strlcpy(ut.name, name, sizeof(ut.name));
    utrace(&ut, sizeof(ut));
}
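
/*
 * Usage note (illustrative, not part of the original source): when the
 * LD_UTRACE environment variable is set, the records emitted above can
 * typically be captured with ktrace(1) and examined with kdump(1),
 * e.g. "ktrace -t u ./prog && kdump".
 */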

/*
 * Main entry point for dynamic linking.  The first argument is the
 * stack pointer.  The stack is expected to be laid out as described
 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
 * Specifically, the stack pointer points to a word containing
 * ARGC.  Following that in the stack is a null-terminated sequence
 * of pointers to argument strings.  Then comes a null-terminated
 * sequence of pointers to environment strings.  Finally, there is a
 * sequence of "auxiliary vector" entries.
 *
 * The second argument points to a place to store the dynamic linker's
 * exit procedure pointer and the third to a place to store the main
 * program's object.
 *
 * The return value is the main program's entry point.
 */
func_ptr_type
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
{
    Elf_Auxinfo *aux_info[AT_COUNT];
    int i;
    int argc;
    char **argv;
    char **env;
    Elf_Auxinfo *aux;
    Elf_Auxinfo *auxp;
    const char *argv0;
    Objlist_Entry *entry;
    Obj_Entry *obj;
    Obj_Entry **preload_tail;
    Obj_Entry *last_interposer;
    Objlist initlist;
    RtldLockState lockstate;
    char *library_path_rpath;
    int mib[2];
    size_t len;

    /*
     * On entry, the dynamic linker itself has not been relocated yet.
     * Be very careful not to reference any global data until after
     * init_rtld has returned.  It is OK to reference file-scope statics
     * and string constants, and to call static and global functions.
     */

    /* Find the auxiliary vector on the stack. */
    argc = *sp++;
    argv = (char **) sp;
    sp += argc + 1;     /* Skip over arguments and NULL terminator */
    env = (char **) sp;
    while (*sp++ != 0)  /* Skip over environment, and NULL terminator */
        ;
    aux = (Elf_Auxinfo *) sp;

    /* Digest the auxiliary vector. */
    for (i = 0; i < AT_COUNT; i++)
        aux_info[i] = NULL;
    for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
        if (auxp->a_type < AT_COUNT)
            aux_info[auxp->a_type] = auxp;
    }

    /* Initialize and relocate ourselves. */
    assert(aux_info[AT_BASE] != NULL);
    init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);

    __progname = obj_rtld.path;
    argv0 = argv[0] != NULL ? argv[0] : "(null)";
    environ = env;
    main_argc = argc;
    main_argv = argv;

    if (aux_info[AT_CANARY] != NULL &&
        aux_info[AT_CANARY]->a_un.a_ptr != NULL) {
        i = aux_info[AT_CANARYLEN]->a_un.a_val;
        if (i > sizeof(__stack_chk_guard))
            i = sizeof(__stack_chk_guard);
        memcpy(__stack_chk_guard, aux_info[AT_CANARY]->a_un.a_ptr, i);
    } else {
        mib[0] = CTL_KERN;
        mib[1] = KERN_ARND;

        len = sizeof(__stack_chk_guard);
        if (sysctl(mib, 2, __stack_chk_guard, &len, NULL, 0) == -1 ||
            len != sizeof(__stack_chk_guard)) {
            /* If sysctl was unsuccessful, use the "terminator canary". */
            ((unsigned char *)(void *)__stack_chk_guard)[0] = 0;
            ((unsigned char *)(void *)__stack_chk_guard)[1] = 0;
            ((unsigned char *)(void *)__stack_chk_guard)[2] = '\n';
            ((unsigned char *)(void *)__stack_chk_guard)[3] = 255;
        }
    }

    trust = !issetugid();

    ld_bind_now = getenv(LD_ "BIND_NOW");
    /*
     * If the process is tainted, then we un-set the dangerous environment
     * variables.  The process will be marked as tainted until setuid(2)
     * is called.  If any child process calls setuid(2) we do not want any
     * future processes to honor the potentially un-safe variables.
     */
    if (!trust) {
        if (unsetenv(LD_ "PRELOAD") || unsetenv(LD_ "LIBMAP") ||
            unsetenv(LD_ "LIBRARY_PATH") || unsetenv(LD_ "LIBMAP_DISABLE") ||
            unsetenv(LD_ "DEBUG") || unsetenv(LD_ "ELF_HINTS_PATH") ||
            unsetenv(LD_ "LOADFLTR") || unsetenv(LD_ "LIBRARY_PATH_RPATH")) {
            _rtld_error("environment corrupt; aborting");
            die();
        }
    }
    ld_debug = getenv(LD_ "DEBUG");
    libmap_disable = getenv(LD_ "LIBMAP_DISABLE") != NULL;
    libmap_override = getenv(LD_ "LIBMAP");
    ld_library_path = getenv(LD_ "LIBRARY_PATH");
    ld_preload = getenv(LD_ "PRELOAD");
    ld_elf_hints_path = getenv(LD_ "ELF_HINTS_PATH");
    ld_loadfltr = getenv(LD_ "LOADFLTR") != NULL;
    library_path_rpath = getenv(LD_ "LIBRARY_PATH_RPATH");
    if (library_path_rpath != NULL) {
        if (library_path_rpath[0] == 'y' ||
            library_path_rpath[0] == 'Y' ||
            library_path_rpath[0] == '1')
            ld_library_path_rpath = true;
        else
            ld_library_path_rpath = false;
    }
    dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
        (ld_library_path != NULL) || (ld_preload != NULL) ||
        (ld_elf_hints_path != NULL) || ld_loadfltr;
    ld_tracing = getenv(LD_ "TRACE_LOADED_OBJECTS");
    ld_utrace = getenv(LD_ "UTRACE");

    if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
        ld_elf_hints_path = _PATH_ELF_HINTS;

    if (ld_debug != NULL && *ld_debug != '\0')
        debug = 1;
    dbg("%s is initialized, base address = %p", __progname,
        (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
    dbg("RTLD dynamic = %p", obj_rtld.dynamic);
    dbg("RTLD pltgot = %p", obj_rtld.pltgot);

    dbg("initializing thread locks");
    lockdflt_init();

    /*
     * Load the main program, or process its program header if it is
     * already loaded.
     */
    if (aux_info[AT_EXECFD] != NULL) {  /* Load the main program. */
        int fd = aux_info[AT_EXECFD]->a_un.a_val;
        dbg("loading main program");
        obj_main = map_object(fd, argv0, NULL);
        close(fd);
        if (obj_main == NULL)
            die();
        max_stack_flags = obj_main->stack_flags;
    } else {                            /* Main program already loaded. */
        const Elf_Phdr *phdr;
        int phnum;
        caddr_t entry;

        dbg("processing main program's program header");
        assert(aux_info[AT_PHDR] != NULL);
        phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
        assert(aux_info[AT_PHNUM] != NULL);
        phnum = aux_info[AT_PHNUM]->a_un.a_val;
        assert(aux_info[AT_PHENT] != NULL);
        assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
        assert(aux_info[AT_ENTRY] != NULL);
        entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
        if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
            die();
    }

    if (aux_info[AT_EXECPATH] != 0) {
        char *kexecpath;
        char buf[MAXPATHLEN];

        kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
        dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
        if (kexecpath[0] == '/')
            obj_main->path = kexecpath;
        else if (getcwd(buf, sizeof(buf)) == NULL ||
            strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
            strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
            obj_main->path = xstrdup(argv0);
        else
            obj_main->path = xstrdup(buf);
    } else {
        dbg("No AT_EXECPATH");
        obj_main->path = xstrdup(argv0);
    }
    dbg("obj_main path %s", obj_main->path);
    obj_main->mainprog = true;

    if (aux_info[AT_STACKPROT] != NULL &&
        aux_info[AT_STACKPROT]->a_un.a_val != 0)
        stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;

    /*
     * Get the actual dynamic linker pathname from the executable if
     * possible.  (It should always be possible.)  That ensures that
     * gdb will find the right dynamic linker even if a non-standard
     * one is being used.
     */
    if (obj_main->interp != NULL &&
        strcmp(obj_main->interp, obj_rtld.path) != 0) {
        free(obj_rtld.path);
        obj_rtld.path = xstrdup(obj_main->interp);
        __progname = obj_rtld.path;
    }

    digest_dynamic(obj_main, 0);
    dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
        obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
        obj_main->dynsymcount);

    linkmap_add(obj_main);
    linkmap_add(&obj_rtld);

    /* Link the main program into the list of objects. */
    *obj_tail = obj_main;
    obj_tail = &obj_main->next;
    obj_count++;
    obj_loads++;

    /* Initialize a fake symbol for resolving undefined weak references. */
    sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
    sym_zero.st_shndx = SHN_UNDEF;
    sym_zero.st_value = -(uintptr_t)obj_main->relocbase;

    if (!libmap_disable)
        libmap_disable = (bool)lm_init(libmap_override);

    dbg("loading LD_PRELOAD libraries");
    if (load_preload_objects() == -1)
        die();
    preload_tail = obj_tail;

    dbg("loading needed objects");
    if (load_needed_objects(obj_main, 0) == -1)
        die();

    /* Make a list of all objects loaded at startup. */
    last_interposer = obj_main;
    for (obj = obj_list; obj != NULL; obj = obj->next) {
        if (obj->z_interpose && obj != obj_main) {
            objlist_put_after(&list_main, last_interposer, obj);
            last_interposer = obj;
        } else {
            objlist_push_tail(&list_main, obj);
        }
        obj->refcount++;
    }

    dbg("checking for required versions");
    if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
        die();

    if (ld_tracing) {           /* We're done */
        trace_loaded_objects(obj_main);
        exit(0);
    }

    if (getenv(LD_ "DUMP_REL_PRE") != NULL) {
        dump_relocations(obj_main);
        exit (0);
    }

    /*
     * Processing tls relocations requires having the tls offsets
     * initialized.  Prepare offsets before starting initial
     * relocation processing.
     */
    dbg("initializing initial thread local storage offsets");
    STAILQ_FOREACH(entry, &list_main, link) {
        /*
         * Allocate all the initial objects out of the static TLS
         * block even if they didn't ask for it.
         */
        allocate_tls_offset(entry->obj);
    }

    if (relocate_objects(obj_main,
        ld_bind_now != NULL && *ld_bind_now != '\0',
        &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
        die();

    dbg("doing copy relocations");
    if (do_copy_relocations(obj_main) == -1)
        die();

    if (getenv(LD_ "DUMP_REL_POST") != NULL) {
        dump_relocations(obj_main);
        exit (0);
    }

    /*
     * Setup TLS for main thread.  This must be done after the
     * relocations are processed, since tls initialization section
     * might be the subject for relocations.
     */
    dbg("initializing initial thread local storage");
    allocate_initial_tls(obj_list);

    dbg("initializing key program variables");
    set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
    set_program_var("environ", env);
    set_program_var("__elf_aux_vector", aux);

    /* Make a list of init functions to call. */
    objlist_init(&initlist);
    initlist_add_objects(obj_list, preload_tail, &initlist);

    r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */

    map_stacks_exec(NULL);

    dbg("resolving ifuncs");
    if (resolve_objects_ifunc(obj_main,
        ld_bind_now != NULL && *ld_bind_now != '\0', SYMLOOK_EARLY,
        NULL) == -1)
        die();

    if (!obj_main->crt_no_init) {
        /*
         * Make sure we don't call the main program's init and fini
         * functions for binaries linked with old crt1 which calls
         * _init itself.
         */
        obj_main->init = obj_main->fini = (Elf_Addr)NULL;
        obj_main->preinit_array = obj_main->init_array =
            obj_main->fini_array = (Elf_Addr)NULL;
    }

    wlock_acquire(rtld_bind_lock, &lockstate);
    if (obj_main->crt_no_init)
        preinit_main();
    objlist_call_init(&initlist, &lockstate);
    objlist_clear(&initlist);
    dbg("loading filtees");
    for (obj = obj_list->next; obj != NULL; obj = obj->next) {
        if (ld_loadfltr || obj->z_loadfltr)
            load_filtees(obj, 0, &lockstate);
    }
    lock_release(rtld_bind_lock, &lockstate);

    dbg("transferring control to program entry point = %p", obj_main->entry);

    /* Return the exit procedure and the program entry point. */
    *exit_proc = rtld_exit;
    *objp = obj_main;
    return (func_ptr_type) obj_main->entry;
}

void *
rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
{
    void *ptr;
    Elf_Addr target;

    ptr = (void *)make_function_pointer(def, obj);
    target = ((Elf_Addr (*)(void))ptr)();
    return ((void *)target);
}

Elf_Addr
_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
    const Elf_Rel *rel;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    Elf_Addr *where;
    Elf_Addr target;
    RtldLockState lockstate;

    rlock_acquire(rtld_bind_lock, &lockstate);
    if (sigsetjmp(lockstate.env, 0) != 0)
        lock_upgrade(rtld_bind_lock, &lockstate);
    if (obj->pltrel)
        rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
    else
        rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);

    where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL,
        &lockstate);
    if (def == NULL)
        die();
    if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
        target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
    else
        target = (Elf_Addr)(defobj->relocbase + def->st_value);

    dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
        defobj->strtab + def->st_name, basename(obj->path),
        (void *)target, basename(defobj->path));

    /*
     * Write the new contents for the jmpslot.  Note that depending on
     * architecture, the value which we need to return back to the
     * lazy binding trampoline may or may not be the target
     * address.  The value returned from reloc_jmpslot() is the value
     * that the trampoline needs.
     */
    target = reloc_jmpslot(where, target, defobj, obj, rel);
    lock_release(rtld_bind_lock, &lockstate);
    return target;
}

/*
 * Error reporting function.  Use it like printf.  It formats the message
 * into a buffer, and sets things up so that the next call to dlerror()
 * will return the message.
 */
void
_rtld_error(const char *fmt, ...)
{
    static char buf[512];
    va_list ap;

    va_start(ap, fmt);
    rtld_vsnprintf(buf, sizeof buf, fmt, ap);
    error_message = buf;
    va_end(ap);
}

/*
 * Return a dynamically-allocated copy of the current error message, if any.
 */
static char *
errmsg_save(void)
{
    return error_message == NULL ? NULL : xstrdup(error_message);
}

/*
 * Restore the current error message from a copy which was previously saved
 * by errmsg_save().  The copy is freed.
 */
static void
errmsg_restore(char *saved_msg)
{
    if (saved_msg == NULL)
        error_message = NULL;
    else {
        _rtld_error("%s", saved_msg);
        free(saved_msg);
    }
}

static const char *
basename(const char *name)
{
    const char *p = strrchr(name, '/');
    return p != NULL ? p + 1 : name;
}

static struct utsname uts;

static char *
origin_subst_one(char *real, const char *kw, const char *subst,
    bool may_free)
{
    char *p, *p1, *res, *resp;
    int subst_len, kw_len, subst_count, old_len, new_len;

    kw_len = strlen(kw);

    /*
     * First, count the number of the keyword occurrences, to
     * preallocate the final string.
     */
    for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
        p1 = strstr(p, kw);
        if (p1 == NULL)
            break;
    }

    /*
     * If the keyword is not found, just return.
     */
    if (subst_count == 0)
        return (may_free ? real : xstrdup(real));

    /*
     * There is indeed something to substitute.  Calculate the
     * length of the resulting string, and allocate it.
     */
    subst_len = strlen(subst);
    old_len = strlen(real);
    new_len = old_len + (subst_len - kw_len) * subst_count;
    res = xmalloc(new_len + 1);

    /*
     * Now, execute the substitution loop.
     */
    for (p = real, resp = res, *resp = '\0';;) {
        p1 = strstr(p, kw);
        if (p1 != NULL) {
            /* Copy the prefix before keyword. */
            memcpy(resp, p, p1 - p);
            resp += p1 - p;
            /* Keyword replacement. */
            memcpy(resp, subst, subst_len);
            resp += subst_len;
            *resp = '\0';
            p = p1 + kw_len;
        } else
            break;
    }

    /* Copy to the end of string and finish. */
    strcat(resp, p);
    if (may_free)
        free(real);
    return (res);
}

static char *
origin_subst(char *real, const char *origin_path)
{
    char *res1, *res2, *res3, *res4;

    if (uts.sysname[0] == '\0') {
        if (uname(&uts) != 0) {
            _rtld_error("utsname failed: %d", errno);
            return (NULL);
        }
    }
    res1 = origin_subst_one(real, "$ORIGIN", origin_path, false);
    res2 = origin_subst_one(res1, "$OSNAME", uts.sysname, true);
    res3 = origin_subst_one(res2, "$OSREL", uts.release, true);
    res4 = origin_subst_one(res3, "$PLATFORM", uts.machine, true);
    return (res4);
}
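
/*
 * Illustrative example (the paths are hypothetical): for an object whose
 * origin_path is "/usr/local/app/bin", a DT_RUNPATH string of
 * "$ORIGIN/../lib:$ORIGIN" expands to
 * "/usr/local/app/bin/../lib:/usr/local/app/bin", while "$OSNAME",
 * "$OSREL" and "$PLATFORM" are replaced with the corresponding uname(3)
 * fields cached in "uts" above.
 */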

static void
die(void)
{
    const char *msg = dlerror();

    if (msg == NULL)
        msg = "Fatal error";
    rtld_fdputstr(STDERR_FILENO, msg);
    rtld_fdputchar(STDERR_FILENO, '\n');
    _exit(1);
}

/*
 * Process a shared object's DYNAMIC section, and save the important
 * information in its Obj_Entry structure.
 */
static void
digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
    const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
{
    const Elf_Dyn *dynp;
    Needed_Entry **needed_tail = &obj->needed;
    Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
    Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
    const Elf_Hashelt *hashtab;
    const Elf32_Word *hashval;
    Elf32_Word bkt, nmaskwords;
    int bloom_size32;
    bool nmw_power2;
    int plttype = DT_REL;

    *dyn_rpath = NULL;
    *dyn_soname = NULL;
    *dyn_runpath = NULL;

    obj->bind_now = false;
    for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
        switch (dynp->d_tag) {

        case DT_REL:
            obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_RELSZ:
            obj->relsize = dynp->d_un.d_val;
            break;

        case DT_RELENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Rel));
            break;

        case DT_JMPREL:
            obj->pltrel = (const Elf_Rel *)
                (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_PLTRELSZ:
            obj->pltrelsize = dynp->d_un.d_val;
            break;

        case DT_RELA:
            obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_RELASZ:
            obj->relasize = dynp->d_un.d_val;
            break;

        case DT_RELAENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Rela));
            break;

        case DT_PLTREL:
            plttype = dynp->d_un.d_val;
            assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
            break;

        case DT_SYMTAB:
            obj->symtab = (const Elf_Sym *)
                (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_SYMENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Sym));
            break;

        case DT_STRTAB:
            obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_STRSZ:
            obj->strsize = dynp->d_un.d_val;
            break;

        case DT_VERNEED:
            obj->verneed = (const Elf_Verneed *) (obj->relocbase +
                dynp->d_un.d_val);
            break;

        case DT_VERNEEDNUM:
            obj->verneednum = dynp->d_un.d_val;
            break;

        case DT_VERDEF:
            obj->verdef = (const Elf_Verdef *) (obj->relocbase +
                dynp->d_un.d_val);
            break;

        case DT_VERDEFNUM:
            obj->verdefnum = dynp->d_un.d_val;
            break;

        case DT_VERSYM:
            obj->versyms = (const Elf_Versym *)(obj->relocbase +
                dynp->d_un.d_val);
            break;

        case DT_HASH:
            {
                hashtab = (const Elf_Hashelt *)(obj->relocbase +
                    dynp->d_un.d_ptr);
                obj->nbuckets = hashtab[0];
                obj->nchains = hashtab[1];
                obj->buckets = hashtab + 2;
                obj->chains = obj->buckets + obj->nbuckets;
                obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
                    obj->buckets != NULL;
            }
            break;

        case DT_GNU_HASH:
            {
                hashtab = (const Elf_Hashelt *)(obj->relocbase +
                    dynp->d_un.d_ptr);
                obj->nbuckets_gnu = hashtab[0];
                obj->symndx_gnu = hashtab[1];
                nmaskwords = hashtab[2];
                bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
                /* Number of bitmask words is required to be power of 2 */
                nmw_power2 = ((nmaskwords & (nmaskwords - 1)) == 0);
                obj->maskwords_bm_gnu = nmaskwords - 1;
                obj->shift2_gnu = hashtab[3];
                obj->bloom_gnu = (Elf_Addr *) (hashtab + 4);
                obj->buckets_gnu = hashtab + 4 + bloom_size32;
                obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
                    obj->symndx_gnu;
                obj->valid_hash_gnu = nmw_power2 && obj->nbuckets_gnu > 0 &&
                    obj->buckets_gnu != NULL;
            }
            break;
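
        /*
         * For reference, the DT_GNU_HASH table parsed above is laid out as
         * four header words ([0] nbuckets, [1] symndx, [2] maskwords,
         * [3] shift2), followed by maskwords Bloom-filter words of the
         * native word size, then nbuckets bucket entries, then the chain
         * of hash values.  The low bit of a chain entry marks the end of
         * that bucket's chain, which is what the dynsymcount loop further
         * down in this function relies on.
         */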

        case DT_NEEDED:
            if (!obj->rtld) {
                Needed_Entry *nep = NEW(Needed_Entry);
                nep->name = dynp->d_un.d_val;
                nep->obj = NULL;
                nep->next = NULL;

                *needed_tail = nep;
                needed_tail = &nep->next;
            }
            break;

        case DT_FILTER:
            if (!obj->rtld) {
                Needed_Entry *nep = NEW(Needed_Entry);
                nep->name = dynp->d_un.d_val;
                nep->obj = NULL;
                nep->next = NULL;

                *needed_filtees_tail = nep;
                needed_filtees_tail = &nep->next;
            }
            break;

        case DT_AUXILIARY:
            if (!obj->rtld) {
                Needed_Entry *nep = NEW(Needed_Entry);
                nep->name = dynp->d_un.d_val;
                nep->obj = NULL;
                nep->next = NULL;

                *needed_aux_filtees_tail = nep;
                needed_aux_filtees_tail = &nep->next;
            }
            break;

        case DT_PLTGOT:
            obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_TEXTREL:
            obj->textrel = true;
            break;

        case DT_SYMBOLIC:
            obj->symbolic = true;
            break;

        case DT_RPATH:
            /*
             * We have to wait until later to process this, because we
             * might not have gotten the address of the string table yet.
             */
            *dyn_rpath = dynp;
            break;

        case DT_SONAME:
            *dyn_soname = dynp;
            break;

        case DT_RUNPATH:
            *dyn_runpath = dynp;
            break;

        case DT_INIT:
            obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_PREINIT_ARRAY:
            obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_PREINIT_ARRAYSZ:
            obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
            break;

        case DT_INIT_ARRAY:
            obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_INIT_ARRAYSZ:
            obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
            break;

        case DT_FINI:
            obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_FINI_ARRAY:
            obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_FINI_ARRAYSZ:
            obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
            break;

        /*
         * Don't process DT_DEBUG on MIPS as the dynamic section
         * is mapped read-only.  DT_MIPS_RLD_MAP is used instead.
         */

#ifndef __mips__
        case DT_DEBUG:
            /* XXX - not implemented yet */
            if (!early)
                dbg("Filling in DT_DEBUG entry");
            ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
            break;
#endif

        case DT_FLAGS:
            if ((dynp->d_un.d_val & DF_ORIGIN) && trust)
                obj->z_origin = true;
            if (dynp->d_un.d_val & DF_SYMBOLIC)
                obj->symbolic = true;
            if (dynp->d_un.d_val & DF_TEXTREL)
                obj->textrel = true;
            if (dynp->d_un.d_val & DF_BIND_NOW)
                obj->bind_now = true;
            /*if (dynp->d_un.d_val & DF_STATIC_TLS)
                ;*/
            break;

#ifdef __mips__
        case DT_MIPS_LOCAL_GOTNO:
            obj->local_gotno = dynp->d_un.d_val;
            break;

        case DT_MIPS_SYMTABNO:
            obj->symtabno = dynp->d_un.d_val;
            break;

        case DT_MIPS_GOTSYM:
            obj->gotsym = dynp->d_un.d_val;
            break;

        case DT_MIPS_RLD_MAP:
            *((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
            break;
#endif

        case DT_FLAGS_1:
            if (dynp->d_un.d_val & DF_1_NOOPEN)
                obj->z_noopen = true;
            if ((dynp->d_un.d_val & DF_1_ORIGIN) && trust)
                obj->z_origin = true;
            /*if (dynp->d_un.d_val & DF_1_GLOBAL)
                XXX ;*/
            if (dynp->d_un.d_val & DF_1_BIND_NOW)
                obj->bind_now = true;
            if (dynp->d_un.d_val & DF_1_NODELETE)
                obj->z_nodelete = true;
            if (dynp->d_un.d_val & DF_1_LOADFLTR)
                obj->z_loadfltr = true;
            if (dynp->d_un.d_val & DF_1_INTERPOSE)
                obj->z_interpose = true;
            if (dynp->d_un.d_val & DF_1_NODEFLIB)
                obj->z_nodeflib = true;
            break;

        default:
            if (!early) {
                dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
                    (long)dynp->d_tag);
            }
            break;
        }
    }

    obj->traced = false;

    if (plttype == DT_RELA) {
        obj->pltrela = (const Elf_Rela *) obj->pltrel;
        obj->pltrel = NULL;
        obj->pltrelasize = obj->pltrelsize;
        obj->pltrelsize = 0;
    }

    /* Determine size of dynsym table (equal to nchains of sysv hash) */
    if (obj->valid_hash_sysv)
        obj->dynsymcount = obj->nchains;
    else if (obj->valid_hash_gnu) {
        obj->dynsymcount = 0;
        for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
            if (obj->buckets_gnu[bkt] == 0)
                continue;
            hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
            do
                obj->dynsymcount++;
            while ((*hashval++ & 1u) == 0);
        }
        obj->dynsymcount += obj->symndx_gnu;
    }
}

static void
digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
    const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
{

    if (obj->z_origin && obj->origin_path == NULL) {
        obj->origin_path = xmalloc(PATH_MAX);
        if (rtld_dirname_abs(obj->path, obj->origin_path) == -1)
            die();
    }

    if (dyn_runpath != NULL) {
        obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val;
        if (obj->z_origin)
            obj->runpath = origin_subst(obj->runpath, obj->origin_path);
    }
    else if (dyn_rpath != NULL) {
        obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
        if (obj->z_origin)
            obj->rpath = origin_subst(obj->rpath, obj->origin_path);
    }

    if (dyn_soname != NULL)
        object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
}
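
/*
 * Illustrative note: DT_RUNPATH takes precedence in digest_dynamic2();
 * DT_RPATH is only recorded when no DT_RUNPATH entry exists, which is how
 * the rpath/runpath exclusivity mentioned in the find_library() comment is
 * enforced.  Linkers typically emit DT_RUNPATH rather than DT_RPATH when
 * invoked with --enable-new-dtags.
 */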

static void
digest_dynamic(Obj_Entry *obj, int early)
{
    const Elf_Dyn *dyn_rpath;
    const Elf_Dyn *dyn_soname;
    const Elf_Dyn *dyn_runpath;

    digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
    digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
}

/*
 * Process a shared object's program header.  This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker.  It creates and
 * returns an Obj_Entry structure.
 */
static Obj_Entry *
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
{
    Obj_Entry *obj;
    const Elf_Phdr *phlimit = phdr + phnum;
    const Elf_Phdr *ph;
    Elf_Addr note_start, note_end;
    int nsegs = 0;

    obj = obj_new();
    for (ph = phdr; ph < phlimit; ph++) {
        if (ph->p_type != PT_PHDR)
            continue;

        obj->phdr = phdr;
        obj->phsize = ph->p_memsz;
        obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
        break;
    }

    obj->stack_flags = PF_X | PF_R | PF_W;

    for (ph = phdr; ph < phlimit; ph++) {
        switch (ph->p_type) {

        case PT_INTERP:
            obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
            break;

        case PT_LOAD:
            if (nsegs == 0) {   /* First load segment */
                obj->vaddrbase = trunc_page(ph->p_vaddr);
                obj->mapbase = obj->vaddrbase + obj->relocbase;
                obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
                    obj->vaddrbase;
            } else {            /* Last load segment */
                obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
                    obj->vaddrbase;
            }
            nsegs++;
            break;

        case PT_DYNAMIC:
            obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
            break;

        case PT_TLS:
            obj->tlsindex = 1;
            obj->tlssize = ph->p_memsz;
            obj->tlsalign = ph->p_align;
            obj->tlsinitsize = ph->p_filesz;
            obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
            break;

        case PT_GNU_STACK:
            obj->stack_flags = ph->p_flags;
            break;

        case PT_GNU_RELRO:
            obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
            obj->relro_size = round_page(ph->p_memsz);
            break;

        case PT_NOTE:
            note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
            note_end = note_start + ph->p_filesz;
            digest_notes(obj, note_start, note_end);
            break;
        }
    }
    if (nsegs < 1) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        return NULL;
    }

    obj->entry = entry;
    return obj;
}

void
digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
{
    const Elf_Note *note;
    const char *note_name;
    uintptr_t p;

    for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
        note = (const Elf_Note *)((const char *)(note + 1) +
        roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
        roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
        if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
            note->n_descsz != sizeof(int32_t))
            continue;
        if (note->n_type != ABI_NOTETYPE &&
            note->n_type != CRT_NOINIT_NOTETYPE)
            continue;
        note_name = (const char *)(note + 1);
        if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
            sizeof(NOTE_FREEBSD_VENDOR)) != 0)
            continue;
        switch (note->n_type) {
        case ABI_NOTETYPE:
            /* FreeBSD osrel note */
            p = (uintptr_t)(note + 1);
            p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
            obj->osrel = *(const int32_t *)(p);
            dbg("note osrel %d", obj->osrel);
            break;
        case CRT_NOINIT_NOTETYPE:
            /* FreeBSD 'crt does not call init' note */
            obj->crt_no_init = true;
            dbg("note crt_no_init");
            break;
        }
    }
}

static Obj_Entry *
dlcheck(void *handle)
{
    Obj_Entry *obj;

    for (obj = obj_list; obj != NULL; obj = obj->next)
        if (obj == (Obj_Entry *) handle)
            break;

    if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
        _rtld_error("Invalid shared object handle %p", handle);
        return NULL;
    }
    return obj;
}

/*
 * If the given object is already in the donelist, return true.  Otherwise
 * add the object to the list and return false.
 */
static bool
donelist_check(DoneList *dlp, const Obj_Entry *obj)
{
    unsigned int i;

    for (i = 0; i < dlp->num_used; i++)
        if (dlp->objs[i] == obj)
            return true;
    /*
     * Our donelist allocation should always be sufficient.  But if
     * our threads locking isn't working properly, more shared objects
     * could have been loaded since we allocated the list.  That should
     * never happen, but we'll handle it properly just in case it does.
     */
    if (dlp->num_used < dlp->num_alloc)
        dlp->objs[dlp->num_used++] = obj;
    return false;
}

/*
 * Hash function for symbol table lookup.  Don't even think about changing
 * this.  It is specified by the System V ABI.
 */
unsigned long
elf_hash(const char *name)
{
    const unsigned char *p = (const unsigned char *) name;
    unsigned long h = 0;
    unsigned long g;

    while (*p != '\0') {
        h = (h << 4) + *p++;
        if ((g = h & 0xf0000000) != 0)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}

/*
 * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
 * unsigned in case it's implemented with a wider type.
 */
static uint32_t
gnu_hash(const char *s)
{
    uint32_t h;
    unsigned char c;

    h = 5381;
    for (c = *s; c != '\0'; c = *++s)
        h = h * 33 + c;
    return (h & 0xffffffff);
}
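
/*
 * Example values, derivable from the two hash functions above: for the
 * empty string elf_hash() returns 0 and gnu_hash() returns its seed 5381;
 * for the one-character name "a", elf_hash("a") == 0x61 and
 * gnu_hash("a") == 5381 * 33 + 'a'.
 */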

/*
 * Find the library with the given name, and return its full pathname.
 * The returned string is dynamically allocated.  Generates an error
 * message and returns NULL if the library cannot be found.
 *
 * If the second argument is non-NULL, then it refers to an already-
 * loaded shared object, whose library search path will be searched.
 *
 * The search order is:
 *   DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
 *   DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
 *   LD_LIBRARY_PATH
 *   DT_RUNPATH in the referencing file
 *   ldconfig hints (if -z nodefaultlib, filter out default library directories
 *       from list)
 *   /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
 *
 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
 */
static char *
find_library(const char *xname, const Obj_Entry *refobj)
{
    char *pathname;
    char *name;
    bool nodeflib, objgiven;

    objgiven = refobj != NULL;
    if (strchr(xname, '/') != NULL) {   /* Hard coded pathname */
        if (xname[0] != '/' && !trust) {
            _rtld_error("Absolute pathname required for shared object \"%s\"",
                xname);
            return NULL;
        }
        if (objgiven && refobj->z_origin) {
            return (origin_subst(__DECONST(char *, xname),
                refobj->origin_path));
        } else {
            return (xstrdup(xname));
        }
    }

    if (libmap_disable || !objgiven ||
        (name = lm_find(refobj->path, xname)) == NULL)
        name = (char *)xname;

    dbg(" Searching for \"%s\"", name);

    /*
     * If refobj->rpath != NULL, then refobj->runpath is NULL.  Fall
     * back to pre-conforming behaviour if user requested so with
     * LD_LIBRARY_PATH_RPATH environment variable and ignore -z
     * nodeflib.
     */
    if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
        if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
            (refobj != NULL &&
            (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
            (pathname = search_library_path(name, gethints(false))) != NULL ||
            (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL)
            return (pathname);
    } else {
        nodeflib = objgiven ? refobj->z_nodeflib : false;
        if ((objgiven &&
            (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
            (objgiven && refobj->runpath == NULL && refobj != obj_main &&
            (pathname = search_library_path(name, obj_main->rpath)) != NULL) ||
            (pathname = search_library_path(name, ld_library_path)) != NULL ||
            (objgiven &&
            (pathname = search_library_path(name, refobj->runpath)) != NULL) ||
            (pathname = search_library_path(name, gethints(nodeflib))) != NULL ||
            (objgiven && !nodeflib &&
            (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL))
            return (pathname);
    }

    if (objgiven && refobj->path != NULL) {
        _rtld_error("Shared object \"%s\" not found, required by \"%s\"",
            name, basename(refobj->path));
    } else {
        _rtld_error("Shared object \"%s\" not found", name);
    }
    return NULL;
}

/*
 * Given a symbol number in a referencing object, find the corresponding
 * definition of the symbol.  Returns a pointer to the symbol, or NULL if
 * no definition was found.  Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
 */
const Elf_Sym *
find_symdef(unsigned long symnum, const Obj_Entry *refobj,
    const Obj_Entry **defobj_out, int flags, SymCache *cache,
    RtldLockState *lockstate)
{
    const Elf_Sym *ref;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    SymLook req;
    const char *name;
    int res;

    /*
     * If we have already found this symbol, get the information from
     * the cache.
     */
    if (symnum >= refobj->dynsymcount)
        return NULL;    /* Bad object */
    if (cache != NULL && cache[symnum].sym != NULL) {
        *defobj_out = cache[symnum].obj;
        return cache[symnum].sym;
    }

    ref = refobj->symtab + symnum;
    name = refobj->strtab + ref->st_name;
    def = NULL;
    defobj = NULL;

    /*
     * We don't have to do a full scale lookup if the symbol is local.
     * We know it will bind to the instance in this load module, to
     * which we already have a pointer (i.e. ref).  By not doing a lookup,
     * we not only improve performance, but it also avoids unresolvable
     * symbols when local symbols are not in the hash table.  This has
     * been seen with the ia64 toolchain.
     */
    if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
        if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
            _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
                symnum);
        }
        symlook_init(&req, name);
        req.flags = flags;
        req.ventry = fetch_ventry(refobj, symnum);
        req.lockstate = lockstate;
        res = symlook_default(&req, refobj);
        if (res == 0) {
            def = req.sym_out;
            defobj = req.defobj_out;
        }
    } else {
        def = ref;
        defobj = refobj;
    }

    /*
     * If we found no definition and the reference is weak, treat the
     * symbol as having the value zero.
     */
    if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
        def = &sym_zero;
        defobj = obj_main;
    }

    if (def != NULL) {
        *defobj_out = defobj;
        /* Record the information in the cache to avoid subsequent lookups. */
        if (cache != NULL) {
            cache[symnum].sym = def;
            cache[symnum].obj = defobj;
        }
    } else {
        if (refobj != &obj_rtld)
            _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
    }
    return def;
}

/*
 * Return the search path from the ldconfig hints file, reading it if
 * necessary.  If nostdlib is true, then the default search paths are
 * not added to result.
 *
 * Returns NULL if there are problems with the hints file,
 * or if the search path there is empty.
 */
static const char *
gethints(bool nostdlib)
{
    static char *hints, *filtered_path;
    struct elfhints_hdr hdr;
    struct fill_search_info_args sargs, hargs;
    struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
    struct dl_serpath *SLPpath, *hintpath;
    char *p;
    unsigned int SLPndx, hintndx, fndx, fcount;
    int fd;
    size_t flen;
    bool skip;

    /* First call, read the hints file */
    if (hints == NULL) {
        /* Keep from trying again in case the hints file is bad. */
        hints = "";

        if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
            return (NULL);
        if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
            hdr.magic != ELFHINTS_MAGIC ||
            hdr.version != 1) {
            close(fd);
            return (NULL);
        }
        p = xmalloc(hdr.dirlistlen + 1);
        if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
            read(fd, p, hdr.dirlistlen + 1) !=
            (ssize_t)hdr.dirlistlen + 1) {
            free(p);
            close(fd);
            return (NULL);
        }
        hints = p;
        close(fd);
    }

    /*
     * If caller agreed to receive list which includes the default
     * paths, we are done.  Otherwise, if we have not yet calculated
     * the filtered result, do it now.
     */
    if (!nostdlib)
        return (hints[0] != '\0' ? hints : NULL);
    if (filtered_path != NULL)
        goto filt_ret;

    /*
     * Obtain the list of all configured search paths, and the
     * list of the default paths.
     *
     * First estimate the size of the results.
     */
    smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    smeta.dls_cnt = 0;
    hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    hmeta.dls_cnt = 0;

    sargs.request = RTLD_DI_SERINFOSIZE;
    sargs.serinfo = &smeta;
    hargs.request = RTLD_DI_SERINFOSIZE;
    hargs.serinfo = &hmeta;

    path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs);
    path_enumerate(p, fill_search_info, &hargs);

    SLPinfo = xmalloc(smeta.dls_size);
    hintinfo = xmalloc(hmeta.dls_size);

    /*
     * Next fetch both sets of paths.
     */
    sargs.request = RTLD_DI_SERINFO;
    sargs.serinfo = SLPinfo;
    sargs.serpath = &SLPinfo->dls_serpath[0];
    sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];

    hargs.request = RTLD_DI_SERINFO;
    hargs.serinfo = hintinfo;
    hargs.serpath = &hintinfo->dls_serpath[0];
    hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];

    path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs);
    path_enumerate(p, fill_search_info, &hargs);

    /*
     * Now calculate the difference between two sets, by excluding
     * standard paths from the full set.
     */
    fndx = 0;
    fcount = 0;
    filtered_path = xmalloc(hdr.dirlistlen + 1);
    hintpath = &hintinfo->dls_serpath[0];
    for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
        skip = false;
        SLPpath = &SLPinfo->dls_serpath[0];
        /*
         * Check each standard path against current.
         */
        for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
            /* matched, skip the path */
            if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
                skip = true;
                break;
            }
        }
        if (skip)
            continue;
        /*
         * Not matched against any standard path, add the path
         * to result.  Separate consecutive paths with ':'.
         */
        if (fcount > 0) {
            filtered_path[fndx] = ':';
            fndx++;
        }
        fcount++;
        flen = strlen(hintpath->dls_name);
        strncpy((filtered_path + fndx), hintpath->dls_name, flen);
        fndx += flen;
    }
    filtered_path[fndx] = '\0';

    free(SLPinfo);
    free(hintinfo);

filt_ret:
    return (filtered_path[0] != '\0' ? filtered_path : NULL);
}

static void
init_dag(Obj_Entry *root)
{
    const Needed_Entry *needed;
    const Objlist_Entry *elm;
    DoneList donelist;

    if (root->dag_inited)
        return;
    donelist_init(&donelist);

    /* Root object belongs to own DAG. */
    objlist_push_tail(&root->dldags, root);
    objlist_push_tail(&root->dagmembers, root);
    donelist_check(&donelist, root);

    /*
     * Add dependencies of root object to DAG in breadth order
     * by exploiting the fact that each new object gets added
     * to the tail of the dagmembers list.
     */
    STAILQ_FOREACH(elm, &root->dagmembers, link) {
        for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
            if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
                continue;
            objlist_push_tail(&needed->obj->dldags, root);
            objlist_push_tail(&root->dagmembers, needed->obj);
        }
    }
    root->dag_inited = true;
}

static void
process_nodelete(Obj_Entry *root)
{
    const Objlist_Entry *elm;

    /*
     * Walk over object DAG and process every dependent object that
     * is marked as DF_1_NODELETE.  They need to grow their own DAG,
     * which then should have its reference upped separately.
     */
    STAILQ_FOREACH(elm, &root->dagmembers, link) {
        if (elm->obj != NULL && elm->obj->z_nodelete &&
            !elm->obj->ref_nodel) {
            dbg("obj %s nodelete", elm->obj->path);
            init_dag(elm->obj);
            ref_dag(elm->obj);
            elm->obj->ref_nodel = true;
        }
    }
}

/*
 * Initialize the dynamic linker.  The argument is the address at which
 * the dynamic linker has been mapped into memory.  The primary task of
 * this function is to relocate the dynamic linker.
 */
static void
init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
{
    Obj_Entry objtmp;   /* Temporary rtld object */
    const Elf_Dyn *dyn_rpath;
    const Elf_Dyn *dyn_soname;
    const Elf_Dyn *dyn_runpath;

    /*
     * Conjure up an Obj_Entry structure for the dynamic linker.
     *
     * The "path" member can't be initialized yet because string constants
     * cannot yet be accessed.  Below we will set it correctly.
     */
    memset(&objtmp, 0, sizeof(objtmp));
    objtmp.path = NULL;
    objtmp.rtld = true;
    objtmp.mapbase = mapbase;
#ifdef PIC
    objtmp.relocbase = mapbase;
#endif
    if (RTLD_IS_DYNAMIC()) {
        objtmp.dynamic = rtld_dynamic(&objtmp);
        digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
        assert(objtmp.needed == NULL);
#if !defined(__mips__)
        /* MIPS has a bogus DT_TEXTREL. */
        assert(!objtmp.textrel);
#endif

        /*
         * Temporarily put the dynamic linker entry into the object list, so
         * that symbols can be found.
         */

        relocate_objects(&objtmp, true, &objtmp, 0, NULL);
    }

    /* Initialize the object list. */
    obj_tail = &obj_list;

    /* Now that non-local variables can be accessed, copy out obj_rtld. */
    memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));

    /* The page size is required by the dynamic memory allocator. */
    init_pagesizes(aux_info);

    if (aux_info[AT_OSRELDATE] != NULL)
        osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;

    digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);

    /* Replace the path with a dynamically allocated copy. */
    obj_rtld.path = xstrdup(PATH_RTLD);

    r_debug.r_brk = r_debug_state;
    r_debug.r_state = RT_CONSISTENT;
}

/*
 * Retrieve the array of supported page sizes.  The kernel provides the page
 * sizes in increasing order.
 */
static void
init_pagesizes(Elf_Auxinfo **aux_info)
{
    static size_t psa[MAXPAGESIZES];
    int mib[2];
    size_t len, size;

    if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] !=
        NULL) {
        size = aux_info[AT_PAGESIZESLEN]->a_un.a_val;
        pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr;
    } else {
        len = 2;
        if (sysctlnametomib("hw.pagesizes", mib, &len) == 0)
            size = sizeof(psa);
        else {
            /* As a fallback, retrieve the base page size. */
*/ 1863 size = sizeof(psa[0]); 1864 if (aux_info[AT_PAGESZ] != NULL) { 1865 psa[0] = aux_info[AT_PAGESZ]->a_un.a_val; 1866 goto psa_filled; 1867 } else { 1868 mib[0] = CTL_HW; 1869 mib[1] = HW_PAGESIZE; 1870 len = 2; 1871 } 1872 } 1873 if (sysctl(mib, len, psa, &size, NULL, 0) == -1) { 1874 _rtld_error("sysctl for hw.pagesize(s) failed"); 1875 die(); 1876 } 1877 psa_filled: 1878 pagesizes = psa; 1879 } 1880 npagesizes = size / sizeof(pagesizes[0]); 1881 /* Discard any invalid entries at the end of the array. */ 1882 while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0) 1883 npagesizes--; 1884 } 1885 1886 /* 1887 * Add the init functions from a needed object list (and its recursive 1888 * needed objects) to "list". This is not used directly; it is a helper 1889 * function for initlist_add_objects(). The write lock must be held 1890 * when this function is called. 1891 */ 1892 static void 1893 initlist_add_neededs(Needed_Entry *needed, Objlist *list) 1894 { 1895 /* Recursively process the successor needed objects. */ 1896 if (needed->next != NULL) 1897 initlist_add_neededs(needed->next, list); 1898 1899 /* Process the current needed object. */ 1900 if (needed->obj != NULL) 1901 initlist_add_objects(needed->obj, &needed->obj->next, list); 1902 } 1903 1904 /* 1905 * Scan all of the DAGs rooted in the range of objects from "obj" to 1906 * "tail" and add their init functions to "list". This recurses over 1907 * the DAGs and ensure the proper init ordering such that each object's 1908 * needed libraries are initialized before the object itself. At the 1909 * same time, this function adds the objects to the global finalization 1910 * list "list_fini" in the opposite order. The write lock must be 1911 * held when this function is called. 1912 */ 1913 static void 1914 initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list) 1915 { 1916 1917 if (obj->init_scanned || obj->init_done) 1918 return; 1919 obj->init_scanned = true; 1920 1921 /* Recursively process the successor objects. */ 1922 if (&obj->next != tail) 1923 initlist_add_objects(obj->next, tail, list); 1924 1925 /* Recursively process the needed objects. */ 1926 if (obj->needed != NULL) 1927 initlist_add_neededs(obj->needed, list); 1928 if (obj->needed_filtees != NULL) 1929 initlist_add_neededs(obj->needed_filtees, list); 1930 if (obj->needed_aux_filtees != NULL) 1931 initlist_add_neededs(obj->needed_aux_filtees, list); 1932 1933 /* Add the object to the init list. */ 1934 if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL || 1935 obj->init_array != (Elf_Addr)NULL) 1936 objlist_push_tail(list, obj); 1937 1938 /* Add the object to the global fini list in the reverse order. 
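Fini functions must run in the opposite order of the corresponding init functions, which pushing onto the head of list_fini provides.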
*/ 1939 if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL) 1940 && !obj->on_fini_list) { 1941 objlist_push_head(&list_fini, obj); 1942 obj->on_fini_list = true; 1943 } 1944 } 1945 1946 #ifndef FPTR_TARGET 1947 #define FPTR_TARGET(f) ((Elf_Addr) (f)) 1948 #endif 1949 1950 static void 1951 free_needed_filtees(Needed_Entry *n) 1952 { 1953 Needed_Entry *needed, *needed1; 1954 1955 for (needed = n; needed != NULL; needed = needed->next) { 1956 if (needed->obj != NULL) { 1957 dlclose(needed->obj); 1958 needed->obj = NULL; 1959 } 1960 } 1961 for (needed = n; needed != NULL; needed = needed1) { 1962 needed1 = needed->next; 1963 free(needed); 1964 } 1965 } 1966 1967 static void 1968 unload_filtees(Obj_Entry *obj) 1969 { 1970 1971 free_needed_filtees(obj->needed_filtees); 1972 obj->needed_filtees = NULL; 1973 free_needed_filtees(obj->needed_aux_filtees); 1974 obj->needed_aux_filtees = NULL; 1975 obj->filtees_loaded = false; 1976 } 1977 1978 static void 1979 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags, 1980 RtldLockState *lockstate) 1981 { 1982 1983 for (; needed != NULL; needed = needed->next) { 1984 needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj, 1985 flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) | 1986 RTLD_LOCAL, lockstate); 1987 } 1988 } 1989 1990 static void 1991 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate) 1992 { 1993 1994 lock_restart_for_upgrade(lockstate); 1995 if (!obj->filtees_loaded) { 1996 load_filtee1(obj, obj->needed_filtees, flags, lockstate); 1997 load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate); 1998 obj->filtees_loaded = true; 1999 } 2000 } 2001 2002 static int 2003 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags) 2004 { 2005 Obj_Entry *obj1; 2006 2007 for (; needed != NULL; needed = needed->next) { 2008 obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj, 2009 flags & ~RTLD_LO_NOLOAD); 2010 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0) 2011 return (-1); 2012 } 2013 return (0); 2014 } 2015 2016 /* 2017 * Given a shared object, traverse its list of needed objects, and load 2018 * each of them. Returns 0 on success. Generates an error message and 2019 * returns -1 on failure. 2020 */ 2021 static int 2022 load_needed_objects(Obj_Entry *first, int flags) 2023 { 2024 Obj_Entry *obj; 2025 2026 for (obj = first; obj != NULL; obj = obj->next) { 2027 if (process_needed(obj, obj->needed, flags) == -1) 2028 return (-1); 2029 } 2030 return (0); 2031 } 2032 2033 static int 2034 load_preload_objects(void) 2035 { 2036 char *p = ld_preload; 2037 Obj_Entry *obj; 2038 static const char delim[] = " \t:;"; 2039 2040 if (p == NULL) 2041 return 0; 2042 2043 p += strspn(p, delim); 2044 while (*p != '\0') { 2045 size_t len = strcspn(p, delim); 2046 char savech; 2047 2048 savech = p[len]; 2049 p[len] = '\0'; 2050 obj = load_object(p, -1, NULL, 0); 2051 if (obj == NULL) 2052 return -1; /* XXX - cleanup */ 2053 obj->z_interpose = true; 2054 p[len] = savech; 2055 p += len; 2056 p += strspn(p, delim); 2057 } 2058 LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL); 2059 return 0; 2060 } 2061 2062 static const char * 2063 printable_path(const char *path) 2064 { 2065 2066 return (path == NULL ? "<unknown>" : path); 2067 } 2068 2069 /* 2070 * Load a shared object into memory, if it is not already loaded. The 2071 * object may be specified by name or by user-supplied file descriptor 2072 * fd_u. 
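(The descriptor form is the one used by fdlopen(3).)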
In the latter case, the fd_u descriptor is not closed, but its
2073 * duplicate is.
2074 *
2075 * Returns a pointer to the Obj_Entry for the object. Returns NULL
2076 * on failure.
2077 */
2078 static Obj_Entry *
2079 load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
2080 {
2081 Obj_Entry *obj;
2082 int fd;
2083 struct stat sb;
2084 char *path;
2085
2086 if (name != NULL) {
2087 for (obj = obj_list->next; obj != NULL; obj = obj->next) {
2088 if (object_match_name(obj, name))
2089 return (obj);
2090 }
2091
2092 path = find_library(name, refobj);
2093 if (path == NULL)
2094 return (NULL);
2095 } else
2096 path = NULL;
2097
2098 /*
2099 * If we didn't find a match by pathname, or the name is not
2100 * supplied, open the file and check again by device and inode.
2101 * This avoids false mismatches caused by multiple links or ".."
2102 * in pathnames.
2103 *
2104 * To avoid a race, we open the file and use fstat() rather than
2105 * using stat().
2106 */
2107 fd = -1;
2108 if (fd_u == -1) {
2109 if ((fd = open(path, O_RDONLY | O_CLOEXEC)) == -1) {
2110 _rtld_error("Cannot open \"%s\"", path);
2111 free(path);
2112 return (NULL);
2113 }
2114 } else {
2115 fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0);
2116 if (fd == -1) {
2117 _rtld_error("Cannot dup fd");
2118 free(path);
2119 return (NULL);
2120 }
2121 }
2122 if (fstat(fd, &sb) == -1) {
2123 _rtld_error("Cannot fstat \"%s\"", printable_path(path));
2124 close(fd);
2125 free(path);
2126 return NULL;
2127 }
2128 for (obj = obj_list->next; obj != NULL; obj = obj->next)
2129 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
2130 break;
2131 if (obj != NULL && name != NULL) {
2132 object_add_name(obj, name);
2133 free(path);
2134 close(fd);
2135 return obj;
2136 }
2137 if (flags & RTLD_LO_NOLOAD) {
2138 free(path);
2139 close(fd);
2140 return (NULL);
2141 }
2142
2143 /* First use of this object, so we must map it in */
2144 obj = do_load_object(fd, name, path, &sb, flags);
2145 if (obj == NULL)
2146 free(path);
2147 close(fd);
2148
2149 return obj;
2150 }
2151
2152 static Obj_Entry *
2153 do_load_object(int fd, const char *name, char *path, struct stat *sbp,
2154 int flags)
2155 {
2156 Obj_Entry *obj;
2157 struct statfs fs;
2158
2159 /*
2160 * Before mapping the object, make sure that environment variables
2161 * haven't been used to circumvent the noexec flag on a filesystem.
2162 */
2163 if (dangerous_ld_env) {
2164 if (fstatfs(fd, &fs) != 0) {
2165 _rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
2166 return NULL;
2167 }
2168 if (fs.f_flags & MNT_NOEXEC) {
2169 _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname);
2170 return NULL;
2171 }
2172 }
2173 dbg("loading \"%s\"", printable_path(path));
2174 obj = map_object(fd, printable_path(path), sbp);
2175 if (obj == NULL)
2176 return NULL;
2177
2178 /*
2179 * If DT_SONAME is present in the object, digest_dynamic2 already
2180 * added it to the object names.
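 * Subsequent lookups may therefore match this object by its soname as
 * well as by the name it was loaded under.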
2181 */ 2182 if (name != NULL) 2183 object_add_name(obj, name); 2184 obj->path = path; 2185 digest_dynamic(obj, 0); 2186 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path, 2187 obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount); 2188 if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) == 2189 RTLD_LO_DLOPEN) { 2190 dbg("refusing to load non-loadable \"%s\"", obj->path); 2191 _rtld_error("Cannot dlopen non-loadable %s", obj->path); 2192 munmap(obj->mapbase, obj->mapsize); 2193 obj_free(obj); 2194 return (NULL); 2195 } 2196 2197 *obj_tail = obj; 2198 obj_tail = &obj->next; 2199 obj_count++; 2200 obj_loads++; 2201 linkmap_add(obj); /* for GDB & dlinfo() */ 2202 max_stack_flags |= obj->stack_flags; 2203 2204 dbg(" %p .. %p: %s", obj->mapbase, 2205 obj->mapbase + obj->mapsize - 1, obj->path); 2206 if (obj->textrel) 2207 dbg(" WARNING: %s has impure text", obj->path); 2208 LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0, 2209 obj->path); 2210 2211 return obj; 2212 } 2213 2214 static Obj_Entry * 2215 obj_from_addr(const void *addr) 2216 { 2217 Obj_Entry *obj; 2218 2219 for (obj = obj_list; obj != NULL; obj = obj->next) { 2220 if (addr < (void *) obj->mapbase) 2221 continue; 2222 if (addr < (void *) (obj->mapbase + obj->mapsize)) 2223 return obj; 2224 } 2225 return NULL; 2226 } 2227 2228 static void 2229 preinit_main(void) 2230 { 2231 Elf_Addr *preinit_addr; 2232 int index; 2233 2234 preinit_addr = (Elf_Addr *)obj_main->preinit_array; 2235 if (preinit_addr == NULL) 2236 return; 2237 2238 for (index = 0; index < obj_main->preinit_array_num; index++) { 2239 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) { 2240 dbg("calling preinit function for %s at %p", obj_main->path, 2241 (void *)preinit_addr[index]); 2242 LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index], 2243 0, 0, obj_main->path); 2244 call_init_pointer(obj_main, preinit_addr[index]); 2245 } 2246 } 2247 } 2248 2249 /* 2250 * Call the finalization functions for each of the objects in "list" 2251 * belonging to the DAG of "root" and referenced once. If NULL "root" 2252 * is specified, every finalization function will be called regardless 2253 * of the reference count and the list elements won't be freed. All of 2254 * the objects are expected to have non-NULL fini functions. 2255 */ 2256 static void 2257 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate) 2258 { 2259 Objlist_Entry *elm; 2260 char *saved_msg; 2261 Elf_Addr *fini_addr; 2262 int index; 2263 2264 assert(root == NULL || root->refcount == 1); 2265 2266 /* 2267 * Preserve the current error message since a fini function might 2268 * call into the dynamic linker and overwrite it. 2269 */ 2270 saved_msg = errmsg_save(); 2271 do { 2272 STAILQ_FOREACH(elm, list, link) { 2273 if (root != NULL && (elm->obj->refcount != 1 || 2274 objlist_find(&root->dagmembers, elm->obj) == NULL)) 2275 continue; 2276 /* Remove object from fini list to prevent recursive invocation. */ 2277 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2278 /* 2279 * XXX: If a dlopen() call references an object while the 2280 * fini function is in progress, we might end up trying to 2281 * unload the referenced object in dlclose() or the object 2282 * won't be unloaded although its fini function has been 2283 * called. 2284 */ 2285 lock_release(rtld_bind_lock, lockstate); 2286 2287 /* 2288 * It is legal to have both DT_FINI and DT_FINI_ARRAY defined. 2289 * When this happens, DT_FINI_ARRAY is processed first. 
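 * The DT_FINI_ARRAY entries themselves are invoked in reverse index
 * order, as expected for destructors.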
2290 */
2291 fini_addr = (Elf_Addr *)elm->obj->fini_array;
2292 if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
2293 for (index = elm->obj->fini_array_num - 1; index >= 0;
2294 index--) {
2295 if (fini_addr[index] != 0 && fini_addr[index] != 1) {
2296 dbg("calling fini function for %s at %p",
2297 elm->obj->path, (void *)fini_addr[index]);
2298 LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
2299 (void *)fini_addr[index], 0, 0, elm->obj->path);
2300 call_initfini_pointer(elm->obj, fini_addr[index]);
2301 }
2302 }
2303 }
2304 if (elm->obj->fini != (Elf_Addr)NULL) {
2305 dbg("calling fini function for %s at %p", elm->obj->path,
2306 (void *)elm->obj->fini);
2307 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
2308 0, 0, elm->obj->path);
2309 call_initfini_pointer(elm->obj, elm->obj->fini);
2310 }
2311 wlock_acquire(rtld_bind_lock, lockstate);
2312 /* No need to free anything if process is going down. */
2313 if (root != NULL)
2314 free(elm);
2315 /*
2316 * We must restart the list traversal after every fini call
2317 * because a dlclose() call from the fini function or from
2318 * another thread might have modified the reference counts.
2319 */
2320 break;
2321 }
2322 } while (elm != NULL);
2323 errmsg_restore(saved_msg);
2324 }
2325
2326 /*
2327 * Call the initialization functions for each of the objects in
2328 * "list". All of the objects are expected to have non-NULL init
2329 * functions.
2330 */
2331 static void
2332 objlist_call_init(Objlist *list, RtldLockState *lockstate)
2333 {
2334 Objlist_Entry *elm;
2335 Obj_Entry *obj;
2336 char *saved_msg;
2337 Elf_Addr *init_addr;
2338 int index;
2339
2340 /*
2341 * Clear the init_scanned flag so that objects can be rechecked and
2342 * possibly initialized earlier if any of the init vectors called below
2343 * change the set of loaded objects by using dlopen.
2344 */
2345 for (obj = obj_list; obj != NULL; obj = obj->next)
2346 obj->init_scanned = false;
2347
2348 /*
2349 * Preserve the current error message since an init function might
2350 * call into the dynamic linker and overwrite it.
2351 */
2352 saved_msg = errmsg_save();
2353 STAILQ_FOREACH(elm, list, link) {
2354 if (elm->obj->init_done) /* Initialized early. */
2355 continue;
2356 /*
2357 * Race: another thread might try to use this object before the
2358 * current one completes the initialization. Not much can be done
2359 * here without better locking.
2360 */
2361 elm->obj->init_done = true;
2362 lock_release(rtld_bind_lock, lockstate);
2363
2364 /*
2365 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
2366 * When this happens, DT_INIT is processed first.
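 * The DT_INIT_ARRAY entries are then called in ascending index order.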
2367 */ 2368 if (elm->obj->init != (Elf_Addr)NULL) { 2369 dbg("calling init function for %s at %p", elm->obj->path, 2370 (void *)elm->obj->init); 2371 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init, 2372 0, 0, elm->obj->path); 2373 call_initfini_pointer(elm->obj, elm->obj->init); 2374 } 2375 init_addr = (Elf_Addr *)elm->obj->init_array; 2376 if (init_addr != NULL) { 2377 for (index = 0; index < elm->obj->init_array_num; index++) { 2378 if (init_addr[index] != 0 && init_addr[index] != 1) { 2379 dbg("calling init function for %s at %p", elm->obj->path, 2380 (void *)init_addr[index]); 2381 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, 2382 (void *)init_addr[index], 0, 0, elm->obj->path); 2383 call_init_pointer(elm->obj, init_addr[index]); 2384 } 2385 } 2386 } 2387 wlock_acquire(rtld_bind_lock, lockstate); 2388 } 2389 errmsg_restore(saved_msg); 2390 } 2391 2392 static void 2393 objlist_clear(Objlist *list) 2394 { 2395 Objlist_Entry *elm; 2396 2397 while (!STAILQ_EMPTY(list)) { 2398 elm = STAILQ_FIRST(list); 2399 STAILQ_REMOVE_HEAD(list, link); 2400 free(elm); 2401 } 2402 } 2403 2404 static Objlist_Entry * 2405 objlist_find(Objlist *list, const Obj_Entry *obj) 2406 { 2407 Objlist_Entry *elm; 2408 2409 STAILQ_FOREACH(elm, list, link) 2410 if (elm->obj == obj) 2411 return elm; 2412 return NULL; 2413 } 2414 2415 static void 2416 objlist_init(Objlist *list) 2417 { 2418 STAILQ_INIT(list); 2419 } 2420 2421 static void 2422 objlist_push_head(Objlist *list, Obj_Entry *obj) 2423 { 2424 Objlist_Entry *elm; 2425 2426 elm = NEW(Objlist_Entry); 2427 elm->obj = obj; 2428 STAILQ_INSERT_HEAD(list, elm, link); 2429 } 2430 2431 static void 2432 objlist_push_tail(Objlist *list, Obj_Entry *obj) 2433 { 2434 Objlist_Entry *elm; 2435 2436 elm = NEW(Objlist_Entry); 2437 elm->obj = obj; 2438 STAILQ_INSERT_TAIL(list, elm, link); 2439 } 2440 2441 static void 2442 objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj) 2443 { 2444 Objlist_Entry *elm, *listelm; 2445 2446 STAILQ_FOREACH(listelm, list, link) { 2447 if (listelm->obj == listobj) 2448 break; 2449 } 2450 elm = NEW(Objlist_Entry); 2451 elm->obj = obj; 2452 if (listelm != NULL) 2453 STAILQ_INSERT_AFTER(list, listelm, elm, link); 2454 else 2455 STAILQ_INSERT_TAIL(list, elm, link); 2456 } 2457 2458 static void 2459 objlist_remove(Objlist *list, Obj_Entry *obj) 2460 { 2461 Objlist_Entry *elm; 2462 2463 if ((elm = objlist_find(list, obj)) != NULL) { 2464 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2465 free(elm); 2466 } 2467 } 2468 2469 /* 2470 * Relocate dag rooted in the specified object. 2471 * Returns 0 on success, or -1 on failure. 2472 */ 2473 2474 static int 2475 relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj, 2476 int flags, RtldLockState *lockstate) 2477 { 2478 Objlist_Entry *elm; 2479 int error; 2480 2481 error = 0; 2482 STAILQ_FOREACH(elm, &root->dagmembers, link) { 2483 error = relocate_object(elm->obj, bind_now, rtldobj, flags, 2484 lockstate); 2485 if (error == -1) 2486 break; 2487 } 2488 return (error); 2489 } 2490 2491 /* 2492 * Relocate single object. 2493 * Returns 0 on success, or -1 on failure. 
2494 */ 2495 static int 2496 relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, 2497 int flags, RtldLockState *lockstate) 2498 { 2499 2500 if (obj->relocated) 2501 return (0); 2502 obj->relocated = true; 2503 if (obj != rtldobj) 2504 dbg("relocating \"%s\"", obj->path); 2505 2506 if (obj->symtab == NULL || obj->strtab == NULL || 2507 !(obj->valid_hash_sysv || obj->valid_hash_gnu)) { 2508 _rtld_error("%s: Shared object has no run-time symbol table", 2509 obj->path); 2510 return (-1); 2511 } 2512 2513 if (obj->textrel) { 2514 /* There are relocations to the write-protected text segment. */ 2515 if (mprotect(obj->mapbase, obj->textsize, 2516 PROT_READ|PROT_WRITE|PROT_EXEC) == -1) { 2517 _rtld_error("%s: Cannot write-enable text segment: %s", 2518 obj->path, rtld_strerror(errno)); 2519 return (-1); 2520 } 2521 } 2522 2523 /* Process the non-PLT relocations. */ 2524 if (reloc_non_plt(obj, rtldobj, flags, lockstate)) 2525 return (-1); 2526 2527 if (obj->textrel) { /* Re-protected the text segment. */ 2528 if (mprotect(obj->mapbase, obj->textsize, 2529 PROT_READ|PROT_EXEC) == -1) { 2530 _rtld_error("%s: Cannot write-protect text segment: %s", 2531 obj->path, rtld_strerror(errno)); 2532 return (-1); 2533 } 2534 } 2535 2536 2537 /* Set the special PLT or GOT entries. */ 2538 init_pltgot(obj); 2539 2540 /* Process the PLT relocations. */ 2541 if (reloc_plt(obj) == -1) 2542 return (-1); 2543 /* Relocate the jump slots if we are doing immediate binding. */ 2544 if (obj->bind_now || bind_now) 2545 if (reloc_jmpslots(obj, flags, lockstate) == -1) 2546 return (-1); 2547 2548 if (obj->relro_size > 0) { 2549 if (mprotect(obj->relro_page, obj->relro_size, 2550 PROT_READ) == -1) { 2551 _rtld_error("%s: Cannot enforce relro protection: %s", 2552 obj->path, rtld_strerror(errno)); 2553 return (-1); 2554 } 2555 } 2556 2557 /* 2558 * Set up the magic number and version in the Obj_Entry. These 2559 * were checked in the crt1.o from the original ElfKit, so we 2560 * set them for backward compatibility. 2561 */ 2562 obj->magic = RTLD_MAGIC; 2563 obj->version = RTLD_VERSION; 2564 2565 return (0); 2566 } 2567 2568 /* 2569 * Relocate newly-loaded shared objects. The argument is a pointer to 2570 * the Obj_Entry for the first such object. All objects from the first 2571 * to the end of the list of objects are relocated. Returns 0 on success, 2572 * or -1 on failure. 2573 */ 2574 static int 2575 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj, 2576 int flags, RtldLockState *lockstate) 2577 { 2578 Obj_Entry *obj; 2579 int error; 2580 2581 for (error = 0, obj = first; obj != NULL; obj = obj->next) { 2582 error = relocate_object(obj, bind_now, rtldobj, flags, 2583 lockstate); 2584 if (error == -1) 2585 break; 2586 } 2587 return (error); 2588 } 2589 2590 /* 2591 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots 2592 * referencing STT_GNU_IFUNC symbols is postponed till the other 2593 * relocations are done. The indirect functions specified as 2594 * ifunc are allowed to call other symbols, so we need to have 2595 * objects relocated before asking for resolution from indirects. 2596 * 2597 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion, 2598 * instead of the usual lazy handling of PLT slots. It is 2599 * consistent with how GNU does it. 
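 *
 * For reference, user code typically creates an ifunc roughly as
 * follows (an illustrative sketch only; all of the names below are
 * made up):
 *
 *	static int foo_fast(void), foo_slow(void);
 *	static int (*resolve_foo(void))(void)
 *	{
 *		return (cpu_has_fast_path() ? foo_fast : foo_slow);
 *	}
 *	int foo(void) __attribute__((ifunc("resolve_foo")));
 *
 * The resolver runs while its object is being relocated, which is
 * another reason the object's dependencies must already be relocated
 * by the time it is called.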
2600 */ 2601 static int 2602 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags, 2603 RtldLockState *lockstate) 2604 { 2605 if (obj->irelative && reloc_iresolve(obj, lockstate) == -1) 2606 return (-1); 2607 if ((obj->bind_now || bind_now) && obj->gnu_ifunc && 2608 reloc_gnu_ifunc(obj, flags, lockstate) == -1) 2609 return (-1); 2610 return (0); 2611 } 2612 2613 static int 2614 resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags, 2615 RtldLockState *lockstate) 2616 { 2617 Obj_Entry *obj; 2618 2619 for (obj = first; obj != NULL; obj = obj->next) { 2620 if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1) 2621 return (-1); 2622 } 2623 return (0); 2624 } 2625 2626 static int 2627 initlist_objects_ifunc(Objlist *list, bool bind_now, int flags, 2628 RtldLockState *lockstate) 2629 { 2630 Objlist_Entry *elm; 2631 2632 STAILQ_FOREACH(elm, list, link) { 2633 if (resolve_object_ifunc(elm->obj, bind_now, flags, 2634 lockstate) == -1) 2635 return (-1); 2636 } 2637 return (0); 2638 } 2639 2640 /* 2641 * Cleanup procedure. It will be called (by the atexit mechanism) just 2642 * before the process exits. 2643 */ 2644 static void 2645 rtld_exit(void) 2646 { 2647 RtldLockState lockstate; 2648 2649 wlock_acquire(rtld_bind_lock, &lockstate); 2650 dbg("rtld_exit()"); 2651 objlist_call_fini(&list_fini, NULL, &lockstate); 2652 /* No need to remove the items from the list, since we are exiting. */ 2653 if (!libmap_disable) 2654 lm_fini(); 2655 lock_release(rtld_bind_lock, &lockstate); 2656 } 2657 2658 /* 2659 * Iterate over a search path, translate each element, and invoke the 2660 * callback on the result. 2661 */ 2662 static void * 2663 path_enumerate(const char *path, path_enum_proc callback, void *arg) 2664 { 2665 const char *trans; 2666 if (path == NULL) 2667 return (NULL); 2668 2669 path += strspn(path, ":;"); 2670 while (*path != '\0') { 2671 size_t len; 2672 char *res; 2673 2674 len = strcspn(path, ":;"); 2675 trans = lm_findn(NULL, path, len); 2676 if (trans) 2677 res = callback(trans, strlen(trans), arg); 2678 else 2679 res = callback(path, len, arg); 2680 2681 if (res != NULL) 2682 return (res); 2683 2684 path += len; 2685 path += strspn(path, ":;"); 2686 } 2687 2688 return (NULL); 2689 } 2690 2691 struct try_library_args { 2692 const char *name; 2693 size_t namelen; 2694 char *buffer; 2695 size_t buflen; 2696 }; 2697 2698 static void * 2699 try_library_path(const char *dir, size_t dirlen, void *param) 2700 { 2701 struct try_library_args *arg; 2702 2703 arg = param; 2704 if (*dir == '/' || trust) { 2705 char *pathname; 2706 2707 if (dirlen + 1 + arg->namelen + 1 > arg->buflen) 2708 return (NULL); 2709 2710 pathname = arg->buffer; 2711 strncpy(pathname, dir, dirlen); 2712 pathname[dirlen] = '/'; 2713 strcpy(pathname + dirlen + 1, arg->name); 2714 2715 dbg(" Trying \"%s\"", pathname); 2716 if (access(pathname, F_OK) == 0) { /* We found it */ 2717 pathname = xmalloc(dirlen + 1 + arg->namelen + 1); 2718 strcpy(pathname, arg->buffer); 2719 return (pathname); 2720 } 2721 } 2722 return (NULL); 2723 } 2724 2725 static char * 2726 search_library_path(const char *name, const char *path) 2727 { 2728 char *p; 2729 struct try_library_args arg; 2730 2731 if (path == NULL) 2732 return NULL; 2733 2734 arg.name = name; 2735 arg.namelen = strlen(name); 2736 arg.buffer = xmalloc(PATH_MAX); 2737 arg.buflen = PATH_MAX; 2738 2739 p = path_enumerate(path, try_library_path, &arg); 2740 2741 free(arg.buffer); 2742 2743 return (p); 2744 } 2745 2746 int 2747 dlclose(void *handle) 2748 { 2749 
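/*
 * Public entry point: drop one dlopen() reference on "handle".  The
 * object and the rest of its DAG are unloaded (fini functions called,
 * mappings removed) only when the last reference goes away.
 */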
Obj_Entry *root; 2750 RtldLockState lockstate; 2751 2752 wlock_acquire(rtld_bind_lock, &lockstate); 2753 root = dlcheck(handle); 2754 if (root == NULL) { 2755 lock_release(rtld_bind_lock, &lockstate); 2756 return -1; 2757 } 2758 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount, 2759 root->path); 2760 2761 /* Unreference the object and its dependencies. */ 2762 root->dl_refcount--; 2763 2764 if (root->refcount == 1) { 2765 /* 2766 * The object will be no longer referenced, so we must unload it. 2767 * First, call the fini functions. 2768 */ 2769 objlist_call_fini(&list_fini, root, &lockstate); 2770 2771 unref_dag(root); 2772 2773 /* Finish cleaning up the newly-unreferenced objects. */ 2774 GDB_STATE(RT_DELETE,&root->linkmap); 2775 unload_object(root); 2776 GDB_STATE(RT_CONSISTENT,NULL); 2777 } else 2778 unref_dag(root); 2779 2780 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL); 2781 lock_release(rtld_bind_lock, &lockstate); 2782 return 0; 2783 } 2784 2785 char * 2786 dlerror(void) 2787 { 2788 char *msg = error_message; 2789 error_message = NULL; 2790 return msg; 2791 } 2792 2793 /* 2794 * This function is deprecated and has no effect. 2795 */ 2796 void 2797 dllockinit(void *context, 2798 void *(*lock_create)(void *context), 2799 void (*rlock_acquire)(void *lock), 2800 void (*wlock_acquire)(void *lock), 2801 void (*lock_release)(void *lock), 2802 void (*lock_destroy)(void *lock), 2803 void (*context_destroy)(void *context)) 2804 { 2805 static void *cur_context; 2806 static void (*cur_context_destroy)(void *); 2807 2808 /* Just destroy the context from the previous call, if necessary. */ 2809 if (cur_context_destroy != NULL) 2810 cur_context_destroy(cur_context); 2811 cur_context = context; 2812 cur_context_destroy = context_destroy; 2813 } 2814 2815 void * 2816 dlopen(const char *name, int mode) 2817 { 2818 2819 return (rtld_dlopen(name, -1, mode)); 2820 } 2821 2822 void * 2823 fdlopen(int fd, int mode) 2824 { 2825 2826 return (rtld_dlopen(NULL, fd, mode)); 2827 } 2828 2829 static void * 2830 rtld_dlopen(const char *name, int fd, int mode) 2831 { 2832 RtldLockState lockstate; 2833 int lo_flags; 2834 2835 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name); 2836 ld_tracing = (mode & RTLD_TRACE) == 0 ? 
NULL : "1"; 2837 if (ld_tracing != NULL) { 2838 rlock_acquire(rtld_bind_lock, &lockstate); 2839 if (sigsetjmp(lockstate.env, 0) != 0) 2840 lock_upgrade(rtld_bind_lock, &lockstate); 2841 environ = (char **)*get_program_var_addr("environ", &lockstate); 2842 lock_release(rtld_bind_lock, &lockstate); 2843 } 2844 lo_flags = RTLD_LO_DLOPEN; 2845 if (mode & RTLD_NODELETE) 2846 lo_flags |= RTLD_LO_NODELETE; 2847 if (mode & RTLD_NOLOAD) 2848 lo_flags |= RTLD_LO_NOLOAD; 2849 if (ld_tracing != NULL) 2850 lo_flags |= RTLD_LO_TRACE; 2851 2852 return (dlopen_object(name, fd, obj_main, lo_flags, 2853 mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL)); 2854 } 2855 2856 static void 2857 dlopen_cleanup(Obj_Entry *obj) 2858 { 2859 2860 obj->dl_refcount--; 2861 unref_dag(obj); 2862 if (obj->refcount == 0) 2863 unload_object(obj); 2864 } 2865 2866 static Obj_Entry * 2867 dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags, 2868 int mode, RtldLockState *lockstate) 2869 { 2870 Obj_Entry **old_obj_tail; 2871 Obj_Entry *obj; 2872 Objlist initlist; 2873 RtldLockState mlockstate; 2874 int result; 2875 2876 objlist_init(&initlist); 2877 2878 if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) { 2879 wlock_acquire(rtld_bind_lock, &mlockstate); 2880 lockstate = &mlockstate; 2881 } 2882 GDB_STATE(RT_ADD,NULL); 2883 2884 old_obj_tail = obj_tail; 2885 obj = NULL; 2886 if (name == NULL && fd == -1) { 2887 obj = obj_main; 2888 obj->refcount++; 2889 } else { 2890 obj = load_object(name, fd, refobj, lo_flags); 2891 } 2892 2893 if (obj) { 2894 obj->dl_refcount++; 2895 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL) 2896 objlist_push_tail(&list_global, obj); 2897 if (*old_obj_tail != NULL) { /* We loaded something new. */ 2898 assert(*old_obj_tail == obj); 2899 result = load_needed_objects(obj, 2900 lo_flags & (RTLD_LO_DLOPEN | RTLD_LO_EARLY)); 2901 init_dag(obj); 2902 ref_dag(obj); 2903 if (result != -1) 2904 result = rtld_verify_versions(&obj->dagmembers); 2905 if (result != -1 && ld_tracing) 2906 goto trace; 2907 if (result == -1 || relocate_object_dag(obj, 2908 (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld, 2909 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 2910 lockstate) == -1) { 2911 dlopen_cleanup(obj); 2912 obj = NULL; 2913 } else if (lo_flags & RTLD_LO_EARLY) { 2914 /* 2915 * Do not call the init functions for early loaded 2916 * filtees. The image is still not initialized enough 2917 * for them to work. 2918 * 2919 * Our object is found by the global object list and 2920 * will be ordered among all init calls done right 2921 * before transferring control to main. 2922 */ 2923 } else { 2924 /* Make list of init functions to call. */ 2925 initlist_add_objects(obj, &obj->next, &initlist); 2926 } 2927 /* 2928 * Process all no_delete objects here, given them own 2929 * DAGs to prevent their dependencies from being unloaded. 2930 * This has to be done after we have loaded all of the 2931 * dependencies, so that we do not miss any. 2932 */ 2933 if (obj != NULL) 2934 process_nodelete(obj); 2935 } else { 2936 /* 2937 * Bump the reference counts for objects on this DAG. If 2938 * this is the first dlopen() call for the object that was 2939 * already loaded as a dependency, initialize the dag 2940 * starting at it. 
2941 */ 2942 init_dag(obj); 2943 ref_dag(obj); 2944 2945 if ((lo_flags & RTLD_LO_TRACE) != 0) 2946 goto trace; 2947 } 2948 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 || 2949 obj->z_nodelete) && !obj->ref_nodel) { 2950 dbg("obj %s nodelete", obj->path); 2951 ref_dag(obj); 2952 obj->z_nodelete = obj->ref_nodel = true; 2953 } 2954 } 2955 2956 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0, 2957 name); 2958 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL); 2959 2960 if (!(lo_flags & RTLD_LO_EARLY)) { 2961 map_stacks_exec(lockstate); 2962 } 2963 2964 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW, 2965 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 2966 lockstate) == -1) { 2967 objlist_clear(&initlist); 2968 dlopen_cleanup(obj); 2969 if (lockstate == &mlockstate) 2970 lock_release(rtld_bind_lock, lockstate); 2971 return (NULL); 2972 } 2973 2974 if (!(lo_flags & RTLD_LO_EARLY)) { 2975 /* Call the init functions. */ 2976 objlist_call_init(&initlist, lockstate); 2977 } 2978 objlist_clear(&initlist); 2979 if (lockstate == &mlockstate) 2980 lock_release(rtld_bind_lock, lockstate); 2981 return obj; 2982 trace: 2983 trace_loaded_objects(obj); 2984 if (lockstate == &mlockstate) 2985 lock_release(rtld_bind_lock, lockstate); 2986 exit(0); 2987 } 2988 2989 static void * 2990 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve, 2991 int flags) 2992 { 2993 DoneList donelist; 2994 const Obj_Entry *obj, *defobj; 2995 const Elf_Sym *def; 2996 SymLook req; 2997 RtldLockState lockstate; 2998 #ifndef __ia64__ 2999 tls_index ti; 3000 #endif 3001 int res; 3002 3003 def = NULL; 3004 defobj = NULL; 3005 symlook_init(&req, name); 3006 req.ventry = ve; 3007 req.flags = flags | SYMLOOK_IN_PLT; 3008 req.lockstate = &lockstate; 3009 3010 rlock_acquire(rtld_bind_lock, &lockstate); 3011 if (sigsetjmp(lockstate.env, 0) != 0) 3012 lock_upgrade(rtld_bind_lock, &lockstate); 3013 if (handle == NULL || handle == RTLD_NEXT || 3014 handle == RTLD_DEFAULT || handle == RTLD_SELF) { 3015 3016 if ((obj = obj_from_addr(retaddr)) == NULL) { 3017 _rtld_error("Cannot determine caller's shared object"); 3018 lock_release(rtld_bind_lock, &lockstate); 3019 return NULL; 3020 } 3021 if (handle == NULL) { /* Just the caller's shared object. */ 3022 res = symlook_obj(&req, obj); 3023 if (res == 0) { 3024 def = req.sym_out; 3025 defobj = req.defobj_out; 3026 } 3027 } else if (handle == RTLD_NEXT || /* Objects after caller's */ 3028 handle == RTLD_SELF) { /* ... caller included */ 3029 if (handle == RTLD_NEXT) 3030 obj = obj->next; 3031 for (; obj != NULL; obj = obj->next) { 3032 res = symlook_obj(&req, obj); 3033 if (res == 0) { 3034 if (def == NULL || 3035 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) { 3036 def = req.sym_out; 3037 defobj = req.defobj_out; 3038 if (ELF_ST_BIND(def->st_info) != STB_WEAK) 3039 break; 3040 } 3041 } 3042 } 3043 /* 3044 * Search the dynamic linker itself, and possibly resolve the 3045 * symbol from there. This is how the application links to 3046 * dynamic linker services such as dlopen. 
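 *
 * The RTLD_NEXT case handled above is also what interposing wrapper
 * libraries rely on; such a wrapper typically chains to the next
 * definition with something like the following (illustrative only):
 *
 *	static void *(*real_malloc)(size_t);
 *	real_malloc = dlsym(RTLD_NEXT, "malloc");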
3047 */ 3048 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3049 res = symlook_obj(&req, &obj_rtld); 3050 if (res == 0) { 3051 def = req.sym_out; 3052 defobj = req.defobj_out; 3053 } 3054 } 3055 } else { 3056 assert(handle == RTLD_DEFAULT); 3057 res = symlook_default(&req, obj); 3058 if (res == 0) { 3059 defobj = req.defobj_out; 3060 def = req.sym_out; 3061 } 3062 } 3063 } else { 3064 if ((obj = dlcheck(handle)) == NULL) { 3065 lock_release(rtld_bind_lock, &lockstate); 3066 return NULL; 3067 } 3068 3069 donelist_init(&donelist); 3070 if (obj->mainprog) { 3071 /* Handle obtained by dlopen(NULL, ...) implies global scope. */ 3072 res = symlook_global(&req, &donelist); 3073 if (res == 0) { 3074 def = req.sym_out; 3075 defobj = req.defobj_out; 3076 } 3077 /* 3078 * Search the dynamic linker itself, and possibly resolve the 3079 * symbol from there. This is how the application links to 3080 * dynamic linker services such as dlopen. 3081 */ 3082 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3083 res = symlook_obj(&req, &obj_rtld); 3084 if (res == 0) { 3085 def = req.sym_out; 3086 defobj = req.defobj_out; 3087 } 3088 } 3089 } 3090 else { 3091 /* Search the whole DAG rooted at the given object. */ 3092 res = symlook_list(&req, &obj->dagmembers, &donelist); 3093 if (res == 0) { 3094 def = req.sym_out; 3095 defobj = req.defobj_out; 3096 } 3097 } 3098 } 3099 3100 if (def != NULL) { 3101 lock_release(rtld_bind_lock, &lockstate); 3102 3103 /* 3104 * The value required by the caller is derived from the value 3105 * of the symbol. For the ia64 architecture, we need to 3106 * construct a function descriptor which the caller can use to 3107 * call the function with the right 'gp' value. For other 3108 * architectures and for non-functions, the value is simply 3109 * the relocated value of the symbol. 
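 * For STT_TLS symbols the caller instead receives the address of the
 * variable in the calling thread, obtained through __tls_get_addr().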
3110 */
3111 if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
3112 return (make_function_pointer(def, defobj));
3113 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
3114 return (rtld_resolve_ifunc(defobj, def));
3115 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
3116 #ifdef __ia64__
3117 return (__tls_get_addr(defobj->tlsindex, def->st_value));
3118 #else
3119 ti.ti_module = defobj->tlsindex;
3120 ti.ti_offset = def->st_value;
3121 return (__tls_get_addr(&ti));
3122 #endif
3123 } else
3124 return (defobj->relocbase + def->st_value);
3125 }
3126
3127 _rtld_error("Undefined symbol \"%s\"", name);
3128 lock_release(rtld_bind_lock, &lockstate);
3129 return NULL;
3130 }
3131
3132 void *
3133 dlsym(void *handle, const char *name)
3134 {
3135 return do_dlsym(handle, name, __builtin_return_address(0), NULL,
3136 SYMLOOK_DLSYM);
3137 }
3138
3139 dlfunc_t
3140 dlfunc(void *handle, const char *name)
3141 {
3142 union {
3143 void *d;
3144 dlfunc_t f;
3145 } rv;
3146
3147 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
3148 SYMLOOK_DLSYM);
3149 return (rv.f);
3150 }
3151
3152 void *
3153 dlvsym(void *handle, const char *name, const char *version)
3154 {
3155 Ver_Entry ventry;
3156
3157 ventry.name = version;
3158 ventry.file = NULL;
3159 ventry.hash = elf_hash(version);
3160 ventry.flags = 0;
3161 return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
3162 SYMLOOK_DLSYM);
3163 }
3164
3165 int
3166 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
3167 {
3168 const Obj_Entry *obj;
3169 RtldLockState lockstate;
3170
3171 rlock_acquire(rtld_bind_lock, &lockstate);
3172 obj = obj_from_addr(addr);
3173 if (obj == NULL) {
3174 _rtld_error("No shared object contains address");
3175 lock_release(rtld_bind_lock, &lockstate);
3176 return (0);
3177 }
3178 rtld_fill_dl_phdr_info(obj, phdr_info);
3179 lock_release(rtld_bind_lock, &lockstate);
3180 return (1);
3181 }
3182
3183 int
3184 dladdr(const void *addr, Dl_info *info)
3185 {
3186 const Obj_Entry *obj;
3187 const Elf_Sym *def;
3188 void *symbol_addr;
3189 unsigned long symoffset;
3190 RtldLockState lockstate;
3191
3192 rlock_acquire(rtld_bind_lock, &lockstate);
3193 obj = obj_from_addr(addr);
3194 if (obj == NULL) {
3195 _rtld_error("No shared object contains address");
3196 lock_release(rtld_bind_lock, &lockstate);
3197 return 0;
3198 }
3199 info->dli_fname = obj->path;
3200 info->dli_fbase = obj->mapbase;
3201 info->dli_saddr = (void *)0;
3202 info->dli_sname = NULL;
3203
3204 /*
3205 * Walk the symbol list looking for the symbol whose address is
3206 * closest to the address sent in.
3207 */
3208 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
3209 def = obj->symtab + symoffset;
3210
3211 /*
3212 * Skip the symbol if st_shndx is either SHN_UNDEF or
3213 * SHN_COMMON.
3214 */
3215 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
3216 continue;
3217
3218 /*
3219 * If the symbol is greater than the specified address, or if it
3220 * is further away from addr than the current nearest symbol,
3221 * then reject it.
3222 */
3223 symbol_addr = obj->relocbase + def->st_value;
3224 if (symbol_addr > addr || symbol_addr < info->dli_saddr)
3225 continue;
3226
3227 /* Update our idea of the nearest symbol. */
3228 info->dli_sname = obj->strtab + def->st_name;
3229 info->dli_saddr = symbol_addr;
3230
3231 /* Exact match?
*/ 3232 if (info->dli_saddr == addr) 3233 break; 3234 } 3235 lock_release(rtld_bind_lock, &lockstate); 3236 return 1; 3237 } 3238 3239 int 3240 dlinfo(void *handle, int request, void *p) 3241 { 3242 const Obj_Entry *obj; 3243 RtldLockState lockstate; 3244 int error; 3245 3246 rlock_acquire(rtld_bind_lock, &lockstate); 3247 3248 if (handle == NULL || handle == RTLD_SELF) { 3249 void *retaddr; 3250 3251 retaddr = __builtin_return_address(0); /* __GNUC__ only */ 3252 if ((obj = obj_from_addr(retaddr)) == NULL) 3253 _rtld_error("Cannot determine caller's shared object"); 3254 } else 3255 obj = dlcheck(handle); 3256 3257 if (obj == NULL) { 3258 lock_release(rtld_bind_lock, &lockstate); 3259 return (-1); 3260 } 3261 3262 error = 0; 3263 switch (request) { 3264 case RTLD_DI_LINKMAP: 3265 *((struct link_map const **)p) = &obj->linkmap; 3266 break; 3267 case RTLD_DI_ORIGIN: 3268 error = rtld_dirname(obj->path, p); 3269 break; 3270 3271 case RTLD_DI_SERINFOSIZE: 3272 case RTLD_DI_SERINFO: 3273 error = do_search_info(obj, request, (struct dl_serinfo *)p); 3274 break; 3275 3276 default: 3277 _rtld_error("Invalid request %d passed to dlinfo()", request); 3278 error = -1; 3279 } 3280 3281 lock_release(rtld_bind_lock, &lockstate); 3282 3283 return (error); 3284 } 3285 3286 static void 3287 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info) 3288 { 3289 3290 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase; 3291 phdr_info->dlpi_name = STAILQ_FIRST(&obj->names) ? 3292 STAILQ_FIRST(&obj->names)->name : obj->path; 3293 phdr_info->dlpi_phdr = obj->phdr; 3294 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]); 3295 phdr_info->dlpi_tls_modid = obj->tlsindex; 3296 phdr_info->dlpi_tls_data = obj->tlsinit; 3297 phdr_info->dlpi_adds = obj_loads; 3298 phdr_info->dlpi_subs = obj_loads - obj_count; 3299 } 3300 3301 int 3302 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param) 3303 { 3304 struct dl_phdr_info phdr_info; 3305 const Obj_Entry *obj; 3306 RtldLockState bind_lockstate, phdr_lockstate; 3307 int error; 3308 3309 wlock_acquire(rtld_phdr_lock, &phdr_lockstate); 3310 rlock_acquire(rtld_bind_lock, &bind_lockstate); 3311 3312 error = 0; 3313 3314 for (obj = obj_list; obj != NULL; obj = obj->next) { 3315 rtld_fill_dl_phdr_info(obj, &phdr_info); 3316 if ((error = callback(&phdr_info, sizeof phdr_info, param)) != 0) 3317 break; 3318 3319 } 3320 if (error == 0) { 3321 rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info); 3322 error = callback(&phdr_info, sizeof(phdr_info), param); 3323 } 3324 3325 lock_release(rtld_bind_lock, &bind_lockstate); 3326 lock_release(rtld_phdr_lock, &phdr_lockstate); 3327 3328 return (error); 3329 } 3330 3331 static void * 3332 fill_search_info(const char *dir, size_t dirlen, void *param) 3333 { 3334 struct fill_search_info_args *arg; 3335 3336 arg = param; 3337 3338 if (arg->request == RTLD_DI_SERINFOSIZE) { 3339 arg->serinfo->dls_cnt ++; 3340 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1; 3341 } else { 3342 struct dl_serpath *s_entry; 3343 3344 s_entry = arg->serpath; 3345 s_entry->dls_name = arg->strspace; 3346 s_entry->dls_flags = arg->flags; 3347 3348 strncpy(arg->strspace, dir, dirlen); 3349 arg->strspace[dirlen] = '\0'; 3350 3351 arg->strspace += dirlen + 1; 3352 arg->serpath++; 3353 } 3354 3355 return (NULL); 3356 } 3357 3358 static int 3359 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info) 3360 { 3361 struct dl_serinfo _info; 3362 struct fill_search_info_args args; 3363 3364 args.request = 
RTLD_DI_SERINFOSIZE; 3365 args.serinfo = &_info; 3366 3367 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath); 3368 _info.dls_cnt = 0; 3369 3370 path_enumerate(obj->rpath, fill_search_info, &args); 3371 path_enumerate(ld_library_path, fill_search_info, &args); 3372 path_enumerate(obj->runpath, fill_search_info, &args); 3373 path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args); 3374 if (!obj->z_nodeflib) 3375 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args); 3376 3377 3378 if (request == RTLD_DI_SERINFOSIZE) { 3379 info->dls_size = _info.dls_size; 3380 info->dls_cnt = _info.dls_cnt; 3381 return (0); 3382 } 3383 3384 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) { 3385 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()"); 3386 return (-1); 3387 } 3388 3389 args.request = RTLD_DI_SERINFO; 3390 args.serinfo = info; 3391 args.serpath = &info->dls_serpath[0]; 3392 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt]; 3393 3394 args.flags = LA_SER_RUNPATH; 3395 if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL) 3396 return (-1); 3397 3398 args.flags = LA_SER_LIBPATH; 3399 if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL) 3400 return (-1); 3401 3402 args.flags = LA_SER_RUNPATH; 3403 if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL) 3404 return (-1); 3405 3406 args.flags = LA_SER_CONFIG; 3407 if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args) 3408 != NULL) 3409 return (-1); 3410 3411 args.flags = LA_SER_DEFAULT; 3412 if (!obj->z_nodeflib && 3413 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args) != NULL) 3414 return (-1); 3415 return (0); 3416 } 3417 3418 static int 3419 rtld_dirname(const char *path, char *bname) 3420 { 3421 const char *endp; 3422 3423 /* Empty or NULL string gets treated as "." */ 3424 if (path == NULL || *path == '\0') { 3425 bname[0] = '.'; 3426 bname[1] = '\0'; 3427 return (0); 3428 } 3429 3430 /* Strip trailing slashes */ 3431 endp = path + strlen(path) - 1; 3432 while (endp > path && *endp == '/') 3433 endp--; 3434 3435 /* Find the start of the dir */ 3436 while (endp > path && *endp != '/') 3437 endp--; 3438 3439 /* Either the dir is "/" or there are no slashes */ 3440 if (endp == path) { 3441 bname[0] = *endp == '/' ? 
'/' : '.'; 3442 bname[1] = '\0'; 3443 return (0); 3444 } else { 3445 do { 3446 endp--; 3447 } while (endp > path && *endp == '/'); 3448 } 3449 3450 if (endp - path + 2 > PATH_MAX) 3451 { 3452 _rtld_error("Filename is too long: %s", path); 3453 return(-1); 3454 } 3455 3456 strncpy(bname, path, endp - path + 1); 3457 bname[endp - path + 1] = '\0'; 3458 return (0); 3459 } 3460 3461 static int 3462 rtld_dirname_abs(const char *path, char *base) 3463 { 3464 char base_rel[PATH_MAX]; 3465 3466 if (rtld_dirname(path, base) == -1) 3467 return (-1); 3468 if (base[0] == '/') 3469 return (0); 3470 if (getcwd(base_rel, sizeof(base_rel)) == NULL || 3471 strlcat(base_rel, "/", sizeof(base_rel)) >= sizeof(base_rel) || 3472 strlcat(base_rel, base, sizeof(base_rel)) >= sizeof(base_rel)) 3473 return (-1); 3474 strcpy(base, base_rel); 3475 return (0); 3476 } 3477 3478 static void 3479 linkmap_add(Obj_Entry *obj) 3480 { 3481 struct link_map *l = &obj->linkmap; 3482 struct link_map *prev; 3483 3484 obj->linkmap.l_name = obj->path; 3485 obj->linkmap.l_addr = obj->mapbase; 3486 obj->linkmap.l_ld = obj->dynamic; 3487 #ifdef __mips__ 3488 /* GDB needs load offset on MIPS to use the symbols */ 3489 obj->linkmap.l_offs = obj->relocbase; 3490 #endif 3491 3492 if (r_debug.r_map == NULL) { 3493 r_debug.r_map = l; 3494 return; 3495 } 3496 3497 /* 3498 * Scan to the end of the list, but not past the entry for the 3499 * dynamic linker, which we want to keep at the very end. 3500 */ 3501 for (prev = r_debug.r_map; 3502 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap; 3503 prev = prev->l_next) 3504 ; 3505 3506 /* Link in the new entry. */ 3507 l->l_prev = prev; 3508 l->l_next = prev->l_next; 3509 if (l->l_next != NULL) 3510 l->l_next->l_prev = l; 3511 prev->l_next = l; 3512 } 3513 3514 static void 3515 linkmap_delete(Obj_Entry *obj) 3516 { 3517 struct link_map *l = &obj->linkmap; 3518 3519 if (l->l_prev == NULL) { 3520 if ((r_debug.r_map = l->l_next) != NULL) 3521 l->l_next->l_prev = NULL; 3522 return; 3523 } 3524 3525 if ((l->l_prev->l_next = l->l_next) != NULL) 3526 l->l_next->l_prev = l->l_prev; 3527 } 3528 3529 /* 3530 * Function for the debugger to set a breakpoint on to gain control. 3531 * 3532 * The two parameters allow the debugger to easily find and determine 3533 * what the runtime loader is doing and to whom it is doing it. 3534 * 3535 * When the loadhook trap is hit (r_debug_state, set at program 3536 * initialization), the arguments can be found on the stack: 3537 * 3538 * +8 struct link_map *m 3539 * +4 struct r_debug *rd 3540 * +0 RetAddr 3541 */ 3542 void 3543 r_debug_state(struct r_debug* rd, struct link_map *m) 3544 { 3545 /* 3546 * The following is a hack to force the compiler to emit calls to 3547 * this function, even when optimizing. If the function is empty, 3548 * the compiler is not obliged to emit any code for calls to it, 3549 * even when marked __noinline. However, gdb depends on those 3550 * calls being made. 3551 */ 3552 __asm __volatile("" : : : "memory"); 3553 } 3554 3555 /* 3556 * Get address of the pointer variable in the main program. 3557 * Prefer non-weak symbol over the weak one. 
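 * (A weak definition is returned only if no strong one exists anywhere
 * in the global lookup scope.)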
3558 */ 3559 static const void ** 3560 get_program_var_addr(const char *name, RtldLockState *lockstate) 3561 { 3562 SymLook req; 3563 DoneList donelist; 3564 3565 symlook_init(&req, name); 3566 req.lockstate = lockstate; 3567 donelist_init(&donelist); 3568 if (symlook_global(&req, &donelist) != 0) 3569 return (NULL); 3570 if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC) 3571 return ((const void **)make_function_pointer(req.sym_out, 3572 req.defobj_out)); 3573 else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC) 3574 return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out)); 3575 else 3576 return ((const void **)(req.defobj_out->relocbase + 3577 req.sym_out->st_value)); 3578 } 3579 3580 /* 3581 * Set a pointer variable in the main program to the given value. This 3582 * is used to set key variables such as "environ" before any of the 3583 * init functions are called. 3584 */ 3585 static void 3586 set_program_var(const char *name, const void *value) 3587 { 3588 const void **addr; 3589 3590 if ((addr = get_program_var_addr(name, NULL)) != NULL) { 3591 dbg("\"%s\": *%p <-- %p", name, addr, value); 3592 *addr = value; 3593 } 3594 } 3595 3596 /* 3597 * Search the global objects, including dependencies and main object, 3598 * for the given symbol. 3599 */ 3600 static int 3601 symlook_global(SymLook *req, DoneList *donelist) 3602 { 3603 SymLook req1; 3604 const Objlist_Entry *elm; 3605 int res; 3606 3607 symlook_init_from_req(&req1, req); 3608 3609 /* Search all objects loaded at program start up. */ 3610 if (req->defobj_out == NULL || 3611 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) { 3612 res = symlook_list(&req1, &list_main, donelist); 3613 if (res == 0 && (req->defobj_out == NULL || 3614 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) { 3615 req->sym_out = req1.sym_out; 3616 req->defobj_out = req1.defobj_out; 3617 assert(req->defobj_out != NULL); 3618 } 3619 } 3620 3621 /* Search all DAGs whose roots are RTLD_GLOBAL objects. */ 3622 STAILQ_FOREACH(elm, &list_global, link) { 3623 if (req->defobj_out != NULL && 3624 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK) 3625 break; 3626 res = symlook_list(&req1, &elm->obj->dagmembers, donelist); 3627 if (res == 0 && (req->defobj_out == NULL || 3628 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) { 3629 req->sym_out = req1.sym_out; 3630 req->defobj_out = req1.defobj_out; 3631 assert(req->defobj_out != NULL); 3632 } 3633 } 3634 3635 return (req->sym_out != NULL ? 0 : ESRCH); 3636 } 3637 3638 /* 3639 * Given a symbol name in a referencing object, find the corresponding 3640 * definition of the symbol. Returns a pointer to the symbol, or NULL if 3641 * no definition was found. Returns a pointer to the Obj_Entry of the 3642 * defining object via the reference parameter DEFOBJ_OUT. 3643 */ 3644 static int 3645 symlook_default(SymLook *req, const Obj_Entry *refobj) 3646 { 3647 DoneList donelist; 3648 const Objlist_Entry *elm; 3649 SymLook req1; 3650 int res; 3651 3652 donelist_init(&donelist); 3653 symlook_init_from_req(&req1, req); 3654 3655 /* Look first in the referencing object if linked symbolically. */ 3656 if (refobj->symbolic && !donelist_check(&donelist, refobj)) { 3657 res = symlook_obj(&req1, refobj); 3658 if (res == 0) { 3659 req->sym_out = req1.sym_out; 3660 req->defobj_out = req1.defobj_out; 3661 assert(req->defobj_out != NULL); 3662 } 3663 } 3664 3665 symlook_global(req, &donelist); 3666 3667 /* Search all dlopened DAGs containing the referencing object. 
*/
3668 STAILQ_FOREACH(elm, &refobj->dldags, link) {
3669 if (req->sym_out != NULL &&
3670 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3671 break;
3672 res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
3673 if (res == 0 && (req->sym_out == NULL ||
3674 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3675 req->sym_out = req1.sym_out;
3676 req->defobj_out = req1.defobj_out;
3677 assert(req->defobj_out != NULL);
3678 }
3679 }
3680
3681 /*
3682 * Search the dynamic linker itself, and possibly resolve the
3683 * symbol from there. This is how the application links to
3684 * dynamic linker services such as dlopen.
3685 */
3686 if (req->sym_out == NULL ||
3687 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
3688 res = symlook_obj(&req1, &obj_rtld);
3689 if (res == 0) {
3690 req->sym_out = req1.sym_out;
3691 req->defobj_out = req1.defobj_out;
3692 assert(req->defobj_out != NULL);
3693 }
3694 }
3695
3696 return (req->sym_out != NULL ? 0 : ESRCH);
3697 }
3698
3699 static int
3700 symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
3701 {
3702 const Elf_Sym *def;
3703 const Obj_Entry *defobj;
3704 const Objlist_Entry *elm;
3705 SymLook req1;
3706 int res;
3707
3708 def = NULL;
3709 defobj = NULL;
3710 STAILQ_FOREACH(elm, objlist, link) {
3711 if (donelist_check(dlp, elm->obj))
3712 continue;
3713 symlook_init_from_req(&req1, req);
3714 if ((res = symlook_obj(&req1, elm->obj)) == 0) {
3715 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3716 def = req1.sym_out;
3717 defobj = req1.defobj_out;
3718 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3719 break;
3720 }
3721 }
3722 }
3723 if (def != NULL) {
3724 req->sym_out = def;
3725 req->defobj_out = defobj;
3726 return (0);
3727 }
3728 return (ESRCH);
3729 }
3730
3731 /*
3732 * Search the chain of DAGs pointed to by the given Needed_Entry
3733 * for a symbol of the given name. Each DAG is scanned completely
3734 * before advancing to the next one. Returns a pointer to the symbol,
3735 * or NULL if no definition was found.
3736 */
3737 static int
3738 symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
3739 {
3740 const Elf_Sym *def;
3741 const Needed_Entry *n;
3742 const Obj_Entry *defobj;
3743 SymLook req1;
3744 int res;
3745
3746 def = NULL;
3747 defobj = NULL;
3748 symlook_init_from_req(&req1, req);
3749 for (n = needed; n != NULL; n = n->next) {
3750 if (n->obj == NULL ||
3751 (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
3752 continue;
3753 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3754 def = req1.sym_out;
3755 defobj = req1.defobj_out;
3756 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3757 break;
3758 }
3759 }
3760 if (def != NULL) {
3761 req->sym_out = def;
3762 req->defobj_out = defobj;
3763 return (0);
3764 }
3765 return (ESRCH);
3766 }
3767
3768 /*
3769 * Search the symbol table of a single shared object for a symbol of
3770 * the given name and version, if requested. Returns a pointer to the
3771 * symbol, or NULL if no definition was found. If the object is a
3772 * filter, return the filtered symbol from the filtee.
3773 *
3774 * The symbol's hash value is passed in for efficiency reasons; that
3775 * eliminates many recomputations of the hash value.
3776 */
3777 int
3778 symlook_obj(SymLook *req, const Obj_Entry *obj)
3779 {
3780 DoneList donelist;
3781 SymLook req1;
3782 int flags, res, mres;
3783
3784 /*
3785 * If there is at least one valid hash at this point, we prefer to
3786 * use the faster GNU version if available.
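 * (The GNU hash of a name is the djb2 function: h starts at 5381 and
 * each byte c is folded in as h = h * 33 + c.)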
3787 */ 3788 if (obj->valid_hash_gnu) 3789 mres = symlook_obj1_gnu(req, obj); 3790 else if (obj->valid_hash_sysv) 3791 mres = symlook_obj1_sysv(req, obj); 3792 else 3793 return (EINVAL); 3794 3795 if (mres == 0) { 3796 if (obj->needed_filtees != NULL) { 3797 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0; 3798 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate); 3799 donelist_init(&donelist); 3800 symlook_init_from_req(&req1, req); 3801 res = symlook_needed(&req1, obj->needed_filtees, &donelist); 3802 if (res == 0) { 3803 req->sym_out = req1.sym_out; 3804 req->defobj_out = req1.defobj_out; 3805 } 3806 return (res); 3807 } 3808 if (obj->needed_aux_filtees != NULL) { 3809 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0; 3810 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate); 3811 donelist_init(&donelist); 3812 symlook_init_from_req(&req1, req); 3813 res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist); 3814 if (res == 0) { 3815 req->sym_out = req1.sym_out; 3816 req->defobj_out = req1.defobj_out; 3817 return (res); 3818 } 3819 } 3820 } 3821 return (mres); 3822 } 3823 3824 /* Symbol match routine common to both hash functions */ 3825 static bool 3826 matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result, 3827 const unsigned long symnum) 3828 { 3829 Elf_Versym verndx; 3830 const Elf_Sym *symp; 3831 const char *strp; 3832 3833 symp = obj->symtab + symnum; 3834 strp = obj->strtab + symp->st_name; 3835 3836 switch (ELF_ST_TYPE(symp->st_info)) { 3837 case STT_FUNC: 3838 case STT_NOTYPE: 3839 case STT_OBJECT: 3840 case STT_COMMON: 3841 case STT_GNU_IFUNC: 3842 if (symp->st_value == 0) 3843 return (false); 3844 /* fallthrough */ 3845 case STT_TLS: 3846 if (symp->st_shndx != SHN_UNDEF) 3847 break; 3848 #ifndef __mips__ 3849 else if (((req->flags & SYMLOOK_IN_PLT) == 0) && 3850 (ELF_ST_TYPE(symp->st_info) == STT_FUNC)) 3851 break; 3852 /* fallthrough */ 3853 #endif 3854 default: 3855 return (false); 3856 } 3857 if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0) 3858 return (false); 3859 3860 if (req->ventry == NULL) { 3861 if (obj->versyms != NULL) { 3862 verndx = VER_NDX(obj->versyms[symnum]); 3863 if (verndx > obj->vernum) { 3864 _rtld_error( 3865 "%s: symbol %s references wrong version %d", 3866 obj->path, obj->strtab + symnum, verndx); 3867 return (false); 3868 } 3869 /* 3870 * If we are not called from dlsym (i.e. this 3871 * is a normal relocation from unversioned 3872 * binary), accept the symbol immediately if 3873 * it happens to have first version after this 3874 * shared object became versioned. Otherwise, 3875 * if symbol is versioned and not hidden, 3876 * remember it. If it is the only symbol with 3877 * this name exported by the shared object, it 3878 * will be returned as a match by the calling 3879 * function. If symbol is global (verndx < 2) 3880 * accept it unconditionally. 

/* Symbol match routine common to both hash functions. */
static bool
matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
    const unsigned long symnum)
{
    Elf_Versym verndx;
    const Elf_Sym *symp;
    const char *strp;

    symp = obj->symtab + symnum;
    strp = obj->strtab + symp->st_name;

    switch (ELF_ST_TYPE(symp->st_info)) {
    case STT_FUNC:
    case STT_NOTYPE:
    case STT_OBJECT:
    case STT_COMMON:
    case STT_GNU_IFUNC:
        if (symp->st_value == 0)
            return (false);
        /* fallthrough */
    case STT_TLS:
        if (symp->st_shndx != SHN_UNDEF)
            break;
#ifndef __mips__
        else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
            (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
            break;
        /* fallthrough */
#endif
    default:
        return (false);
    }
    if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
        return (false);

    if (req->ventry == NULL) {
        if (obj->versyms != NULL) {
            verndx = VER_NDX(obj->versyms[symnum]);
            if (verndx > obj->vernum) {
                _rtld_error(
                    "%s: symbol %s references wrong version %d",
                    obj->path, obj->strtab + symnum, verndx);
                return (false);
            }
            /*
             * If we are not called from dlsym (i.e. this is a normal
             * relocation from an unversioned binary), accept the symbol
             * immediately if it happens to have the first version defined
             * after this shared object became versioned.  Otherwise, if
             * the symbol is versioned and not hidden, remember it.  If it
             * is the only symbol with this name exported by the shared
             * object, it will be returned as a match by the calling
             * function.  If the symbol is global (verndx < 2), accept it
             * unconditionally.
             */
            if ((req->flags & SYMLOOK_DLSYM) == 0 &&
                verndx == VER_NDX_GIVEN) {
                result->sym_out = symp;
                return (true);
            } else if (verndx >= VER_NDX_GIVEN) {
                if ((obj->versyms[symnum] & VER_NDX_HIDDEN) == 0) {
                    if (result->vsymp == NULL)
                        result->vsymp = symp;
                    result->vcount++;
                }
                return (false);
            }
        }
        result->sym_out = symp;
        return (true);
    }
    if (obj->versyms == NULL) {
        if (object_match_name(obj, req->ventry->name)) {
            _rtld_error("%s: object %s should provide version %s "
                "for symbol %s", obj_rtld.path, obj->path,
                req->ventry->name, obj->strtab + symnum);
            return (false);
        }
    } else {
        verndx = VER_NDX(obj->versyms[symnum]);
        if (verndx > obj->vernum) {
            _rtld_error("%s: symbol %s references wrong version %d",
                obj->path, obj->strtab + symnum, verndx);
            return (false);
        }
        if (obj->vertab[verndx].hash != req->ventry->hash ||
            strcmp(obj->vertab[verndx].name, req->ventry->name)) {
            /*
             * The version does not match.  Check whether this is a
             * global symbol that is not hidden.  If a global symbol
             * (verndx < 2) is available, use it.  Do not return the
             * symbol if we were called by dlvsym, because dlvsym looks
             * for a specific version, and the default one is not what
             * it wants.
             */
            if ((req->flags & SYMLOOK_DLSYM) ||
                (verndx >= VER_NDX_GIVEN) ||
                (obj->versyms[symnum] & VER_NDX_HIDDEN))
                return (false);
        }
    }
    result->sym_out = symp;
    return (true);
}
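
/*
 * Illustrative usage sketch, not rtld code: dlvsym() requests one specific
 * symbol version, and, as the comment above notes, the default version is
 * deliberately not returned for such a request.  The library, symbol and
 * version names below are hypothetical.
 */
#if 0
#include <dlfcn.h>
#include <stdio.h>

int
main(void)
{
    void *handle, *sym;

    handle = dlopen("libbar.so.1", RTLD_NOW);
    if (handle == NULL)
        return (1);
    /* Only a definition carrying version BAR_1.0 will match. */
    sym = dlvsym(handle, "bar_compute", "BAR_1.0");
    printf("bar_compute@BAR_1.0 resolved at %p\n", sym);
    dlclose(handle);
    return (0);
}
#endif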

/*
 * Search for a symbol using the SysV hash function.
 * obj->buckets is known not to be NULL at this point; the test for this
 * was performed with the obj->valid_hash_sysv assignment.
 */
static int
symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
{
    unsigned long symnum;
    Sym_Match_Result matchres;

    matchres.sym_out = NULL;
    matchres.vsymp = NULL;
    matchres.vcount = 0;

    for (symnum = obj->buckets[req->hash % obj->nbuckets];
        symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
        if (symnum >= obj->nchains)
            return (ESRCH);    /* Bad object */

        if (matched_symbol(req, obj, &matchres, symnum)) {
            req->sym_out = matchres.sym_out;
            req->defobj_out = obj;
            return (0);
        }
    }
    if (matchres.vcount == 1) {
        req->sym_out = matchres.vsymp;
        req->defobj_out = obj;
        return (0);
    }
    return (ESRCH);
}

/* Search for a symbol using the GNU hash function. */
static int
symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
{
    Elf_Addr bloom_word;
    const Elf32_Word *hashval;
    Elf32_Word bucket;
    Sym_Match_Result matchres;
    unsigned int h1, h2;
    unsigned long symnum;

    matchres.sym_out = NULL;
    matchres.vsymp = NULL;
    matchres.vcount = 0;

    /* Pick the right bitmask word from the Bloom filter array. */
    bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
        obj->maskwords_bm_gnu];

    /* Calculate the GNU hash and its derivative, modulo the word size. */
    h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
    h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));

    /* Filter out the "definitely not in set" queries. */
    if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
        return (ESRCH);

    /* Locate the hash chain and the corresponding value element. */
    bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
    if (bucket == 0)
        return (ESRCH);
    hashval = &obj->chain_zero_gnu[bucket];
    do {
        if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
            symnum = hashval - obj->chain_zero_gnu;
            if (matched_symbol(req, obj, &matchres, symnum)) {
                req->sym_out = matchres.sym_out;
                req->defobj_out = obj;
                return (0);
            }
        }
    } while ((*hashval++ & 1) == 0);
    if (matchres.vcount == 1) {
        req->sym_out = matchres.vsymp;
        req->defobj_out = obj;
        return (0);
    }
    return (ESRCH);
}
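
/*
 * Illustrative reference implementations, not the ones rtld uses: the
 * lookups above assume the SysV hash published in the System V ABI and the
 * conventional GNU hash (h = h * 33 + c, seeded with 5381).  The real
 * elf_hash()/gnu_hash() live elsewhere in rtld; the names below are
 * hypothetical and the block is excluded from the build.  The Bloom filter
 * test in symlook_obj1_gnu() then probes bit (hash % __ELF_WORD_SIZE) and
 * bit ((hash >> shift2) % __ELF_WORD_SIZE) of one bitmask word.
 */
#if 0
static unsigned long
example_elf_hash(const char *name)
{
    const unsigned char *p;
    unsigned long h, g;

    h = 0;
    for (p = (const unsigned char *)name; *p != '\0'; p++) {
        h = (h << 4) + *p;
        if ((g = h & 0xf0000000) != 0)
            h ^= g >> 24;
        h &= ~g;
    }
    return (h);
}

static Elf32_Word
example_gnu_hash(const char *name)
{
    const unsigned char *p;
    Elf32_Word h;

    h = 5381;
    for (p = (const unsigned char *)name; *p != '\0'; p++)
        h = h * 33 + *p;
    return (h);
}
#endif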

static void
trace_loaded_objects(Obj_Entry *obj)
{
    char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
    int c;

    if ((main_local = getenv(LD_ "TRACE_LOADED_OBJECTS_PROGNAME")) == NULL)
        main_local = "";

    if ((fmt1 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT1")) == NULL)
        fmt1 = "\t%o => %p (%x)\n";

    if ((fmt2 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT2")) == NULL)
        fmt2 = "\t%o (%x)\n";

    list_containers = getenv(LD_ "TRACE_LOADED_OBJECTS_ALL");

    for (; obj; obj = obj->next) {
        Needed_Entry *needed;
        char *name, *path;
        bool is_lib;

        if (list_containers && obj->needed != NULL)
            rtld_printf("%s:\n", obj->path);
        for (needed = obj->needed; needed; needed = needed->next) {
            if (needed->obj != NULL) {
                if (needed->obj->traced && !list_containers)
                    continue;
                needed->obj->traced = true;
                path = needed->obj->path;
            } else
                path = "not found";

            name = (char *)obj->strtab + needed->name;
            is_lib = strncmp(name, "lib", 3) == 0;    /* XXX - bogus */

            fmt = is_lib ? fmt1 : fmt2;
            while ((c = *fmt++) != '\0') {
                switch (c) {
                default:
                    rtld_putchar(c);
                    continue;
                case '\\':
                    switch (c = *fmt) {
                    case '\0':
                        continue;
                    case 'n':
                        rtld_putchar('\n');
                        break;
                    case 't':
                        rtld_putchar('\t');
                        break;
                    }
                    break;
                case '%':
                    switch (c = *fmt) {
                    case '\0':
                        continue;
                    case '%':
                    default:
                        rtld_putchar(c);
                        break;
                    case 'A':
                        rtld_putstr(main_local);
                        break;
                    case 'a':
                        rtld_putstr(obj_main->path);
                        break;
                    case 'o':
                        rtld_putstr(name);
                        break;
#if 0
                    case 'm':
                        rtld_printf("%d", sodp->sod_major);
                        break;
                    case 'n':
                        rtld_printf("%d", sodp->sod_minor);
                        break;
#endif
                    case 'p':
                        rtld_putstr(path);
                        break;
                    case 'x':
                        rtld_printf("%p", needed->obj ? needed->obj->mapbase :
                            0);
                        break;
                    }
                    break;
                }
                ++fmt;
            }
        }
    }
}
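
/*
 * Illustrative usage sketch, not rtld code: trace_loaded_objects() is what
 * produces ldd(1)-style output.  A program can request it for another
 * binary by setting LD_TRACE_LOADED_OBJECTS before exec'ing it; rtld then
 * prints the needed objects and exits instead of running main().  The
 * target path below is only an example.
 */
#if 0
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
    setenv("LD_TRACE_LOADED_OBJECTS", "1", 1);
    execl("/bin/ls", "ls", (char *)NULL);
    return (1);        /* reached only if exec failed */
}
#endif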

/*
 * Unload a dlopened object and its dependencies from memory and from
 * our data structures.  It is assumed that the DAG rooted in the
 * object has already been unreferenced, and that the object has a
 * reference count of 0.
 */
static void
unload_object(Obj_Entry *root)
{
    Obj_Entry *obj;
    Obj_Entry **linkp;

    assert(root->refcount == 0);

    /*
     * Pass over the DAG removing unreferenced objects from
     * appropriate lists.
     */
    unlink_object(root);

    /* Unmap all objects that are no longer referenced. */
    linkp = &obj_list->next;
    while ((obj = *linkp) != NULL) {
        if (obj->refcount == 0) {
            LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
                obj->path);
            dbg("unloading \"%s\"", obj->path);
            unload_filtees(root);
            munmap(obj->mapbase, obj->mapsize);
            linkmap_delete(obj);
            *linkp = obj->next;
            obj_count--;
            obj_free(obj);
        } else
            linkp = &obj->next;
    }
    obj_tail = linkp;
}

static void
unlink_object(Obj_Entry *root)
{
    Objlist_Entry *elm;

    if (root->refcount == 0) {
        /* Remove the object from the RTLD_GLOBAL list. */
        objlist_remove(&list_global, root);

        /* Remove the object from all objects' DAG lists. */
        STAILQ_FOREACH(elm, &root->dagmembers, link) {
            objlist_remove(&elm->obj->dldags, root);
            if (elm->obj != root)
                unlink_object(elm->obj);
        }
    }
}

static void
ref_dag(Obj_Entry *root)
{
    Objlist_Entry *elm;

    assert(root->dag_inited);
    STAILQ_FOREACH(elm, &root->dagmembers, link)
        elm->obj->refcount++;
}

static void
unref_dag(Obj_Entry *root)
{
    Objlist_Entry *elm;

    assert(root->dag_inited);
    STAILQ_FOREACH(elm, &root->dagmembers, link)
        elm->obj->refcount--;
}

/*
 * Common code for MD __tls_get_addr().
 */
static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;

static void *
tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *newdtv, *dtv;
    RtldLockState lockstate;
    int to_copy;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived. */
    if (dtv[0] != tls_dtv_generation) {
        wlock_acquire(rtld_bind_lock, &lockstate);
        newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
        to_copy = dtv[1];
        if (to_copy > tls_max_index)
            to_copy = tls_max_index;
        memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
        newdtv[0] = tls_dtv_generation;
        newdtv[1] = tls_max_index;
        free(dtv);
        lock_release(rtld_bind_lock, &lockstate);
        dtv = *dtvp = newdtv;
    }

    /* Dynamically allocate module TLS if necessary. */
    if (dtv[index + 1] == 0) {
        /* Signal safe, wlock will block out signals. */
        wlock_acquire(rtld_bind_lock, &lockstate);
        if (!dtv[index + 1])
            dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
        lock_release(rtld_bind_lock, &lockstate);
    }
    return ((void *)(dtv[index + 1] + offset));
}

void *
tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *dtv;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived. */
    if (__predict_true(dtv[0] == tls_dtv_generation &&
        dtv[index + 1] != 0))
        return ((void *)(dtv[index + 1] + offset));
    return (tls_get_addr_slow(dtvp, index, offset));
}
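
/*
 * Illustrative sketch, not the real machine-dependent code: a Variant II
 * __tls_get_addr() receives a (module, offset) pair from the compiler and
 * forwards it to tls_get_addr_common() above.  The tls_index layout and the
 * example_get_tcb() helper are assumptions made for the example; the DTV
 * layout noted in the comments matches the code above (dtv[0] generation,
 * dtv[1] slot count, dtv[index + 1] the module's block).
 */
#if 0
typedef struct {
    unsigned long ti_module;        /* 1-based module index */
    unsigned long ti_offset;        /* offset within that module's block */
} example_tls_index;

extern Elf_Addr **example_get_tcb(void);    /* hypothetical thread pointer access */

void *
example__tls_get_addr(example_tls_index *ti)
{
    /* tcb[1] holds the DTV pointer in the Variant II layout built below. */
    return (tls_get_addr_common(&example_get_tcb()[1], ti->ti_module,
        ti->ti_offset));
}
#endif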

#if defined(__arm__) || defined(__ia64__) || defined(__mips__) || defined(__powerpc__)

/*
 * Allocate Static TLS using the Variant I method.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    char *tcb;
    Elf_Addr **tls;
    Elf_Addr *dtv;
    Elf_Addr addr;
    int i;

    if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
        return (oldtcb);

    assert(tcbsize >= TLS_TCB_SIZE);
    tcb = xcalloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize);
    tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);

    if (oldtcb != NULL) {
        memcpy(tls, oldtcb, tls_static_space);
        free(oldtcb);

        /* Adjust the DTV. */
        dtv = tls[0];
        for (i = 0; i < dtv[1]; i++) {
            if (dtv[i+2] >= (Elf_Addr)oldtcb &&
                dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
                dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls;
            }
        }
    } else {
        dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
        tls[0] = dtv;
        dtv[0] = tls_dtv_generation;
        dtv[1] = tls_max_index;

        for (obj = objs; obj; obj = obj->next) {
            if (obj->tlsoffset > 0) {
                addr = (Elf_Addr)tls + obj->tlsoffset;
                if (obj->tlsinitsize > 0)
                    memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
                if (obj->tlssize > obj->tlsinitsize)
                    memset((void*) (addr + obj->tlsinitsize), 0,
                        obj->tlssize - obj->tlsinitsize);
                dtv[obj->tlsindex + 1] = addr;
            }
        }
    }

    return (tcb);
}

void
free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
{
    Elf_Addr *dtv;
    Elf_Addr tlsstart, tlsend;
    int dtvsize, i;

    assert(tcbsize >= TLS_TCB_SIZE);

    tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE;
    tlsend = tlsstart + tls_static_space;

    dtv = *(Elf_Addr **)tlsstart;
    dtvsize = dtv[1];
    for (i = 0; i < dtvsize; i++) {
        if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
            free((void*)dtv[i+2]);
        }
    }
    free(dtv);
    free(tcb);
}

#endif
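
/*
 * The Variant II allocator below rounds sizes up to the required alignment
 * with the round() macro from rtld.h.  Purely as an illustration (this is
 * an assumption about that macro, not a quotation of the header), a
 * power-of-two round-up is conventionally written as:
 */
#if 0
#define example_round(size, align)    (((size) + (align) - 1) & ~((align) - 1))
/* example_round(13, 8) == 16, example_round(16, 8) == 16 */
#endif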

#if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)

/*
 * Allocate Static TLS using the Variant II method.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    size_t size, ralign;
    char *tls;
    Elf_Addr *dtv, *olddtv;
    Elf_Addr segbase, oldsegbase, addr;
    int i;

    ralign = tcbalign;
    if (tls_static_max_align > ralign)
        ralign = tls_static_max_align;
    size = round(tls_static_space, ralign) + round(tcbsize, ralign);

    assert(tcbsize >= 2*sizeof(Elf_Addr));
    tls = malloc_aligned(size, ralign);
    dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));

    segbase = (Elf_Addr)(tls + round(tls_static_space, ralign));
    ((Elf_Addr*)segbase)[0] = segbase;
    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;

    dtv[0] = tls_dtv_generation;
    dtv[1] = tls_max_index;

    if (oldtls) {
        /*
         * Copy over the whole static TLS block.
         */
        oldsegbase = (Elf_Addr) oldtls;
        memcpy((void *)(segbase - tls_static_space),
            (const void *)(oldsegbase - tls_static_space),
            tls_static_space);

        /*
         * If any dynamic TLS blocks have been created by tls_get_addr(),
         * move them over.
         */
        olddtv = ((Elf_Addr**)oldsegbase)[1];
        for (i = 0; i < olddtv[1]; i++) {
            if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
                dtv[i+2] = olddtv[i+2];
                olddtv[i+2] = 0;
            }
        }

        /*
         * We assume that this block was the one we created with
         * allocate_initial_tls().
         */
        free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
    } else {
        for (obj = objs; obj; obj = obj->next) {
            if (obj->tlsoffset) {
                addr = segbase - obj->tlsoffset;
                memset((void*) (addr + obj->tlsinitsize),
                    0, obj->tlssize - obj->tlsinitsize);
                if (obj->tlsinit)
                    memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
                dtv[obj->tlsindex + 1] = addr;
            }
        }
    }

    return (void*) segbase;
}

void
free_tls(void *tls, size_t tcbsize, size_t tcbalign)
{
    Elf_Addr* dtv;
    size_t size, ralign;
    int dtvsize, i;
    Elf_Addr tlsstart, tlsend;

    /*
     * Figure out the size of the initial TLS block so that we can
     * find the blocks which ___tls_get_addr() allocated dynamically.
     */
    ralign = tcbalign;
    if (tls_static_max_align > ralign)
        ralign = tls_static_max_align;
    size = round(tls_static_space, ralign);

    dtv = ((Elf_Addr**)tls)[1];
    dtvsize = dtv[1];
    tlsend = (Elf_Addr) tls;
    tlsstart = tlsend - size;
    for (i = 0; i < dtvsize; i++) {
        if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) {
            free_aligned((void *)dtv[i + 2]);
        }
    }

    free_aligned((void *)tlsstart);
    free((void*) dtv);
}

#endif
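
/*
 * Illustrative only: the two static TLS addressing conventions implemented
 * by the allocate_tls() variants above.  Variant I places module blocks at
 * positive offsets after the TCB; Variant II places them at negative
 * offsets below the thread pointer.  The helper names are hypothetical.
 */
#if 0
static Elf_Addr
example_static_tls_addr_variant1(Elf_Addr tls_base, size_t tlsoffset)
{
    /* Matches "addr = (Elf_Addr)tls + obj->tlsoffset" above. */
    return (tls_base + tlsoffset);
}

static Elf_Addr
example_static_tls_addr_variant2(Elf_Addr segbase, size_t tlsoffset)
{
    /* Matches "addr = segbase - obj->tlsoffset" above. */
    return (segbase - tlsoffset);
}
#endif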

/*
 * Allocate a TLS block for the module with the given index.
 */
void *
allocate_module_tls(int index)
{
    Obj_Entry* obj;
    char* p;

    for (obj = obj_list; obj; obj = obj->next) {
        if (obj->tlsindex == index)
            break;
    }
    if (!obj) {
        _rtld_error("Can't find module with TLS index %d", index);
        die();
    }

    p = malloc_aligned(obj->tlssize, obj->tlsalign);
    memcpy(p, obj->tlsinit, obj->tlsinitsize);
    memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);

    return p;
}

bool
allocate_tls_offset(Obj_Entry *obj)
{
    size_t off;

    if (obj->tls_done)
        return true;

    if (obj->tlssize == 0) {
        obj->tls_done = true;
        return true;
    }

    if (obj->tlsindex == 1)
        off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign);
    else
        off = calculate_tls_offset(tls_last_offset, tls_last_size,
            obj->tlssize, obj->tlsalign);

    /*
     * If we have already fixed the size of the static TLS block, we
     * must stay within that size.  When allocating the static TLS, we
     * leave a small amount of spare space for dynamically loaded
     * modules which use static TLS.
     */
    if (tls_static_space != 0) {
        if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
            return false;
    } else if (obj->tlsalign > tls_static_max_align) {
        tls_static_max_align = obj->tlsalign;
    }

    tls_last_offset = obj->tlsoffset = off;
    tls_last_size = obj->tlssize;
    obj->tls_done = true;

    return true;
}

void
free_tls_offset(Obj_Entry *obj)
{

    /*
     * If we were the last thing to allocate out of the static TLS
     * block, we give our space back to the 'allocator'.  This is a
     * simplistic workaround to allow libGL.so.1 to be loaded and
     * unloaded multiple times.
     */
    if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
        == calculate_tls_end(tls_last_offset, tls_last_size)) {
        tls_last_offset -= obj->tlssize;
        tls_last_size = 0;
    }
}
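
/*
 * Illustrative sketch, not rtld code: a dlopen()ed module that defines a
 * __thread variable is the typical client of allocate_module_tls(); its
 * block is created lazily the first time a thread touches the variable
 * through __tls_get_addr().  The module source below is hypothetical.
 */
#if 0
static __thread int demo_counter;    /* ends up in this module's TLS block */

int
demo_bump(void)
{
    return (++demo_counter);
}
#endif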

void *
_rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
{
    void *ret;
    RtldLockState lockstate;

    wlock_acquire(rtld_bind_lock, &lockstate);
    ret = allocate_tls(obj_list, oldtls, tcbsize, tcbalign);
    lock_release(rtld_bind_lock, &lockstate);
    return (ret);
}

void
_rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
{
    RtldLockState lockstate;

    wlock_acquire(rtld_bind_lock, &lockstate);
    free_tls(tcb, tcbsize, tcbalign);
    lock_release(rtld_bind_lock, &lockstate);
}

static void
object_add_name(Obj_Entry *obj, const char *name)
{
    Name_Entry *entry;
    size_t len;

    len = strlen(name);
    entry = malloc(sizeof(Name_Entry) + len);

    if (entry != NULL) {
        strcpy(entry->name, name);
        STAILQ_INSERT_TAIL(&obj->names, entry, link);
    }
}

static int
object_match_name(const Obj_Entry *obj, const char *name)
{
    Name_Entry *entry;

    STAILQ_FOREACH(entry, &obj->names, link) {
        if (strcmp(name, entry->name) == 0)
            return (1);
    }
    return (0);
}

static Obj_Entry *
locate_dependency(const Obj_Entry *obj, const char *name)
{
    const Objlist_Entry *entry;
    const Needed_Entry *needed;

    STAILQ_FOREACH(entry, &list_main, link) {
        if (object_match_name(entry->obj, name))
            return entry->obj;
    }

    for (needed = obj->needed; needed != NULL; needed = needed->next) {
        if (strcmp(obj->strtab + needed->name, name) == 0 ||
            (needed->obj != NULL && object_match_name(needed->obj, name))) {
            /*
             * If there is a DT_NEEDED entry for the name we are looking
             * for, we are all set.  Note that the object might not have
             * been found if the dependency has not been loaded yet, so
             * the function can return NULL here.  This is expected and
             * handled properly by the caller.
             */
            return (needed->obj);
        }
    }
    _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
        obj->path, name);
    die();
}

static int
check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
    const Elf_Vernaux *vna)
{
    const Elf_Verdef *vd;
    const char *vername;

    vername = refobj->strtab + vna->vna_name;
    vd = depobj->verdef;
    if (vd == NULL) {
        _rtld_error("%s: version %s required by %s not defined",
            depobj->path, vername, refobj->path);
        return (-1);
    }
    for (;;) {
        if (vd->vd_version != VER_DEF_CURRENT) {
            _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
                depobj->path, vd->vd_version);
            return (-1);
        }
        if (vna->vna_hash == vd->vd_hash) {
            const Elf_Verdaux *aux = (const Elf_Verdaux *)
                ((char *)vd + vd->vd_aux);
            if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
                return (0);
        }
        if (vd->vd_next == 0)
            break;
        vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
    }
    if (vna->vna_flags & VER_FLG_WEAK)
        return (0);
    _rtld_error("%s: version %s required by %s not found",
        depobj->path, vername, refobj->path);
    return (-1);
}

static int
rtld_verify_object_versions(Obj_Entry *obj)
{
    const Elf_Verneed *vn;
    const Elf_Verdef *vd;
    const Elf_Verdaux *vda;
    const Elf_Vernaux *vna;
    const Obj_Entry *depobj;
    int maxvernum, vernum;

    if (obj->ver_checked)
        return (0);
    obj->ver_checked = true;

    maxvernum = 0;
    /*
     * Walk over the defined and required version records and figure out
     * the maximum index used by any of them.  Do very basic sanity
     * checking while we are at it.
     */
    vn = obj->verneed;
    while (vn != NULL) {
        if (vn->vn_version != VER_NEED_CURRENT) {
            _rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
                obj->path, vn->vn_version);
            return (-1);
        }
        vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
        for (;;) {
            vernum = VER_NEED_IDX(vna->vna_other);
            if (vernum > maxvernum)
                maxvernum = vernum;
            if (vna->vna_next == 0)
                break;
            vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
        }
        if (vn->vn_next == 0)
            break;
        vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
    }

    vd = obj->verdef;
    while (vd != NULL) {
        if (vd->vd_version != VER_DEF_CURRENT) {
            _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
                obj->path, vd->vd_version);
            return (-1);
        }
        vernum = VER_DEF_IDX(vd->vd_ndx);
        if (vernum > maxvernum)
            maxvernum = vernum;
        if (vd->vd_next == 0)
            break;
        vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
    }

    if (maxvernum == 0)
        return (0);

    /*
     * Store version information in an array indexable by version index.
     * Verify that the object's version requirements are satisfied along
     * the way.
     */
    obj->vernum = maxvernum + 1;
    obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));

    vd = obj->verdef;
    while (vd != NULL) {
        if ((vd->vd_flags & VER_FLG_BASE) == 0) {
            vernum = VER_DEF_IDX(vd->vd_ndx);
            assert(vernum <= maxvernum);
            vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux);
            obj->vertab[vernum].hash = vd->vd_hash;
            obj->vertab[vernum].name = obj->strtab + vda->vda_name;
            obj->vertab[vernum].file = NULL;
            obj->vertab[vernum].flags = 0;
        }
        if (vd->vd_next == 0)
            break;
        vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
    }

    vn = obj->verneed;
    while (vn != NULL) {
        depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
        if (depobj == NULL)
            return (-1);
        vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
        for (;;) {
            if (check_object_provided_version(obj, depobj, vna))
                return (-1);
            vernum = VER_NEED_IDX(vna->vna_other);
            assert(vernum <= maxvernum);
            obj->vertab[vernum].hash = vna->vna_hash;
            obj->vertab[vernum].name = obj->strtab + vna->vna_name;
            obj->vertab[vernum].file = obj->strtab + vn->vn_file;
            obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
                VER_INFO_HIDDEN : 0;
            if (vna->vna_next == 0)
                break;
            vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
        }
        if (vn->vn_next == 0)
            break;
        vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
    }
    return 0;
}
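
/*
 * Illustrative only: a sketch of how a library typically acquires the
 * Verdef records that rtld_verify_object_versions() digests.  The symbol
 * names, version names, the demo.map version script and the GNU .symver
 * directive are all assumptions made for the example, not anything this
 * file defines.  Built roughly as
 *   cc -fPIC -shared -Wl,--version-script=demo.map -o libdemo.so demo.c
 * where demo.map declares the DEMO_1.0 and DEMO_2.0 version nodes.  A
 * binary linked against the newer library records a Verneed entry for
 * DEMO_2.0; running it against a library providing only DEMO_1.0 makes
 * check_object_provided_version() above report the missing version.
 */
#if 0
int demo_calc_v1(int x);
int demo_calc_v2(int x);

int
demo_calc_v1(int x)
{
    return (x + 1);
}

int
demo_calc_v2(int x)
{
    return (x + 2);
}

__asm__(".symver demo_calc_v1, demo_calc@DEMO_1.0");
__asm__(".symver demo_calc_v2, demo_calc@@DEMO_2.0");
#endif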

static int
rtld_verify_versions(const Objlist *objlist)
{
    Objlist_Entry *entry;
    int rc;

    rc = 0;
    STAILQ_FOREACH(entry, objlist, link) {
        /*
         * Skip dummy objects or objects that have their version
         * requirements already checked.
         */
        if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
            continue;
        if (rtld_verify_object_versions(entry->obj) == -1) {
            rc = -1;
            if (ld_tracing == NULL)
                break;
        }
    }
    if (rc == 0 || ld_tracing != NULL)
        rc = rtld_verify_object_versions(&obj_rtld);
    return rc;
}

const Ver_Entry *
fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
{
    Elf_Versym vernum;

    if (obj->vertab) {
        vernum = VER_NDX(obj->versyms[symnum]);
        if (vernum >= obj->vernum) {
            _rtld_error("%s: symbol %s has wrong verneed value %d",
                obj->path, obj->strtab + symnum, vernum);
        } else if (obj->vertab[vernum].hash != 0) {
            return &obj->vertab[vernum];
        }
    }
    return NULL;
}

int
_rtld_get_stack_prot(void)
{

    return (stack_prot);
}

static void
map_stacks_exec(RtldLockState *lockstate)
{
    void (*thr_map_stacks_exec)(void);

    if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
        return;
    thr_map_stacks_exec = (void (*)(void))(uintptr_t)
        get_program_var_addr("__pthread_map_stacks_exec", lockstate);
    if (thr_map_stacks_exec != NULL) {
        stack_prot |= PROT_EXEC;
        thr_map_stacks_exec();
    }
}

void
symlook_init(SymLook *dst, const char *name)
{

    bzero(dst, sizeof(*dst));
    dst->name = name;
    dst->hash = elf_hash(name);
    dst->hash_gnu = gnu_hash(name);
}

static void
symlook_init_from_req(SymLook *dst, const SymLook *src)
{

    dst->name = src->name;
    dst->hash = src->hash;
    dst->hash_gnu = src->hash_gnu;
    dst->ventry = src->ventry;
    dst->flags = src->flags;
    dst->defobj_out = NULL;
    dst->sym_out = NULL;
    dst->lockstate = src->lockstate;
}

/*
 * Overrides for libc_pic-provided functions.
 */

int
__getosreldate(void)
{
    size_t len;
    int oid[2];
    int error, osrel;

    if (osreldate != 0)
        return (osreldate);

    oid[0] = CTL_KERN;
    oid[1] = KERN_OSRELDATE;
    osrel = 0;
    len = sizeof(osrel);
    error = sysctl(oid, 2, &osrel, &len, NULL, 0);
    if (error == 0 && osrel > 0 && len == sizeof(osrel))
        osreldate = osrel;
    return (osreldate);
}

void
exit(int status)
{

    _exit(status);
}

void (*__cleanup)(void);
int __isthreaded = 0;
int _thread_autoinit_dummy_decl = 1;

/*
 * No unresolved symbols for rtld.
 */
void
__pthread_cxa_finalize(struct dl_phdr_info *a)
{
}

void
__stack_chk_fail(void)
{

    _rtld_error("stack overflow detected; terminated");
    die();
}
__weak_reference(__stack_chk_fail, __stack_chk_fail_local);

void
__chk_fail(void)
{

    _rtld_error("buffer overflow detected; terminated");
    die();
}

const char *
rtld_strerror(int errnum)
{

    if (errnum < 0 || errnum >= sys_nerr)
        return ("Unknown error");
    return (sys_errlist[errnum]);
}
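
/*
 * Illustrative only: for comparison with the CTL_KERN/KERN_OSRELDATE MIB
 * walk in __getosreldate() above, ordinary code would usually query the
 * same value through sysctlbyname(3).  The function name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
example_getosreldate(void)
{
    int osrel;
    size_t len;

    osrel = 0;
    len = sizeof(osrel);
    if (sysctlbyname("kern.osreldate", &osrel, &len, NULL, 0) != 0)
        return (0);
    return (osrel);
}
#endif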