/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Object file dependent support for ELF objects.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/procfs.h>
#include	<sys/mman.h>
#include	<sys/debug.h>
#include	<string.h>
#include	<limits.h>
#include	<dlfcn.h>
#include	<debug.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"

/*
 * Default and secure dependency search paths.
 */
static Pnode	elf_dflt_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB_64),	0,	MSG_PTH_LIB_64_SIZE,
	    LA_SER_DEFAULT,		0,	&elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB_64),	0,	MSG_PTH_USRLIB_64_SIZE,
	    LA_SER_DEFAULT,		0,	0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB),	0,	MSG_PTH_LIB_SIZE,
	    LA_SER_DEFAULT,		0,	&elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB),	0,	MSG_PTH_USRLIB_SIZE,
	    LA_SER_DEFAULT,		0,	0 }
#endif
};

static Pnode	elf_secure_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE_64),	0,	MSG_PTH_LIBSE_64_SIZE,
	    LA_SER_SECURE,		0,	&elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE_64), 0,	MSG_PTH_USRLIBSE_64_SIZE,
	    LA_SER_SECURE,		0,	0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE),	0,	MSG_PTH_LIBSE_SIZE,
	    LA_SER_SECURE,		0,	&elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE),	0,	MSG_PTH_USRLIBSE_SIZE,
	    LA_SER_SECURE,		0,	0 }
#endif
};
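/*
 * Note that each Pnode above chains to its successor through its final
 * p_next member: the default search order is /lib followed by /usr/lib
 * (or their 64-bit variants), and the secure order is /lib/secure
 * followed by /usr/lib/secure.  The SGS_PRE_UNIFIED_PROCESS build drops
 * the first node of each pair.
 */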
/*
 * Defines for local functions.
 */
static Pnode	*elf_fix_name(const char *, Rt_map *, uint_t);
static int	elf_are_u(Rej_desc *);
static void	elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int);
static ulong_t	elf_entry_pt(void);
static char	*elf_get_so(const char *, const char *);
static Rt_map	*elf_map_so(Lm_list *, Aliste, const char *, const char *, int);
static int	elf_needed(Lm_list *, Aliste, Rt_map *);
static void	elf_unmap_so(Rt_map *);
static int	elf_verify_vers(const char *, Rt_map *, Rt_map *);

/*
 * Functions and data accessed through indirect pointers.
 */
Fct elf_fct = {
	elf_are_u,
	elf_entry_pt,
	elf_map_so,
	elf_unmap_so,
	elf_needed,
	lookup_sym,
	elf_reloc,
	elf_dflt_dirs,
	elf_secure_dirs,
	elf_fix_name,
	elf_get_so,
	elf_dladdr,
	dlsym_handle,
	elf_verify_vers,
	elf_set_prot
};


/*
 * Redefine NEEDED name if necessary.
 */
static Pnode *
elf_fix_name(const char *name, Rt_map *clmp, uint_t orig)
{
	/*
	 * For ABI compliance, if we are asked for ld.so.1, then really give
	 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1).
	 */
	if (((*name == '/') &&
#if	defined(_ELF64)
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) ||
#else
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) ||
#endif
	    (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) {
		Pnode	*pnp;

		DBG_CALL(Dbg_file_fixname(LIST(clmp), name,
		    MSG_ORIG(MSG_PTH_LIBSYS)));
		if (((pnp = calloc(sizeof (Pnode), 1)) == 0) ||
		    ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) {
			if (pnp)
				free(pnp);
			return (0);
		}
		pnp->p_len = MSG_PTH_LIBSYS_SIZE;
		pnp->p_orig = (orig & PN_SER_MASK);
		return (pnp);
	}

	return (expand_paths(clmp, name, orig, 0));
}
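/*
 * For example (a sketch; MSG_PTH_LIBSYS names the libsys.so.1 path), a
 * request of the form
 *
 *	pnp = elf_fix_name(MSG_ORIG(MSG_FIL_RTLD), clmp, PN_SER_NEEDED);
 *
 * returns a single Pnode whose p_name is a freshly strdup()'ed copy of
 * the libsys.so.1 path, while any other name falls through to the normal
 * expand_paths() processing.
 */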
/*
 * Determine if we have been given an ELF file and if so determine if the file
 * is compatible.  Returns 1 if true, else 0 and sets the reject descriptor
 * with associated error information.
 */
static int
elf_are_u(Rej_desc *rej)
{
	Ehdr	*ehdr;

	/*
	 * Determine if we're an elf file.  If not simply return, we don't set
	 * any rejection information as this test allows us to scroll through
	 * the objects we support (ELF, AOUT).
	 */
	if (fmap->fm_fsize < sizeof (Ehdr) ||
	    fmap->fm_maddr[EI_MAG0] != ELFMAG0 ||
	    fmap->fm_maddr[EI_MAG1] != ELFMAG1 ||
	    fmap->fm_maddr[EI_MAG2] != ELFMAG2 ||
	    fmap->fm_maddr[EI_MAG3] != ELFMAG3) {
		return (0);
	}

	/*
	 * Check class and encoding.
	 */
	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;
	if (ehdr->e_ident[EI_CLASS] != M_CLASS) {
		rej->rej_type = SGS_REJ_CLASS;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS];
		return (0);
	}
	if (ehdr->e_ident[EI_DATA] != M_DATA) {
		rej->rej_type = SGS_REJ_DATA;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA];
		return (0);
	}
	if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) &&
	    (ehdr->e_type != ET_DYN)) {
		rej->rej_type = SGS_REJ_TYPE;
		rej->rej_info = (uint_t)ehdr->e_type;
		return (0);
	}

	/*
	 * Verify machine specific flags, and hardware capability requirements.
	 */
	if ((elf_mach_flags_check(rej, ehdr) == 0) ||
	    ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0)))
		return (0);

	/*
	 * Verify ELF version.  ??? is this too restrictive ???
	 */
	if (ehdr->e_version > EV_CURRENT) {
		rej->rej_type = SGS_REJ_VERSION;
		rej->rej_info = (uint_t)ehdr->e_version;
		return (0);
	}
	return (1);
}
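/*
 * The four e_ident comparisons above are equivalent to matching the
 * ELFMAG string from <sys/elf.h> (an illustrative alternative, not used
 * here):
 *
 *	(fmap->fm_fsize >= sizeof (Ehdr)) &&
 *	    (memcmp(fmap->fm_maddr, ELFMAG, SELFMAG) == 0)
 */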
/*
 * The runtime linker employs lazy loading to provide the libraries needed for
 * debugging, preloading .o's and dldump().  As these are seldom used, the
 * standard startup of ld.so.1 doesn't initialize all the information necessary
 * to perform plt relocation on ld.so.1's link-map.  The first time lazy loading
 * is called we get here to perform these initializations:
 *
 *  o	elf_needed() is called to set up the DYNINFO() indexes for each lazy
 *	dependency.  Typically, for all other objects, this is called during
 *	analyze_so(), but as ld.so.1 is self-contained we skip this processing.
 *
 *  o	For intel, ld.so.1's JMPSLOT relocations need relative updates.  These
 *	are by default skipped, thus delaying all relative relocation processing
 *	on every invocation of ld.so.1.
 */
int
elf_rtld_load()
{
	Lm_list	*lml = &lml_rtld;
	Rt_map	*lmp = lml->lm_head;

	if (lml->lm_flags & LML_FLG_PLTREL)
		return (1);

	/*
	 * As we need to refer to the DYNINFO() information, insure that it has
	 * been initialized.
	 */
	if (elf_needed(lml, ALO_DATA, lmp) == 0)
		return (0);

#if	defined(i386)
	/*
	 * This is a kludge to give ld.so.1 a performance benefit on i386.
	 * It's based around two factors.
	 *
	 *  o	JMPSLOT relocations (PLT's) actually need a relative relocation
	 *	applied to the GOT entry so that they can find PLT0.
	 *
	 *  o	ld.so.1 does not exercise *any* PLT's before it has made a call
	 *	to elf_lazy_load().  This is because all dynamic dependencies
	 *	are recorded as lazy dependencies.
	 */
	(void) elf_reloc_relacount((ulong_t)JMPREL(lmp),
	    (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp),
	    (ulong_t)ADDR(lmp));
#endif

	lml->lm_flags |= LML_FLG_PLTREL;
	return (1);
}

/*
 * Lazy load an object.
 */
Rt_map *
elf_lazy_load(Rt_map *clmp, uint_t ndx, const char *sym)
{
	Rt_map		*nlmp, *hlmp;
	Dyninfo		*dip = &DYNINFO(clmp)[ndx];
	uint_t		flags = 0;
	Pnode		*pnp;
	const char	*name;
	Lm_list		*lml = LIST(clmp);
	Lm_cntl		*lmc;
	Aliste		lmco;

	/*
	 * If this dependency has already been processed, we're done.
	 */
	if (((nlmp = (Rt_map *)dip->di_info) != 0) ||
	    (dip->di_flags & FLG_DI_PROCESSD))
		return (nlmp);

	/*
	 * Determine the initial dependency name, and indicate that this
	 * dependency's processing has been initiated.
	 */
	name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val;
	DBG_CALL(Dbg_file_lazyload(clmp, name, sym));
	if (lml->lm_flags & LML_FLG_TRC_ENABLE)
		dip->di_flags |= FLG_DI_PROCESSD;

	if (dip->di_flags & FLG_DI_GROUP)
		flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE);

	/*
	 * Expand the requested name if necessary.
	 */
	if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0)
		return (0);

	/*
	 * Provided the object on the head of the link-map has completed its
	 * relocation, create a new link-map control list for this request.
	 */
	hlmp = lml->lm_head;
	if (FLAGS(hlmp) & FLG_RT_RELOCED) {
		if ((lmc = alist_append(&(lml->lm_lists), 0, sizeof (Lm_cntl),
		    AL_CNT_LMLISTS)) == 0) {
			remove_pnode(pnp);
			return (0);
		}
		lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists);
	} else {
		lmc = 0;
		lmco = ALO_DATA;
	}

	/*
	 * Load the associated object.
	 */
	dip->di_info = nlmp =
	    load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0);

	/*
	 * Remove any expanded pathname infrastructure.  Reduce the pending lazy
	 * dependency count of the caller, together with the link-map lists
	 * count of objects that still have lazy dependencies pending.
	 */
	remove_pnode(pnp);
	if (--LAZY(clmp) == 0)
		LIST(clmp)->lm_lazy--;

	/*
	 * Finish processing the objects associated with this request.
	 */
	if (nlmp && ((analyze_lmc(lml, lmco, nlmp) == 0) ||
	    (relocate_lmc(lml, lmco, nlmp) == 0)))
		dip->di_info = nlmp = 0;

	/*
	 * If the dependency has been successfully processed, and it is part of
	 * a link-map control list that is equivalent, or less, than the
	 * caller's control list, create an association between the caller and
	 * this dependency.  If this dependency isn't yet a part of the caller's
	 * link-map control list, then it is still a part of a list that is
	 * being relocated.  As the relocation of an object on this list might
	 * still fail, we can't yet bind the caller to this object.  To do so
	 * would lock the object so that it couldn't be deleted.  Mark this
	 * object as free, and it will be reprocessed when this dependency is
	 * next referenced.
	 */
	if (nlmp) {
		if (CNTL(nlmp) <= CNTL(clmp)) {
			if (bind_one(clmp, nlmp, BND_NEEDED) == 0)
				dip->di_info = nlmp = 0;
		} else {
			dip->di_info = 0;
			dip->di_flags &= ~FLG_DI_PROCESSD;
			if (LAZY(clmp)++ == 0)
				LIST(clmp)->lm_lazy++;
		}
	}

	/*
	 * After a successful load, any objects collected on the new link-map
	 * control list will have been moved to the caller's link-map control
	 * list.  This control list can now be deleted.
	 */
	if (lmc) {
		if (nlmp == 0)
			remove_incomplete(lml, lmco);
		remove_cntl(lml, lmco);
	}

	return (nlmp);
}
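/*
 * A timeline sketch of the control-list handling above; the temporary
 * list (lmco) only exists while this load is in flight:
 *
 *	alist_append()		create an empty control list, lmco
 *	load_one()		collect the dependency on lmco
 *	analyze_lmc() /
 *	relocate_lmc()		on success, objects migrate to the
 *				caller's control list
 *	remove_cntl()		delete the now-unused list
 */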
/*
 * Return the entry point of the ELF executable.
 */
static ulong_t
elf_entry_pt(void)
{
	return (ENTRY(lml_main.lm_head));
}

/*
 * Unmap a given ELF shared object from the address space.
 */
static void
elf_unmap_so(Rt_map *lmp)
{
	caddr_t	addr;
	size_t	size;
	Mmap	*mmaps;

	/*
	 * If this link map represents a relocatable object concatenation, then
	 * the image was simply generated in allocated memory.  Free the memory.
	 *
	 * Note: the memory was originally allocated in the libelf:_elf_outmap
	 * routine and would normally have been free'd in elf_outsync(), but
	 * because we 'interpose' on that routine the memory wasn't free'd at
	 * that time.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC) {
		free((void *)ADDR(lmp));
		return;
	}

	/*
	 * If padding was enabled via rtld_db, then we have at least one page
	 * in front of the image - and possibly a trailing page.
	 * Unmap the front page first:
	 */
	if (PADSTART(lmp) != ADDR(lmp)) {
		addr = (caddr_t)M_PTRUNC(PADSTART(lmp));
		size = ADDR(lmp) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap any trailing padding.
	 */
	if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) >
	    M_PROUND(ADDR(lmp) + MSIZE(lmp))) {
		addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp));
		size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap all mapped segments.
	 */
	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++)
		(void) munmap(mmaps->m_vaddr, mmaps->m_msize);
}

/*
 * Determine if a dependency requires a particular version and if so verify
 * that the version exists in the dependency.
 */
static int
elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp)
{
	Verneed		*vnd = VERNEED(clmp);
	int		_num, num = VERNEEDNUM(clmp);
	char		*cstrs = (char *)STRTAB(clmp);
	Lm_list		*lml = LIST(clmp);

	/*
	 * Traverse the caller's version-needed information and determine if
	 * any specific versions are required from the dependency.
	 */
	DBG_CALL(Dbg_ver_need_title(LIST(clmp), NAME(clmp)));
	for (_num = 1; _num <= num; _num++,
	    vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) {
		Half		cnt = vnd->vn_cnt;
		Vernaux		*vnap;
		char		*nstrs, *need;

		/*
		 * Determine if a needed entry matches this dependency.
		 */
		need = (char *)(cstrs + vnd->vn_file);
		if (strcmp(name, need) != 0)
			continue;

		if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) &&
		    ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0))
			(void) printf(MSG_INTL(MSG_LDD_VER_FIND), name);

		/*
		 * Validate that each version required actually exists in the
		 * dependency.
		 */
		nstrs = (char *)STRTAB(nlmp);

		for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt;
		    cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) {
			char		*version, *define;
			Verdef		*vdf = VERDEF(nlmp);
			ulong_t		_num, num = VERDEFNUM(nlmp);
			int		found = 0;

			version = (char *)(cstrs + vnap->vna_name);
			DBG_CALL(Dbg_ver_need_entry(lml, 0, need, version));

			for (_num = 1; _num <= num; _num++,
			    vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) {
				Verdaux		*vdap;

				if (vnap->vna_hash != vdf->vd_hash)
					continue;

				vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux);
				define = (char *)(nstrs + vdap->vda_name);
				if (strcmp(version, define) != 0)
					continue;

				found++;
				break;
			}

			/*
			 * If we're being traced print out any matched version
			 * when the verbose (-v) option is in effect.  Always
			 * print any unmatched versions.
			 */
			if (lml->lm_flags & LML_FLG_TRC_ENABLE) {
				if (found) {
					if (!(lml->lm_flags &
					    LML_FLG_TRC_VERBOSE))
						continue;

					(void) printf(
					    MSG_ORIG(MSG_LDD_VER_FOUND),
					    need, version, NAME(nlmp));
				} else {
					if (rtld_flags & RT_FL_SILENCERR)
						continue;

					(void) printf(
					    MSG_INTL(MSG_LDD_VER_NFOUND),
					    need, version);
				}
				continue;
			}

			/*
			 * If the version hasn't been found then this is a
			 * candidate for a fatal error condition.  Weak
			 * version definition requirements are silently
			 * ignored.  Also, if the image inspected for a version
			 * definition has no versioning recorded at all then
			 * silently ignore this (this provides better backward
			 * compatibility to old images created prior to
			 * versioning being available).  Both of these skipped
			 * diagnostics are available under tracing (see above).
			 */
			if ((found == 0) && (num != 0) &&
			    (!(vnap->vna_flags & VER_FLG_WEAK))) {
				eprintf(lml, ERR_FATAL,
				    MSG_INTL(MSG_VER_NFOUND), need, version,
				    NAME(clmp));
				return (0);
			}
		}
	}
	DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD));
	return (1);
}
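/*
 * Note that vn_next and vna_next above are byte offsets rather than
 * element indexes, which is why each step is written as a cast and add,
 * e.g.:
 *
 *	vnd = (Verneed *)((Xword)vnd + vnd->vn_next);
 */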
/*
 * Search through the dynamic section for DT_NEEDED entries and perform one
 * of two functions.  If only the first argument is specified then load the
 * defined shared object, otherwise add the link map representing the defined
 * shared object to the dlopen list.
 */
static int
elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp)
{
	Dyn	*dyn;
	ulong_t	ndx = 0;
	uint_t	lazy = 0, flags = 0;
	Word	lmflags = lml->lm_flags;
	Word	lmtflags = lml->lm_tflags;

	/*
	 * Process each shared object on needed list.
	 */
	if (DYN(clmp) == 0)
		return (1);

	for (dyn = (Dyn *)DYN(clmp); dyn->d_tag != DT_NULL; dyn++, ndx++) {
		Dyninfo	*dip = &DYNINFO(clmp)[ndx];
		Rt_map	*nlmp = 0;
		char	*name;
		int	silent = 0;
		Pnode	*pnp;

		switch (dyn->d_tag) {
		case DT_POSFLAG_1:
			if ((dyn->d_un.d_val & DF_P1_LAZYLOAD) &&
			    !(lmtflags & LML_TFLG_NOLAZYLD))
				lazy = 1;
			if (dyn->d_un.d_val & DF_P1_GROUPPERM)
				flags = (FLG_RT_SETGROUP | FLG_RT_HANDLE);
			continue;
		case DT_NEEDED:
		case DT_USED:
			dip->di_flags |= FLG_DI_NEEDED;
			if (flags)
				dip->di_flags |= FLG_DI_GROUP;

			name = (char *)STRTAB(clmp) + dyn->d_un.d_val;

			/*
			 * NOTE, libc.so.1 can't be lazy loaded.  Although a
			 * lazy position flag won't be produced when a RTLDINFO
			 * .dynamic entry is found (introduced with the UPM in
			 * Solaris 10), it was possible to mark libc for lazy
			 * loading on previous releases.  To reduce the overhead
			 * of testing for this occurrence, only carry out this
			 * check for the first object on the link-map list
			 * (there aren't many applications built without libc).
			 */
			if (lazy && (lml->lm_head == clmp) &&
			    (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0))
				lazy = 0;

			/*
			 * Don't bring in lazy loaded objects yet unless we've
			 * been asked to attempt to load all available objects
			 * (crle(1) sets LD_FLAGS=loadavail).  Even under
			 * RTLD_NOW we don't process this - RTLD_NOW will cause
			 * relocation processing which in turn might trigger
			 * lazy loading, but it's possible that the object has a
			 * lazy loaded file with no bindings (i.e., it should
			 * never have been a dependency in the first place).
			 */
			if (lazy) {
				if ((lmflags & LML_FLG_LOADAVAIL) == 0) {
					LAZY(clmp)++;
					lazy = flags = 0;
					continue;
				}

				/*
				 * Silence any error messages - see description
				 * under elf_lookup_filtee().
				 */
				if ((rtld_flags & RT_FL_SILENCERR) == 0) {
					rtld_flags |= RT_FL_SILENCERR;
					silent = 1;
				}
			}
			break;
		case DT_AUXILIARY:
			dip->di_flags |= FLG_DI_AUXFLTR;
			lazy = flags = 0;
			continue;
		case DT_SUNW_AUXILIARY:
			dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR);
			lazy = flags = 0;
			continue;
		case DT_FILTER:
			dip->di_flags |= FLG_DI_STDFLTR;
			lazy = flags = 0;
			continue;
		case DT_SUNW_FILTER:
			dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR);
			lazy = flags = 0;
			continue;
		default:
			lazy = flags = 0;
			continue;
		}

		DBG_CALL(Dbg_file_needed(clmp, name));
		if (lml->lm_flags & LML_FLG_TRC_ENABLE)
			dip->di_flags |= FLG_DI_PROCESSD;

		/*
		 * Establish the object's name, load it and establish a binding
		 * with the caller.
		 */
		if (((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) ||
		    ((nlmp = load_one(lml, lmco, pnp, clmp, MODE(clmp),
		    flags, 0)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0))
			nlmp = 0;

		/*
		 * Clean up any infrastructure, including the removal of the
		 * error suppression state, if it had been previously set in
		 * this routine.
		 */
		if (pnp)
			remove_pnode(pnp);
		if (silent)
			rtld_flags &= ~RT_FL_SILENCERR;
		lazy = flags = 0;
		if ((dip->di_info = (void *)nlmp) == 0) {
			/*
			 * If the object could not be mapped, continue if error
			 * suppression is established or we're here with ldd(1).
			 */
			if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags &
			    (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE)))
				continue;
			else
				return (0);
		}
	}

	if (LAZY(clmp))
		lml->lm_lazy++;

	return (1);
}
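/*
 * An illustrative .dynamic fragment (hypothetical library names, of the
 * kind ld -z lazyload produces): a DT_POSFLAG_1 entry qualifies only the
 * DT_NEEDED entry that immediately follows it, which is why lazy and
 * flags are reset on every iteration above.
 *
 *	DT_POSFLAG_1	DF_P1_LAZYLOAD
 *	DT_NEEDED	libdebug.so.1	(lazy)
 *	DT_NEEDED	libelf.so.1	(not lazy)
 */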
static int
elf_map_check(Lm_list *lml, const char *name, caddr_t vaddr, Off size)
{
	prmap_t	*maps, *_maps;
	int	pfd, num, _num;
	caddr_t	eaddr = vaddr + size;
	int	err;

	/*
	 * If memory reservations have been established for alternative objects
	 * determine if this object falls within the reservation; if it does,
	 * no further checking is required.
	 */
	if (rtld_flags & RT_FL_MEMRESV) {
		Rtc_head	*head = (Rtc_head *)config->c_bgn;

		if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) &&
		    (eaddr <= (caddr_t)(uintptr_t)head->ch_resend))
			return (0);
	}

	/*
	 * Determine the mappings presently in use by this process.
	 */
	if ((pfd = pr_open(lml)) == FD_UNAVAIL)
		return (1);

	if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		return (1);
	}

	if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0)
		return (1);

	if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		free(maps);
		return (1);
	}

	/*
	 * Determine if the supplied address clashes with any of the present
	 * process mappings.
	 */
	for (_num = 0, _maps = maps; _num < num; _num++, _maps++) {
		caddr_t		_eaddr = _maps->pr_vaddr + _maps->pr_size;
		Rt_map		*lmp;
		const char	*str;

		if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr))
			continue;

		/*
		 * We have a memory clash.  See if one of the known dynamic
		 * dependency mappings represents this space so as to provide
		 * the user a more meaningful message.
		 */
		if ((lmp = _caller(vaddr, 0)) != 0)
			str = NAME(lmp);
		else
			str = MSG_INTL(MSG_STR_UNKNOWN);

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name,
		    EC_NATPTR(vaddr), EC_OFF(size), str);
		free(maps);
		return (1);
	}
	free(maps);
	return (0);
}
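/*
 * Note on the clash test above: each existing mapping occupies the
 * half-open interval [pr_vaddr, pr_vaddr + pr_size), and a candidate
 * [vaddr, eaddr) is passed over only when it lies wholly below or wholly
 * above that interval (the comparison errs toward reporting a clash when
 * two ranges merely abut).
 */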
/*
 * Obtain a memory reservation.  On newer systems, both MAP_ANON and MAP_ALIGN
 * are used to obtain an aligned reservation from anonymous memory.  If
 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard
 * reservation using the file as backing.
 */
static Am_ret
elf_map_reserve(Lm_list *lml, const char *name, caddr_t *maddr, Off msize,
    int mperm, int fd, Xword align)
{
	Am_ret	amret;
	int	mflag = MAP_PRIVATE | MAP_NORESERVE;

#if	defined(MAP_ALIGN)
	if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) {
		mflag |= MAP_ALIGN;
		*maddr = (caddr_t)align;
	}
#endif
	if ((amret = anon_map(lml, maddr, msize, PROT_NONE, mflag)) == AM_ERROR)
		return (amret);

	if (amret == AM_OK)
		return (AM_OK);

	/*
	 * If an anonymous memory request failed (which should only be the
	 * case if it is unsupported on the system we're running on), establish
	 * the initial mapping directly from the file.
	 */
	*maddr = 0;
	if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE,
	    fd, 0)) == MAP_FAILED) {
		int	err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name,
		    strerror(err));
		return (AM_ERROR);
	}
	return (AM_NOSUP);
}

static void *
elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag,
    int fd, Off foff)
{
#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	static int	notd = 0;

	/*
	 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate
	 * flag.
	 */
	if (notd == 0) {
		if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC)
			mflag |= MAP_TEXT;
		else
			mflag |= MAP_INITDATA;
	}
#endif
	if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED)
		return (0);

#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	if ((notd == 0) && (errno == EINVAL)) {
		/*
		 * MAP_TEXT and MAP_INITDATA may not be supported on this
		 * platform, try again without.
		 */
		notd = 1;
		mflag &= ~(MAP_TEXT | MAP_INITDATA);

		return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff));
	}
#endif
	return (MAP_FAILED);
}
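/*
 * MAP_TEXT and MAP_INITDATA identify a mapping as a text or
 * initialized-data segment (see mmap(2)), which typically lets the
 * kernel choose a larger page size for it.  The EINVAL retry above keeps
 * this code usable on older kernels that reject either flag.
 */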
/*
 * Map in a file.
 */
static caddr_t
elf_map_it(
	Lm_list		*lml,		/* link-map list */
	const char	*name,		/* actual name stored for pathname */
	Off		fsize,		/* total mapping claim of the file */
	Ehdr		*ehdr,		/* ELF header of file */
	Phdr		*fphdr,		/* first loadable Phdr */
	Phdr		*lphdr,		/* last loadable Phdr */
	Phdr		**rrphdr,	/* return first Phdr in reservation */
	caddr_t		*rraddr,	/* return start of reservation */
	Off		*rrsize,	/* return total size of reservation */
	int		fixed,		/* image is resolved to a fixed addr */
	int		fd,		/* image's file descriptor */
	Xword		align,		/* image segments' maximum alignment */
	Mmap		*mmaps,		/* mmap information array and */
	uint_t		*mmapcnt)	/* mapping count */
{
	caddr_t		raddr;		/* reservation address */
	Off		rsize;		/* reservation size */
	Phdr		*phdr;		/* working program header pointer */
	caddr_t		maddr;		/* working mmap address */
	caddr_t		faddr;		/* working file address */
	size_t		padsize;	/* object padding requirement */
	size_t		padpsize = 0;	/* padding size rounded to next page */
	size_t		padmsize = 0;	/* padding size rounded for alignment */
	int		skipfseg;	/* skip mapping first segment */
	int		mperm;		/* segment permissions */
	Am_ret		amret = AM_NOSUP;

	/*
	 * If padding is required extend both the front and rear of the image.
	 * To insure the image itself is mapped at the correct alignment the
	 * initial padding is rounded up to the nearest page.  Once the image is
	 * mapped the excess can be pruned to the nearest page required for the
	 * actual padding itself.
	 */
	if ((padsize = r_debug.rtd_objpad) != 0) {
		padpsize = M_PROUND(padsize);
		if (fixed)
			padmsize = padpsize;
		else
			padmsize = S_ROUND(padsize, align);
	}

	/*
	 * Determine the initial permissions used to map in the first segment.
	 * If this segment's memsz is greater than its filesz then the
	 * difference must be zeroed.  Make sure this segment is writable.
	 */
	mperm = 0;
	if (fphdr->p_flags & PF_R)
		mperm |= PROT_READ;
	if (fphdr->p_flags & PF_X)
		mperm |= PROT_EXEC;
	if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz))
		mperm |= PROT_WRITE;

	/*
	 * Determine whether or not to let the system reserve address space
	 * based on whether this is a dynamic executable (addresses in object
	 * are fixed) or a shared object (addresses in object are relative to
	 * the object's base).
	 */
	if (fixed) {
		/*
		 * Determine the reservation address and size, and insure that
		 * this reservation isn't already in use.
		 */
		faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr);
		raddr = maddr - padpsize;
		rsize = fsize + padpsize + padsize;

		if (lml_main.lm_head) {
			if (elf_map_check(lml, name, raddr, rsize) != 0)
				return (0);
		}

		/*
		 * As this is a fixed image, all segments must be individually
		 * mapped.
		 */
		skipfseg = 0;

	} else {
		size_t	esize;

		/*
		 * If this isn't a fixed image, reserve enough address space for
		 * the entire image to be mapped.  The amount of reservation is
		 * the range between the beginning of the first, and end of the
		 * last loadable segment, together with any padding, plus the
		 * alignment of the first segment.
		 *
		 * The optimal reservation is made as a no-reserve mapping from
		 * anonymous memory.  Each segment is then mapped into this
		 * reservation.  If the anonymous mapping capability isn't
		 * available, the reservation is obtained from the file itself.
		 * In this case the first segment of the image is mapped as part
		 * of the reservation, thus only the following segments need to
		 * be remapped.
		 */
		rsize = fsize + padmsize + padsize;
		if ((amret = elf_map_reserve(lml, name, &raddr, rsize, mperm,
		    fd, align)) == AM_ERROR)
			return (0);
		maddr = raddr + padmsize;
		faddr = (caddr_t)S_ROUND((Off)maddr, align);

		/*
		 * If this reservation has been obtained from anonymous memory,
		 * then all segments must be individually mapped.  Otherwise,
		 * the first segment heads the reservation.
		 */
		if (amret == AM_OK)
			skipfseg = 0;
		else
			skipfseg = 1;

		/*
		 * For backward compatibility (where MAP_ALIGN isn't available),
		 * insure the alignment of the reservation is adequate for this
		 * object, and if not remap the object to obtain the correct
		 * alignment.
		 */
		if (faddr != maddr) {
			(void) munmap(raddr, rsize);

			rsize += align;
			if ((amret = elf_map_reserve(lml, name, &raddr, rsize,
			    mperm, fd, align)) == AM_ERROR)
				return (0);

			maddr = faddr = (caddr_t)S_ROUND((Off)(raddr +
			    padpsize), align);

			esize = maddr - raddr + padpsize;

			/*
			 * As this image has been realigned, the first segment
			 * of the file needs to be remapped to its correct
			 * location.
			 */
			skipfseg = 0;
		} else
			esize = padmsize - padpsize;

		/*
		 * If this reservation included padding, remove any excess for
		 * the start of the image (the padding was adjusted to insure
		 * the image was aligned appropriately).
		 */
		if (esize) {
			(void) munmap(raddr, esize);
			raddr += esize;
			rsize -= esize;
		}
	}
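	/*
	 * A worked example with hypothetical numbers: given an 8K page
	 * size, align == 0x10000 and r_debug.rtd_objpad == 0x100, the
	 * rounding above yields padpsize == 0x2000 and padmsize ==
	 * 0x10000; when the initial reservation is already suitably
	 * aligned (faddr == maddr), the excess trimmed from the front is
	 * padmsize - padpsize == 0xe000 bytes.
	 */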
	/*
	 * At this point we know the initial location of the image, and its
	 * size.  Pass these back to the caller for inclusion in the link-map
	 * that will eventually be created.
	 */
	*rraddr = raddr;
	*rrsize = rsize;

	/*
	 * The first loadable segment is now pointed to by maddr.  This segment
	 * will eventually contain the elf header and program headers, so reset
	 * the program header.  Pass this back to the caller for inclusion in
	 * the link-map so it can be used for later unmapping operations.
	 */
	/* LINTED */
	*rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff);

	/*
	 * If padding is required at the front of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 * Otherwise this reservation is backed by the file, or in the case of
	 * a fixed image, doesn't yet exist.  Map the padding so that it is
	 * suitably protected (PROT_NONE), and insure the first segment of the
	 * file is mapped to its correct location.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			if (dz_map(lml, raddr, padpsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);

			skipfseg = 0;
		}
		rsize -= padpsize;
	}

	/*
	 * Map individual segments.  For a fixed image, these will each be
	 * unique mappings.  For a reservation these will fill in the
	 * reservation.
	 */
	for (phdr = fphdr; phdr <= lphdr;
	    phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) {
		caddr_t	addr;
		Off	mlen, flen;
		size_t	size;

		/*
		 * Skip non-loadable segments or segments that don't occupy
		 * any memory.
		 */
		if (((phdr->p_type != PT_LOAD) &&
		    (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0))
			continue;

		/*
		 * Establish this segment's address relative to our base.
		 */
		addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr +
		    (fixed ? 0 : faddr)));

		/*
		 * Determine the mapping protection from the segment attributes.
		 * Also determine the etext address from the last loadable
		 * segment which has permissions but no write access.
		 */
		mperm = 0;
		if (phdr->p_flags) {
			if (phdr->p_flags & PF_R)
				mperm |= PROT_READ;
			if (phdr->p_flags & PF_X)
				mperm |= PROT_EXEC;
			if (phdr->p_flags & PF_W)
				mperm |= PROT_WRITE;
			else
				fmap->fm_etext = phdr->p_vaddr + phdr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
		}

		/*
		 * Determine the type of mapping required.
		 */
		if (phdr->p_type == PT_SUNWBSS) {
			/*
			 * Potentially, we can defer the loading of any SUNWBSS
			 * segment, depending on whether the symbols it provides
			 * have been bound to.  In this manner, large segments
			 * that are interposed upon between shared libraries
			 * may not require mapping.  Note, that the mapping
			 * information is recorded in our mapping descriptor at
			 * this time.
			 */
			mlen = phdr->p_memsz;
			flen = 0;

		} else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) {
			/*
			 * If this segment has no backing file and no flags
			 * specified, then it defines a reservation.  At this
			 * point all standard loadable segments will have been
			 * processed.  The segment reservation is mapped
			 * directly from /dev/null.
			 */
			if (nu_map(lml, (caddr_t)addr, phdr->p_memsz, PROT_NONE,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else if (phdr->p_filesz == 0) {
			/*
			 * If this segment has no backing file then it defines a
			 * nobits segment and is mapped directly from /dev/zero.
			 */
			if (dz_map(lml, (caddr_t)addr, phdr->p_memsz, mperm,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else {
			Off	foff;

			/*
			 * This mapping originates from the file.  Determine the
			 * file offset to which the mapping will be directed
			 * (must be aligned) and how much to map (might be more
			 * than the file in the case of .bss).
			 */
			foff = M_PTRUNC((ulong_t)phdr->p_offset);
			mlen = phdr->p_memsz + (phdr->p_offset - foff);
			flen = phdr->p_filesz + (phdr->p_offset - foff);

			/*
			 * If this is a non-fixed, non-anonymous mapping, and no
			 * padding is involved, then the first loadable segment
			 * is already part of the initial reservation.  In this
			 * case there is no need to remap this segment.
			 */
			if ((skipfseg == 0) || (phdr != fphdr)) {
				int	phdr_mperm = mperm;

				/*
				 * If this segment's memsz is greater than its
				 * filesz then the difference must be zeroed.
				 * Make sure this segment is writable.
				 */
				if (phdr->p_memsz > phdr->p_filesz)
					mperm |= PROT_WRITE;

				if (elf_map_textdata((caddr_t)addr, flen,
				    mperm, phdr_mperm,
				    (MAP_FIXED | MAP_PRIVATE), fd, foff) ==
				    MAP_FAILED) {
					int	err = errno;
					eprintf(lml, ERR_FATAL,
					    MSG_INTL(MSG_SYS_MMAP), name,
					    strerror(err));
					return (0);
				}
			}

			/*
			 * If the memory occupancy of the segment overflows the
			 * definition in the file, we need to "zero out" the end
			 * of the mapping we've established, and if necessary,
			 * map some more space from /dev/zero.  Note, zero'ed
			 * memory must end on a double word boundary to satisfy
			 * zero().
			 */
			if (phdr->p_memsz > phdr->p_filesz) {
				caddr_t	zaddr;
				size_t	zlen, zplen;
				Off	fend;

				foff = (Off)(phdr->p_vaddr + phdr->p_filesz +
				    (fixed ? 0 : faddr));
				zaddr = (caddr_t)M_PROUND(foff);
				zplen = (size_t)(zaddr - foff);

				fend = (Off)S_DROUND((size_t)(phdr->p_vaddr +
				    phdr->p_memsz + (fixed ? 0 : faddr)));
				zlen = (size_t)(fend - foff);

				/*
				 * Determine whether the number of bytes that
				 * must be zero'ed overflows to the next page.
				 * If not, simply clear the exact bytes
				 * (filesz to memsz) from this page.  Otherwise,
				 * clear the remaining bytes of this page, and
				 * map any following pages from /dev/zero.
				 */
				if (zlen < zplen)
					zero((caddr_t)foff, (long)zlen);
				else {
					zero((caddr_t)foff, (long)zplen);

					if ((zlen = (fend - (Off)zaddr)) > 0) {
						if (dz_map(lml, zaddr, zlen,
						    mperm,
						    MAP_FIXED | MAP_PRIVATE) ==
						    MAP_FAILED)
							return (0);
					}
				}
			}
		}

		/*
		 * Unmap anything from the last mapping address to this one and
		 * update the mapping claim pointer.
		 */
		if ((fixed == 0) && ((size = addr - maddr) != 0)) {
			(void) munmap(maddr, size);
			rsize -= size;
		}

		/*
		 * Retain this segment's mapping information.
		 */
		mmaps[*mmapcnt].m_vaddr = addr;
		mmaps[*mmapcnt].m_msize = mlen;
		mmaps[*mmapcnt].m_fsize = flen;
		mmaps[*mmapcnt].m_perm = mperm;
		(*mmapcnt)++;

		maddr = addr + M_PROUND(mlen);
		rsize -= M_PROUND(mlen);
	}

	/*
	 * If padding is required at the end of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			/*
			 * maddr is currently page aligned from the last segment
			 * mapping.
			 */
			if (dz_map(lml, maddr, padsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);
		}
		maddr += padsize;
		rsize -= padsize;
	}

	/*
	 * Unmap any final reservation.
	 */
	if ((fixed == 0) && (rsize != 0))
		(void) munmap(maddr, rsize);

	return (faddr);
}

/*
 * A null symbol interpreter.  Used if a filter has no associated filtees.
 */
/* ARGSUSED0 */
static Sym *
elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	return ((Sym *)0);
}

/*
 * Disable filtee use.
 */
static void
elf_disable_filtee(Rt_map *lmp, Dyninfo *dip)
{
	dip->di_info = 0;

	if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) {
		/*
		 * If this is an object filter, free the filtee's duplication.
		 */
		if (OBJFLTRNDX(lmp) != FLTR_DISABLED) {
			free(REFNAME(lmp));
			REFNAME(lmp) = (char *)0;
			OBJFLTRNDX(lmp) = FLTR_DISABLED;

			/*
			 * Indicate that this filtee is no longer available.
			 */
			if (dip->di_flags & FLG_DI_STDFLTR)
				SYMINTP(lmp) = elf_null_find_sym;
		}
	} else if (dip->di_flags & FLG_DI_STDFLTR) {
		/*
		 * Indicate that this standard filtee is no longer available.
		 */
		if (SYMSFLTRCNT(lmp))
			SYMSFLTRCNT(lmp)--;
	} else {
		/*
		 * Indicate that this auxiliary filtee is no longer available.
		 */
		if (SYMAFLTRCNT(lmp))
			SYMAFLTRCNT(lmp)--;
	}
	dip->di_flags &= ~MSK_DI_FILTER;
}
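/*
 * For orientation (a general note, not specific to the routine above): a
 * standard filter is built with ld -F and recorded as DT_FILTER or
 * DT_SUNW_FILTER, and all of its symbols must resolve in a filtee; an
 * auxiliary filter is built with ld -f (DT_AUXILIARY/DT_SUNW_AUXILIARY)
 * and falls back to its own definitions should a filtee be unavailable.
 * This is why only standard object filters are redirected to
 * elf_null_find_sym() above.
 */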
/*
 * Find symbol interpreter - filters.
 * This function is called when the symbols from a shared object should
 * be resolved from the shared object's filtees instead of from within itself.
 *
 * A symbol name of 0 is used to trigger filtee loading.
 */
static Sym *
_elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	const char	*name = slp->sl_name, *filtees;
	Rt_map		*clmp = slp->sl_cmap;
	Rt_map		*ilmp = slp->sl_imap;
	Pnode		*pnp, **pnpp;
	int		any;
	Dyninfo		*dip = &DYNINFO(ilmp)[ndx];
	Lm_list		*lml = LIST(ilmp);

	/*
	 * Indicate that the filter has been used.  If a binding already exists
	 * to the caller, indicate that this object is referenced.  This insures
	 * we don't generate false unreferenced diagnostics from ldd -u/U or
	 * debugging.  Don't create a binding regardless, as this filter may
	 * have been dlopen()'ed.
	 */
	if (name && (ilmp != clmp)) {
		Word	tracing = (LIST(clmp)->lm_flags &
		    (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED));

		if (tracing || DBG_ENABLED) {
			Bnd_desc	**bdpp;
			Aliste		off;

			FLAGS1(ilmp) |= FL1_RT_USED;

			if ((tracing & LML_FLG_TRC_UNREF) || DBG_ENABLED) {
				for (ALIST_TRAVERSE(CALLERS(ilmp), off, bdpp)) {
					Bnd_desc	*bdp = *bdpp;

					if (bdp->b_caller == clmp) {
						bdp->b_flags |= BND_REFER;
						break;
					}
				}
			}
		}
	}

	/*
	 * If this is the first call to process this filter, establish the
	 * filtee list.  If a configuration file exists, determine if any
	 * filtee associations for this filter, and its filtee reference, are
	 * defined.  Otherwise, process the filtee reference.  Any token
	 * expansion is also completed at this point (i.e., $PLATFORM).
	 */
	filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val;
	if (dip->di_info == 0) {
		if (rtld_flags2 & RT_FL2_FLTCFG)
			dip->di_info = elf_config_flt(lml, PATHNAME(ilmp),
			    filtees);

		if (dip->di_info == 0) {
			DBG_CALL(Dbg_file_filter(lml, NAME(ilmp), filtees, 0));
			if ((lml->lm_flags &
			    (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) &&
			    ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0))
				(void) printf(MSG_INTL(MSG_LDD_FIL_FILTER),
				    NAME(ilmp), filtees);

			if ((dip->di_info = (void *)expand_paths(ilmp,
			    filtees, PN_SER_FILTEE, 0)) == 0) {
				elf_disable_filtee(ilmp, dip);
				return ((Sym *)0);
			}
		}
	}

	/*
	 * Traverse the filtee list, dlopen()'ing any objects specified and
	 * using their group handle to lookup the symbol.
	 */
	for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp;
	    pnpp = &pnp->p_next, pnp = *pnpp) {
		int	mode;
		Grp_hdl	*ghp;
		Rt_map	*nlmp = 0;

		if (pnp->p_len == 0)
			continue;

		/*
		 * Establish the mode of the filtee from the filter.  As filtees
		 * are loaded via a dlopen(), make sure that RTLD_GROUP is set
		 * and the filtees aren't global.  It would be nice to have
		 * RTLD_FIRST used here also, but as filters got out long before
		 * RTLD_FIRST was introduced it's a little too late now.
		 */
		mode = MODE(ilmp) | RTLD_GROUP;
		mode &= ~RTLD_GLOBAL;

		/*
		 * Insure that any auxiliary filter can locate symbols from its
		 * caller.
		 */
		if (dip->di_flags & FLG_DI_AUXFLTR)
			mode |= RTLD_PARENT;

		/*
		 * Process any hardware capability directory.  Establish a new
		 * link-map control list from which to analyze any newly added
		 * objects.
		 */
		if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) {
			Lm_cntl	*lmc;
			Aliste	lmco;

			if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
				if ((lmc = alist_append(&(lml->lm_lists), 0,
				    sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0)
					return ((Sym *)0);
				lmco = (Aliste)((char *)lmc -
				    (char *)lml->lm_lists);
			} else {
				lmc = 0;
				lmco = ALO_DATA;
			}

			pnp = hwcap_filtees(pnpp, lmco, dip, ilmp, filtees,
			    mode, (FLG_RT_HANDLE | FLG_RT_HWCAP));

			/*
			 * Now that any hardware capability objects have been
			 * processed, remove any link-map control list.
			 */
			if (lmc) {
				if (pnp->p_len == 0)
					(void) lm_salvage(lml, 0, lmco);
				remove_cntl(lml, lmco);
			}
		}

		if (pnp->p_len == 0)
			continue;

		/*
		 * Process an individual filtee.
		 */
		if (pnp->p_info == 0) {
			const char	*filtee = pnp->p_name;
			int		audit = 0;

			DBG_CALL(Dbg_file_filtee(lml, NAME(ilmp), filtee, 0));

			ghp = 0;

			/*
			 * Determine if the reference link map is already
			 * loaded.  As an optimization compare the filtee with
			 * our interpreter.  The most common filter is
			 * libdl.so.1, which is a filter on ld.so.1.
			 */
#if	defined(_ELF64)
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) {
#else
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) {
#endif
				/*
				 * Create an association between ld.so.1 and
				 * the filter.
				 */
				nlmp = lml_rtld.lm_head;
				if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp,
				    (GPH_LDSO | GPH_FIRST | GPH_FILTEE))) == 0)
					nlmp = 0;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp)
					pnp->p_info = (void *)ghp;

				/*
				 * Audit the filter/filtee established.  Ignore
				 * any return from the auditor, as we can't
				 * allow the auditor to reject ld.so.1 as a
				 * filtee, otherwise nothing is going to work.
				 */
				if ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)
					(void) audit_objfilter(ilmp, filtees,
					    nlmp, 0);

			} else {
				Rej_desc	rej = { 0 };
				Lm_cntl		*lmc;
				Aliste		lmco;

				/*
				 * Establish a new link-map control list from
				 * which to analyze any newly added objects.
				 */
				if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
					if ((lmc =
					    alist_append(&(lml->lm_lists), 0,
					    sizeof (Lm_cntl),
					    AL_CNT_LMLISTS)) == 0)
						return ((Sym *)0);
					lmco = (Aliste)((char *)lmc -
					    (char *)lml->lm_lists);
				} else {
					lmc = 0;
					lmco = ALO_DATA;
				}

				/*
				 * Load the filtee.
				 */
				if ((nlmp = load_path(lml, lmco, filtee, ilmp,
				    mode, FLG_RT_HANDLE, &ghp, 0, &rej)) == 0) {
					file_notfound(LIST(ilmp), filtee, ilmp,
					    FLG_RT_HANDLE, &rej);
					remove_rej(&rej);
				}

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp) {
					ghp->gh_flags |= GPH_FILTEE;
					pnp->p_info = (void *)ghp;
				}

				/*
				 * Audit the filter/filtee established.  A
				 * return of 0 indicates the auditor wishes to
				 * ignore this filtee.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)) {
					if (audit_objfilter(ilmp, filtees,
					    nlmp, 0) == 0) {
						audit = 1;
						nlmp = 0;
					}
				}

				/*
				 * Finish processing the objects associated with
				 * this request.  Create an association between
				 * this object and the originating filter to
				 * provide sufficient information to tear down
				 * this filtee if necessary.
				 */
				if (nlmp && ghp &&
				    ((analyze_lmc(lml, lmco, nlmp) == 0) ||
				    (relocate_lmc(lml, lmco, nlmp) == 0)))
					nlmp = 0;

				/*
				 * If the filtee has been successfully
				 * processed, and it is part of a link-map
				 * control list that is equivalent, or less,
				 * than the filter control list, create an
				 * association between the filter and filtee.
				 * This association provides sufficient
				 * information to tear down the filter and
				 * filtee if necessary.
				 */
				if (nlmp && ghp && (CNTL(nlmp) <= CNTL(ilmp)) &&
				    (hdl_add(ghp, ilmp, GPD_FILTER) == 0))
					nlmp = 0;

				/*
				 * Now that this object has been processed,
				 * remove any link-map control list.
				 */
				if (lmc) {
					if (nlmp == 0)
						(void) lm_salvage(lml, 0, lmco);
					remove_cntl(lml, lmco);
				}
			}

			/*
			 * Generate a diagnostic if the filtee couldn't be
			 * loaded, null out the pnode entry, and continue
			 * the search.  Otherwise, retain this group handle
			 * for future symbol searches.
			 */
			if (nlmp == 0) {
				pnp->p_info = 0;
				DBG_CALL(Dbg_file_filtee(lml, 0, filtee,
				    audit));

				if (ghp)
					(void) dlclose_core(ghp, ilmp);

				pnp->p_len = 0;
				continue;
			}
		}

		ghp = (Grp_hdl *)pnp->p_info;

		/*
		 * If we're just here to trigger filtee loading skip the symbol
		 * lookup so we'll continue looking for additional filtees.
		 */
		if (name) {
			Grp_desc	*gdp;
			Sym		*sym = 0;
			Aliste		off;
			Slookup		sl = *slp;

			sl.sl_flags |= LKUP_FIRST;
			any++;

			/*
			 * Look for the symbol in the handle's dependencies.
			 */
			for (ALIST_TRAVERSE(ghp->gh_depends, off, gdp)) {
				if ((gdp->gd_flags & GPD_AVAIL) == 0)
					continue;

				/*
				 * If our parent is a dependency don't look at
				 * it (otherwise we are in a recursive loop).
				 * This situation can occur with auxiliary
				 * filters if the filtee has a dependency on the
				 * filter.  This dependency isn't necessary as
				 * auxiliary filters are opened RTLD_PARENT, but
				 * users may still unknowingly add an explicit
				 * dependency to the parent.
				 */
				if ((sl.sl_imap = gdp->gd_depend) == ilmp)
					continue;

				if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp,
				    binfo)) != 0) ||
				    (ghp->gh_flags & GPH_FIRST))
					break;
			}

			/*
			 * If this filtee has just been loaded (nlmp != 0),
			 * determine whether the filtee was triggered by a
			 * relocation from an object that is still being
			 * relocated on a leaf link-map control list.  As the
			 * relocation of an object on this list might still
			 * fail, we can't yet bind the filter to the filtee.
			 * To do so would lock the filtee so that it couldn't
			 * be deleted, and the filtee itself could have bound
			 * to an object that must be torn down.  Insure the
			 * caller isn't bound to the handle at this time.
			 * Any association will be reestablished when the
			 * filter is later referenced and the filtee has
			 * propagated to the same link-map control list.
			 */
			if (nlmp && (CNTL(nlmp) > CNTL(ilmp))) {
				remove_caller(ghp, ilmp);
				pnp->p_info = 0;
			}
			if (sym) {
				*binfo |= DBG_BINFO_FILTEE;
				return (sym);
			}
		}

		/*
		 * If this object is tagged to terminate filtee processing we're
		 * done.
		 */
		if (FLAGS1(ghp->gh_ownlmp) & FL1_RT_ENDFILTE)
			break;
	}

	/*
	 * If we're just here to trigger filtee loading then we're done.
	 */
	if (name == 0)
		return ((Sym *)0);

	/*
	 * If no filtees have been found for a filter, clean up any Pnode
	 * structures and disable their search completely.  For auxiliary
	 * filters we can reselect the symbol search function so that we never
	 * enter this routine again for this object.  For standard filters we
	 * use the null symbol routine.
	 */
	if (any == 0) {
		remove_pnode((Pnode *)dip->di_info);
		elf_disable_filtee(ilmp, dip);
		return ((Sym *)0);
	}

	return ((Sym *)0);
}
/*
 * Focal point for disabling error messages for auxiliary filters.  As an
 * auxiliary filter allows for filtee use, but provides a fallback should a
 * filtee not exist (or fail to load), any errors generated as a consequence of
 * trying to load the filtees are typically suppressed.  Setting RT_FL_SILENCERR
 * suppresses errors generated by eprintf(), but insures a debug diagnostic is
 * produced.  ldd(1) employs printf(), and here the selection of whether to
 * print a diagnostic with regard to auxiliary filters is a little more complex.
 *
 *  .	The determination of whether to produce an ldd message, or a fatal
 *	error message, is driven by LML_FLG_TRC_ENABLE.
 *  .	More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN
 *	(ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s),
 *	and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u).
 *
 *  .	If the calling object is lddstub, then several classes of message are
 *	suppressed.  The user isn't trying to diagnose lddstub, this is simply
 *	a stub executable employed to preload a user-specified library against.
 *
 *  .	If RT_FL_SILENCERR is in effect then any generic ldd() messages should
 *	be suppressed.  All detailed ldd messages should still be produced.
 */
Sym *
elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	Sym	*sym;
	Dyninfo	*dip = &DYNINFO(slp->sl_imap)[ndx];
	int	silent = 0;

	/*
	 * Make sure this entry is still acting as a filter.  We may have tried
	 * to process this previously, and disabled it if the filtee couldn't
	 * be processed.  However, other entries may provide different filtees
	 * that are yet to be completed.
	 */
	if (dip->di_flags == 0)
		return ((Sym *)0);

	/*
	 * Indicate whether an error message is required should this filtee not
	 * be found, based on the type of filter.
	 */
	if ((dip->di_flags & FLG_DI_AUXFLTR) &&
	    ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) {
		rtld_flags |= RT_FL_SILENCERR;
		silent = 1;
	}

	sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx);

	if (silent)
		rtld_flags &= ~RT_FL_SILENCERR;

	return (sym);
}

/*
 * Compute the elf hash value (as defined in the ELF access library).
 * The form of the hash table is:
 *
 *	|--------------|
 *	| # of buckets |
 *	|--------------|
 *	| # of chains  |
 *	|--------------|
 *	|   bucket[]   |
 *	|--------------|
 *	|   chain[]    |
 *	|--------------|
 */
ulong_t
elf_hash(const char *name)
{
	uint_t	hval = 0;

	while (*name) {
		uint_t	g;

		hval = (hval << 4) + *name++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return ((ulong_t)hval);
}
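/*
 * Usage sketch (illustrative): with the layout above, looking up name in
 * an object lmp amounts to
 *
 *	uint_t	*hash = HASH(lmp);
 *	uint_t	ndx = hash[2 + (elf_hash(name) % hash[0])];
 *	uint_t	*chain = &hash[2 + hash[0]];
 *
 *	while (ndx && (strcmp(name, STRTAB(lmp) +
 *	    SYMTAB(lmp)[ndx].st_name) != 0))
 *		ndx = chain[ndx];
 *
 * which is the walk elf_find_sym() below performs.
 */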
/*
 * If the flag argument has LKUP_SPEC set, we treat undefined symbols of type
 * function specially in the executable - if they have a value, even though
 * undefined, we use that value.  This allows us to associate all references
 * to a function's address to a single place in the process: the plt entry
 * for that function in the executable.  Calls to lookup from plt binding
 * routines do NOT set LKUP_SPEC in the flag.
 */
Sym *
elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	const char	*name = slp->sl_name;
	Rt_map		*ilmp = slp->sl_imap;
	ulong_t		hash = slp->sl_hash;
	uint_t		ndx, htmp, buckets, *chainptr;
	Sym		*sym, *symtabptr;
	char		*strtabptr, *strtabname;
	uint_t		flags1;
	Syminfo		*sip;

	/*
	 * If we're only here to establish a symbol's index, skip the
	 * diagnostic used to trace a symbol search.
	 */
	if ((slp->sl_flags & LKUP_SYMNDX) == 0)
		DBG_CALL(Dbg_syms_lookup(ilmp, name, MSG_ORIG(MSG_STR_ELF)));

	if (HASH(ilmp) == 0)
		return ((Sym *)0);

	buckets = HASH(ilmp)[0];
	/* LINTED */
	htmp = (uint_t)hash % buckets;

	/*
	 * Get the first symbol on hash chain and initialize the string
	 * and symbol table pointers.
	 */
	if ((ndx = HASH(ilmp)[htmp + 2]) == 0)
		return ((Sym *)0);

	chainptr = HASH(ilmp) + 2 + buckets;
	strtabptr = STRTAB(ilmp);
	symtabptr = SYMTAB(ilmp);

	while (ndx) {
		sym = symtabptr + ndx;
		strtabname = strtabptr + sym->st_name;

		/*
		 * Compare the symbol found with the name required.  If the
		 * names don't match continue with the next hash entry.
		 */
		if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) {
			if ((ndx = chainptr[ndx]) != 0)
				continue;
			return ((Sym *)0);
		}

		/*
		 * If we're only here to establish a symbol's index, we're done.
		 */
		if (slp->sl_flags & LKUP_SYMNDX)
			return (sym);

		/*
		 * If we find a match and the symbol is defined, return the
		 * symbol pointer and the link map in which it was found.
		 */
		if (sym->st_shndx != SHN_UNDEF) {
			*dlmp = ilmp;
			*binfo |= DBG_BINFO_FOUND;
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			break;

		/*
		 * If we find a match and the symbol is undefined, the
		 * symbol type is a function, and the value of the symbol
		 * is non zero, then this is a special case.  This allows
		 * the resolution of a function address to the plt[] entry.
		 * See SPARC ABI, Dynamic Linking, Function Addresses for
		 * more details.
		 */
		} else if ((slp->sl_flags & LKUP_SPEC) &&
		    (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) &&
		    (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) {
			*dlmp = ilmp;
			*binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR);
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			return (sym);
		}

		/*
		 * Undefined symbol.
		 */
		return ((Sym *)0);
	}

	/*
	 * We've found a match.  Determine if the defining object contains
	 * symbol binding information.
	 */
	if ((sip = SYMINFO(ilmp)) != 0)
		sip += ndx;

	/*
	 * If this is a direct binding request, but the symbol definition has
	 * disabled directly binding to it (presumably because the symbol
	 * definition has been changed since the referring object was built),
	 * indicate this failure so that the caller can fall back to a standard
	 * symbol search.  Clear any debug binding information for cleanliness.
	 */
	 */
	if (sip && (slp->sl_flags & LKUP_DIRECT) &&
	    (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) {
		*binfo |= BINFO_DIRECTDIS;
		*binfo &= ~DBG_BINFO_MSK;
		return ((Sym *)0);
	}

	/*
	 * Determine whether this object is acting as a filter.
	 */
	if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0)
		return (sym);

	/*
	 * Determine if this object offers per-symbol filtering, and if so,
	 * whether this symbol references a filtee.
	 */
	if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) {
		/*
		 * If this is a standard filter reference, and no standard
		 * filtees remain to be inspected, we're done.  If this is an
		 * auxiliary filter reference, and no auxiliary filtees remain,
		 * we'll fall through in case any object filtering is
		 * available.
		 */
		if ((sip->si_flags & SYMINFO_FLG_FILTER) &&
		    (SYMSFLTRCNT(ilmp) == 0))
			return ((Sym *)0);

		if ((sip->si_flags & SYMINFO_FLG_FILTER) ||
		    ((sip->si_flags & SYMINFO_FLG_AUXILIARY) &&
		    SYMAFLTRCNT(ilmp))) {
			Sym	*fsym;

			/*
			 * This symbol has an associated filtee.  Lookup the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return an error, otherwise fall through to
			 * catch any object filtering that may be available.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    sip->si_boundto)) != 0)
				return (fsym);
			if (sip->si_flags & SYMINFO_FLG_FILTER)
				return ((Sym *)0);
		}
	}

	/*
	 * Determine if this object provides global filtering.
	 */
	if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) {
		Sym	*fsym;

		if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) {
			/*
			 * This object has an associated filtee.  Lookup the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return an error, otherwise return the symbol
			 * within the filter itself.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    OBJFLTRNDX(ilmp))) != 0)
				return (fsym);
		}

		if (flags1 & FL1_RT_OBJSFLTR)
			return ((Sym *)0);
	}
	return (sym);
}

/*
 * Create a new Rt_map structure for an ELF object and initialize
 * all values.
 */
Rt_map *
elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld,
    ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry,
    ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt)
{
	Rt_map	*lmp;
	ulong_t	base, fltr = 0, audit = 0, cfile = 0, crle = 0;
	Xword	rpath = 0;
	Ehdr	*ehdr = (Ehdr *)addr;

	DBG_CALL(Dbg_file_elf(lml, pname, (ulong_t)ld, addr, msize, entry,
	    lml->lm_lmidstr, lmco));

	/*
	 * Allocate space for the link-map and private elf information.  Once
	 * these are allocated and initialized, we can use remove_so(0, lmp) to
	 * tear down the link-map should any failures occur.
	 */
	if ((lmp = calloc(sizeof (Rt_map), 1)) == 0)
		return (0);
	if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) {
		free(lmp);
		return (0);
	}

	/*
	 * All fields not filled in were set to 0 by calloc.
	 */
	ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname;
	DYN(lmp) = ld;
	ADDR(lmp) = addr;
	MSIZE(lmp) = msize;
	ENTRY(lmp) = (Addr)entry;
	SYMINTP(lmp) = elf_find_sym;
	ETEXT(lmp) = etext;
	FCT(lmp) = &elf_fct;
	LIST(lmp) = lml;
	PADSTART(lmp) = paddr;
	PADIMLEN(lmp) = padimsize;
	THREADID(lmp) = rt_thr_self();
	OBJFLTRNDX(lmp) = FLTR_DISABLED;
	SORTVAL(lmp) = -1;

	MMAPS(lmp) = mmaps;
	MMAPCNT(lmp) = mmapcnt;
	ASSERT(mmapcnt != 0);

	/*
	 * If this is a shared object, add the base address to each address.
	 * If this is an executable, use each address as is.
	 */
	if (ehdr->e_type == ET_EXEC) {
		base = 0;
		FLAGS(lmp) |= FLG_RT_FIXED;
	} else
		base = addr;

	/*
	 * Fill in the rest of the link map entries with information from the
	 * file's dynamic structure.
	 */
	if (ld) {
		uint_t		dyncnt = 0;
		Xword		pltpadsz = 0;
		Rti_desc	*rti;

		/* CSTYLED */
		for ( ; ld->d_tag != DT_NULL; ++ld, dyncnt++) {
			switch ((Xword)ld->d_tag) {
			case DT_SYMTAB:
				SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMTAB:
				SUNWSYMTAB(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSZ:
				SUNWSYMSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_STRTAB:
				STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SYMENT:
				SYMENT(lmp) = ld->d_un.d_val;
				break;
			case DT_FEATURE_1:
				ld->d_un.d_val |= DTF_1_PARINIT;
				if (ld->d_un.d_val & DTF_1_CONFEXP)
					crle = 1;
				break;
			case DT_MOVESZ:
				MOVESZ(lmp) = ld->d_un.d_val;
				FLAGS(lmp) |= FLG_RT_MOVE;
				break;
			case DT_MOVEENT:
				MOVEENT(lmp) = ld->d_un.d_val;
				break;
			case DT_MOVETAB:
				MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_REL:
			case DT_RELA:
				/*
				 * At this time we can only handle 1 type of
				 * relocation per object.
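				 *
				 * For reference, the two layouts involved
				 * (standard ELF definitions) are:
				 *
				 *	typedef struct {
				 *		Elf32_Addr	r_offset;
				 *		Elf32_Word	r_info;
				 *	} Elf32_Rel;
				 *
				 *	typedef struct {
				 *		Elf64_Addr	r_offset;
				 *		Elf64_Xword	r_info;
				 *		Elf64_Sxword	r_addend;
				 *	} Elf64_Rela;
				 *
				 * REL(lmp) records whichever table the object
				 * supplies; DT_RELENT/DT_RELAENT convey the
				 * entry size.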
				 */
				REL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_RELSZ:
			case DT_RELASZ:
				RELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_RELENT:
			case DT_RELAENT:
				RELENT(lmp) = ld->d_un.d_val;
				break;
			case DT_RELCOUNT:
			case DT_RELACOUNT:
				RELACOUNT(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_TEXTREL:
				FLAGS1(lmp) |= FL1_RT_TEXTREL;
				break;
			case DT_HASH:
				HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTGOT:
				PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTRELSZ:
				PLTRELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_JMPREL:
				JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_INIT:
				INIT(lmp) = (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_FINI:
				FINI(lmp) = (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_INIT_ARRAY:
				INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_INIT_ARRAYSZ:
				INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_FINI_ARRAY:
				FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_FINI_ARRAYSZ:
				FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_PREINIT_ARRAY:
				PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_PREINIT_ARRAYSZ:
				PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_RPATH:
			case DT_RUNPATH:
				rpath = ld->d_un.d_val;
				break;
			case DT_FILTER:
				fltr = ld->d_un.d_val;
				OBJFLTRNDX(lmp) = dyncnt;
				FLAGS1(lmp) |= FL1_RT_OBJSFLTR;
				break;
			case DT_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					fltr = ld->d_un.d_val;
					OBJFLTRNDX(lmp) = dyncnt;
				}
				FLAGS1(lmp) |= FL1_RT_OBJAFLTR;
				break;
			case DT_SUNW_FILTER:
				SYMSFLTRCNT(lmp)++;
				FLAGS1(lmp) |= FL1_RT_SYMSFLTR;
				break;
			case DT_SUNW_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					SYMAFLTRCNT(lmp)++;
				}
				FLAGS1(lmp) |= FL1_RT_SYMAFLTR;
				break;
			case DT_DEPAUDIT:
				if (!(rtld_flags & RT_FL_NOAUDIT))
					audit = ld->d_un.d_val;
				break;
			case DT_CONFIG:
				cfile = ld->d_un.d_val;
				break;
			case DT_DEBUG:
				/*
				 * DT_DEBUG entries are only created in
				 * dynamic objects that require an interpreter
				 * (ie. all dynamic executables and some shared
				 * objects), and provide for a hand-shake with
				 * debuggers.  This entry is initialized to
				 * zero by the link-editor.  If a debugger is
				 * controlling us and has updated this entry,
				 * set the debugger flag, and finish
				 * initializing the debugging structure (see
				 * setup() also).  Switch off any configuration
				 * object use, as most debuggers can't handle
				 * fixed dynamic executables as dependencies,
				 * and we can't handle requests like object
				 * padding for alternative objects.
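				 *
				 * A sketch of the hand-shake from a debugger's
				 * perspective (details vary by debugger): read
				 * DT_DEBUG to obtain &r_debug, then walk the
				 * link-map list it anchors, e.g.
				 *
				 *	for (lm = r_debug.r_map; lm;
				 *	    lm = lm->l_next)
				 *		inspect l_name and l_addr;
				 *
				 * with member names as in <link.h>.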
				 */
				if (ld->d_un.d_ptr)
					rtld_flags |=
					    (RT_FL_DEBUGGER | RT_FL_NOOBJALT);
				ld->d_un.d_ptr = (Addr)&r_debug;
				break;
			case DT_VERNEED:
				VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_VERNEEDNUM:
				/* LINTED */
				VERNEEDNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_VERDEF:
				VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base);
				break;
			case DT_VERDEFNUM:
				/* LINTED */
				VERDEFNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_BIND_NOW:
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				break;
			case DT_FLAGS:
				if (ld->d_un.d_val & DF_SYMBOLIC)
					FLAGS1(lmp) |= FL1_RT_SYMBOLIC;
				if (ld->d_un.d_val & DF_TEXTREL)
					FLAGS1(lmp) |= FL1_RT_TEXTREL;
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				/*
				 * Capture any static TLS use, and enforce that
				 * this object be non-deletable.
				 */
				if (ld->d_un.d_val & DF_STATIC_TLS) {
					FLAGS1(lmp) |= FL1_RT_TLSSTAT;
					MODE(lmp) |= RTLD_NODELETE;
				}
				break;
			case DT_FLAGS_1:
				if (ld->d_un.d_val & DF_1_DISPRELPND)
					FLAGS1(lmp) |= FL1_RT_DISPREL;
				if (ld->d_un.d_val & DF_1_GROUP)
					FLAGS(lmp) |=
					    (FLG_RT_SETGROUP | FLG_RT_HANDLE);
				if ((ld->d_un.d_val & DF_1_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				if (ld->d_un.d_val & DF_1_NODELETE)
					MODE(lmp) |= RTLD_NODELETE;
				if (ld->d_un.d_val & DF_1_INITFIRST)
					FLAGS(lmp) |= FLG_RT_INITFRST;
				if (ld->d_un.d_val & DF_1_NOOPEN)
					FLAGS(lmp) |= FLG_RT_NOOPEN;
				if (ld->d_un.d_val & DF_1_LOADFLTR)
					FLAGS(lmp) |= FLG_RT_LOADFLTR;
				if (ld->d_un.d_val & DF_1_NODUMP)
					FLAGS(lmp) |= FLG_RT_NODUMP;
				if (ld->d_un.d_val & DF_1_CONFALT)
					crle = 1;
				if (ld->d_un.d_val & DF_1_DIRECT)
					FLAGS1(lmp) |= FL1_RT_DIRECT;
				if (ld->d_un.d_val & DF_1_NODEFLIB)
					FLAGS1(lmp) |= FL1_RT_NODEFLIB;
				if (ld->d_un.d_val & DF_1_ENDFILTEE)
					FLAGS1(lmp) |= FL1_RT_ENDFILTE;
				if (ld->d_un.d_val & DF_1_TRANS)
					FLAGS(lmp) |= FLG_RT_TRANS;
#ifndef	EXPAND_RELATIVE
				if (ld->d_un.d_val & DF_1_ORIGIN)
					FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
				/*
				 * If this object identifies itself as an
				 * interposer, but relocation processing has
				 * already started, then demote it.  It's too
				 * late to guarantee complete interposition.
				 */
				if (ld->d_un.d_val &
				    (DF_1_INTERPOSE | DF_1_SYMINTPOSE)) {
					if (lml->lm_flags & LML_FLG_STARTREL) {
						DBG_CALL(Dbg_util_intoolate(lmp));
						if (lml->lm_flags & LML_FLG_TRC_ENABLE)
							(void) printf(
							    MSG_INTL(MSG_LDD_REL_ERR2),
							    NAME(lmp));
					} else if (ld->d_un.d_val & DF_1_INTERPOSE)
						FLAGS(lmp) |= FLG_RT_OBJINTPO;
					else
						FLAGS(lmp) |= FLG_RT_SYMINTPO;
				}
				break;
			case DT_SYMINFO:
				SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_SYMINENT:
				SYMINENT(lmp) = ld->d_un.d_val;
				break;
			case DT_PLTPAD:
				PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTPADSZ:
				pltpadsz = ld->d_un.d_val;
				break;
			case DT_SUNW_RTLDINF:
				/*
				 * Maintain a list of RTLDINFO structures.
				 * Typically, libc is the only supplier, and
				 * only one structure is provided.  However,
				 * multiple suppliers and multiple structures
				 * are supported.  For example, one structure
				 * may provide thread_init, and another
				 * structure may provide atexit reservations.
				 */
				if ((rti = alist_append(&lml->lm_rti, 0,
				    sizeof (Rti_desc), AL_CNT_RTLDINFO)) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				rti->rti_lmp = lmp;
				rti->rti_info = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SORTENT:
				SUNWSORTENT(lmp) = ld->d_un.d_val;
				break;
			case DT_SUNW_SYMSORT:
				SUNWSYMSORT(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSORTSZ:
				SUNWSYMSORTSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_DEPRECATED_SPARC_REGISTER:
			case M_DT_REGISTER:
				FLAGS(lmp) |= FLG_RT_REGSYMS;
				break;
			case M_DT_PLTRESERVE:
				PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr +
				    base);
				break;
			}
		}

		if (PLTPAD(lmp)) {
			if (pltpadsz == (Xword)0)
				PLTPAD(lmp) = 0;
			else
				PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) +
				    pltpadsz);
		}

		/*
		 * Allocate the Dynamic Info structure.
		 */
		if ((DYNINFO(lmp) = calloc((size_t)dyncnt,
		    sizeof (Dyninfo))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
		DYNINFOCNT(lmp) = dyncnt;
	}

	/*
	 * A dynsym contains only global functions.  We want to have
	 * a version of it that also includes local functions, so that
	 * dladdr() will be able to report names for local functions
	 * when used to generate a stack trace for a stripped file.
	 * This version of the dynsym is provided via DT_SUNW_SYMTAB.
	 *
	 * In producing DT_SUNW_SYMTAB, ld uses a non-obvious trick
	 * to avoid keeping two copies of the global symbols held in
	 * DT_SYMTAB: The local symbols are placed in a separate
	 * section from the globals in the dynsym, but the linker
	 * conspires to put the data for these two sections adjacent
	 * to each other.  DT_SUNW_SYMTAB points at the top of the local
	 * symbols, and DT_SUNW_SYMSZ is the combined length of both tables.
	 *
	 * If the two sections are not adjacent, then something went wrong
	 * at link time.  We use ASSERT to kill the process if this is
	 * a debug build.  In a production build, we will silently ignore
	 * the presence of the .ldynsym and proceed.  We can detect this
	 * situation by checking to see that DT_SYMTAB lies in
	 * the range given by DT_SUNW_SYMTAB/DT_SUNW_SYMSZ.
	 */
	if ((SUNWSYMTAB(lmp) != NULL) &&
	    (((char *)SYMTAB(lmp) <= (char *)SUNWSYMTAB(lmp)) ||
	    (((char *)SYMTAB(lmp) >=
	    (SUNWSYMSZ(lmp) + (char *)SUNWSYMTAB(lmp)))))) {
		ASSERT(0);
		SUNWSYMTAB(lmp) = NULL;
		SUNWSYMSZ(lmp) = 0;
	}

	/*
	 * If configuration file use hasn't been disabled, and a configuration
	 * file hasn't already been set via an environment variable, see if any
	 * application-specific configuration file is specified.  An LD_CONFIG
	 * setting is used first, but if this image was generated via crle(1)
	 * then a default configuration file is a fall-back.
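	 *
	 * For example (paths illustrative), a configuration file may be
	 * named explicitly in the environment:
	 *
	 *	LD_CONFIG=/export/cfg/ld.config ./prog
	 *
	 * whereas an application processed by crle(1) instead carries
	 * DT_CONFIG (cfile) or DF_1_CONFALT (crle) in its dynamic section,
	 * from which the fall-back established below is derived.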
	 */
	if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) {
		if (cfile)
			config->c_name = (const char *)(cfile +
			    (char *)STRTAB(lmp));
		else if (crle) {
			rtld_flags |= RT_FL_CONFAPP;
#ifndef	EXPAND_RELATIVE
			FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
		}
	}

	if (rpath)
		RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp));
	if (fltr) {
		/*
		 * If this object is a global filter, duplicate the filtee
		 * string name(s) so that REFNAME() is available in core files.
		 * This kludge was useful for debuggers at one point, but only
		 * when the filtee name was an individual full path.
		 */
		if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
	}

	if (rtld_flags & RT_FL_RELATIVE)
		FLAGS1(lmp) |= FL1_RT_RELATIVE;

	/*
	 * For Intel ABI compatibility.  It's possible that a JMPREL can be
	 * specified without any other relocations (e.g. a dynamic executable
	 * normally only contains .plt relocations).  If this is the case then
	 * no REL, RELSZ or RELENT will have been created.  For us to be able
	 * to traverse the .plt relocations under LD_BIND_NOW we need to know
	 * the RELENT for these relocations.  Refer to elf_reloc() for more
	 * details.
	 */
	if (!RELENT(lmp) && JMPREL(lmp))
		RELENT(lmp) = sizeof (Rel);

	/*
	 * Establish any per-object auditing.  If we're establishing `main's
	 * link-map, it's too early to go searching for audit objects so just
	 * hold the object name for later (see setup()).
	 */
	if (audit) {
		char	*cp = audit + (char *)STRTAB(lmp);

		if (*cp) {
			if (((AUDITORS(lmp) =
			    calloc(1, sizeof (Audit_desc))) == 0) ||
			    ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) {
				remove_so(0, lmp);
				return (0);
			}
			if (lml_main.lm_head) {
				if (audit_setup(lmp, AUDITORS(lmp), 0) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags;
				lml->lm_flags |= LML_FLG_LOCAUDIT;
			}
		}
	}

	if ((CONDVAR(lmp) = rt_cond_create()) == 0) {
		remove_so(0, lmp);
		return (0);
	}
	if (oname && ((append_alias(lmp, oname, 0)) == 0)) {
		remove_so(0, lmp);
		return (0);
	}

	/*
	 * Add the mapped object to the end of the link map list.
	 */
	lm_append(lml, lmco, lmp);
	return (lmp);
}

/*
 * Assign hardware/software capabilities.
 */
void
cap_assign(Cap *cap, Rt_map *lmp)
{
	while (cap->c_tag != CA_SUNW_NULL) {
		switch (cap->c_tag) {
		case CA_SUNW_HW_1:
			HWCAP(lmp) = cap->c_un.c_val;
			break;
		case CA_SUNW_SF_1:
			SFCAP(lmp) = cap->c_un.c_val;
		}
		cap++;
	}
}

/*
 * Map in an ELF object.
 * Takes an open file descriptor for the object to map and its pathname;
 * returns a pointer to a Rt_map structure for this object, or 0 on error.
 */
static Rt_map *
elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname,
    int fd)
{
	int		i;		/* general temporary */
	Off		memsize = 0;	/* total memory size of pathname */
	Off		mentry;		/* entry point */
	Ehdr		*ehdr;		/* ELF header of pathname */
	Phdr		*phdr;		/* first Phdr in file */
	Phdr		*phdr0;		/* saved first Phdr in file */
	Phdr		*pptr;		/* working Phdr */
	Phdr		*fph = 0;	/* first loadable Phdr */
	Phdr		*lph;		/* last loadable Phdr */
	Phdr		*lfph = 0;	/* last loadable (filesz != 0) Phdr */
	Phdr		*lmph = 0;	/* last loadable (memsz != 0) Phdr */
	Phdr		*swph = 0;	/* program header for SUNWBSS */
	Phdr		*tlph = 0;	/* program header for PT_TLS */
	Phdr		*unwindph = 0;	/* program header for PT_SUNW_UNWIND */
	Cap		*cap = 0;	/* program header for SUNWCAP */
	Dyn		*mld = 0;	/* DYNAMIC structure for pathname */
	size_t		size;		/* size of elf and program headers */
	caddr_t		faddr = 0;	/* mapping address of pathname */
	Rt_map		*lmp;		/* link map created */
	caddr_t		paddr;		/* start of padded image */
	Off		plen;		/* size of image including padding */
	Half		etype;
	int		fixed;
	Mmap		*mmaps;
	uint_t		mmapcnt = 0;
	Xword		align = 0;

	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;

	/*
	 * If this is a relocatable object, special processing is required.
	 */
	if ((etype = ehdr->e_type) == ET_REL)
		return (elf_obj_file(lml, lmco, pname, fd));

	/*
	 * If this isn't a dynamic executable or shared object we can't process
	 * it.  If this is a dynamic executable then all addresses are fixed.
	 */
	if (etype == ET_EXEC)
		fixed = 1;
	else if (etype == ET_DYN)
		fixed = 0;
	else {
		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname,
		    conv_ehdr_type(etype, 0));
		return (0);
	}

	/*
	 * If our original mapped page was not large enough to hold all the
	 * program headers, remap them.
	 */
	size = (size_t)((char *)ehdr->e_phoff +
	    (ehdr->e_phnum * ehdr->e_phentsize));
	if (size > fmap->fm_fsize) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}
	if (size > fmap->fm_msize) {
		fmap_setup();
		if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ,
		    fmap->fm_mflags, fd, 0)) == MAP_FAILED) {
			int	err = errno;
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname,
			    strerror(err));
			return (0);
		}
		fmap->fm_msize = size;
		/* LINTED */
		ehdr = (Ehdr *)fmap->fm_maddr;
	}
	/* LINTED */
	phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize);

	/*
	 * Get entry point.
	 */
	mentry = ehdr->e_entry;

	/*
	 * Point at program headers and perform some basic validation.
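	 *
	 * For example (illustrative), the loop below accepts a layout such
	 * as
	 *
	 *	PT_LOAD	p_vaddr 0x00000	p_filesz != 0		(text)
	 *	PT_LOAD	p_vaddr 0x40000	p_memsz >= p_filesz	(data/bss)
	 *
	 * loadable segments must appear in ascending p_vaddr order, and at
	 * least one must supply both a file size and a memory size.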
	 */
	for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
	    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
		if ((pptr->p_type == PT_LOAD) ||
		    (pptr->p_type == PT_SUNWBSS)) {

			if (fph == 0) {
				fph = pptr;
			/* LINTED argument lph is initialized in first pass */
			} else if (pptr->p_vaddr <= lph->p_vaddr) {
				eprintf(lml, ERR_ELF,
				    MSG_INTL(MSG_GEN_INVPRGHDR), pname);
				return (0);
			}

			lph = pptr;

			if (pptr->p_memsz)
				lmph = pptr;
			if (pptr->p_filesz)
				lfph = pptr;
			if (pptr->p_type == PT_SUNWBSS)
				swph = pptr;
			if (pptr->p_align > align)
				align = pptr->p_align;

		} else if (pptr->p_type == PT_DYNAMIC) {
			mld = (Dyn *)(pptr->p_vaddr);
		} else if ((pptr->p_type == PT_TLS) && pptr->p_memsz) {
			tlph = pptr;
		} else if (pptr->p_type == PT_SUNWCAP) {
			cap = (Cap *)(pptr->p_vaddr);
		} else if (pptr->p_type == PT_SUNW_UNWIND) {
			unwindph = pptr;
		}
	}

#if defined(MAP_ALIGN)
	/*
	 * Make sure the maximum page alignment is a power of 2 >= the system
	 * page size, for use with MAP_ALIGN.
	 */
	align = M_PROUND(align);
#endif

	/*
	 * We'd better have at least one loadable segment, together with some
	 * specified file and memory size.
	 */
	if ((fph == 0) || (lmph == 0) || (lfph == 0)) {
		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname);
		return (0);
	}

	/*
	 * Check that the file's size accounts for the loadable sections
	 * we're going to map in (failure to do this may cause spurious
	 * bus errors if we're given a truncated file).
	 */
	if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}

	/*
	 * Memsize must be page rounded so that if we add object padding
	 * at the end it will start at the beginning of a page.
	 */
	plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) -
	    M_PTRUNC((ulong_t)fph->p_vaddr));

	/*
	 * Determine if an existing mapping is acceptable.
	 */
	if (interp && (lml->lm_flags & LML_FLG_BASELM) &&
	    (strcmp(pname, interp->i_name) == 0)) {
		/*
		 * If this is the interpreter then it has already been mapped
		 * and we have the address so don't map it again.  Note that
		 * the common occurrence of a reference to the interpreter
		 * (libdl -> ld.so.1) will have been caught during filter
		 * initialization (see elf_lookup_filtee()).  However, some
		 * ELF implementations are known to record libc.so.1 as the
		 * interpreter, and thus this test catches this behavior.
		 */
		paddr = faddr = interp->i_faddr;

	} else if ((fixed == 0) && (r_debug.rtd_objpad == 0) &&
	    (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) &&
	    (fph->p_filesz == fph->p_memsz) &&
	    (((Xword)fmap->fm_maddr % align) == 0)) {
		/*
		 * If the mapping required has already been established from
		 * the initial page we don't need to do anything more.  Reset
		 * the fmap address so that any later files start a new fmap.
		 * This is really an optimization for filters, such as
		 * libdl.so, which should only require one page.
		 */
		paddr = faddr = fmap->fm_maddr;
		fmap->fm_maddr = 0;
		fmap_setup();
	}

	/*
	 * Allocate a mapping array to retain mapped segment information.
	 */
	if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0)
		return (0);

	/*
	 * If we're reusing an existing mapping, determine the object's etext
	 * address.  Otherwise map the file (which will calculate the etext
	 * address as part of the mapping process).
	 */
	if (faddr) {
		caddr_t	base;

		if (fixed)
			base = 0;
		else
			base = faddr;

		/* LINTED */
		phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize);

		for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
		    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
			if (pptr->p_type != PT_LOAD)
				continue;

			mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base);
			mmaps[mmapcnt].m_msize = pptr->p_memsz;
			mmaps[mmapcnt].m_fsize = pptr->p_filesz;
			mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC);
			mmapcnt++;

			if (!(pptr->p_flags & PF_W)) {
				fmap->fm_etext = (ulong_t)pptr->p_vaddr +
				    (ulong_t)pptr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
			}
		}
	} else {
		/*
		 * Map the file.
		 */
		if (!(faddr = elf_map_it(lml, pname, memsize, ehdr, fph, lph,
		    &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt)))
			return (0);
	}

	/*
	 * Calculate absolute base addresses and entry points.
	 */
	if (!fixed) {
		if (mld)
			/* LINTED */
			mld = (Dyn *)((Off)mld + faddr);
		if (cap)
			/* LINTED */
			cap = (Cap *)((Off)cap + faddr);
		mentry += (Off)faddr;
	}

	/*
	 * Create new link map structure for newly mapped shared object.
	 */
	if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr,
	    fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps,
	    mmapcnt))) {
		(void) munmap((caddr_t)faddr, memsize);
		return (0);
	}

	/*
	 * Start the system loading in the ELF information we'll be processing.
	 */
	if (REL(lmp)) {
		(void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) +
		    (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp),
		    MADV_WILLNEED);
	}

	/*
	 * If this shared object contains any special segments, record them.
	 */
	if (swph) {
		FLAGS(lmp) |= FLG_RT_SUNWBSS;
		SUNWBSS(lmp) = phdr + (swph - phdr0);
	}
	if (tlph && (tls_assign(lml, lmp, (phdr + (tlph - phdr0))) == 0)) {
		remove_so(lml, lmp);
		return (0);
	}

	if (unwindph)
		PTUNWIND(lmp) = phdr + (unwindph - phdr0);

	if (cap)
		cap_assign(cap, lmp);

	return (lmp);
}

/*
 * Function to correct protection settings.  Segments are all mapped initially
 * with permissions as given in the segment header.  We need to turn on write
 * permissions on a text segment if there are any relocations against that
 * segment, and then turn write permission back off again before returning
 * control to the user.  This function turns the permission on or off depending
 * on the value of the argument.
 */
int
elf_set_prot(Rt_map *lmp, int permission)
{
	Mmap	*mmaps;

	/*
	 * If this is an allocated image (ie. a relocatable object) we can't
	 * mprotect() anything.
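	 *
	 * A minimal usage sketch (the caller shown is assumed; see
	 * elf_reloc() for the real one): text relocations are bracketed as
	 *
	 *	if (elf_set_prot(lmp, PROT_WRITE) == 0)
	 *		return (0);
	 *	... apply relocations, including any against the text ...
	 *	if (elf_set_prot(lmp, 0) == 0)
	 *		return (0);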
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC)
		return (1);

	DBG_CALL(Dbg_file_prot(lmp, permission));

	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) {
		if (mmaps->m_perm & PROT_WRITE)
			continue;

		if (mprotect(mmaps->m_vaddr, mmaps->m_msize,
		    (mmaps->m_perm | permission)) == -1) {
			int	err = errno;
			eprintf(LIST(lmp), ERR_FATAL, MSG_INTL(MSG_SYS_MPROT),
			    NAME(lmp), strerror(err));
			return (0);
		}
	}
	return (1);
}

/*
 * Build full pathname of shared object from given directory name and filename.
 */
static char *
elf_get_so(const char *dir, const char *file)
{
	static char	pname[PATH_MAX];

	(void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file);
	return (pname);
}

/*
 * The copy relocation is recorded in a copy structure which will be applied
 * after all other relocations are carried out.  This provides for copying data
 * that must be relocated itself (ie. pointers in shared objects).  This
 * structure also provides a means of binding RTLD_GROUP dependencies to any
 * copy relocations that have been taken from any group members.
 *
 * If the size of the .bss area available for the copy information is not the
 * same as that of the source data, inform the user if we're under ldd(1)
 * control (this checking was only established in 5.3, so by only issuing an
 * error via ldd(1) we maintain the standard set by previous releases).
 */
int
elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym,
    Rt_map *dlmp, const void *dadd)
{
	Rel_copy	rc;
	Lm_list		*lml = LIST(rlmp);

	rc.r_name = name;
	rc.r_rsym = rsym;		/* the new reference symbol and its */
	rc.r_rlmp = rlmp;		/*	associated link-map */
	rc.r_dlmp = dlmp;		/* the defining link-map */
	rc.r_dsym = dsym;		/* the original definition */
	rc.r_radd = radd;
	rc.r_dadd = dadd;

	if (rsym->st_size > dsym->st_size)
		rc.r_size = (size_t)dsym->st_size;
	else
		rc.r_size = (size_t)rsym->st_size;

	if (alist_append(&COPY(dlmp), &rc, sizeof (Rel_copy),
	    AL_CNT_COPYREL) == 0) {
		if (!(lml->lm_flags & LML_FLG_TRC_WARN))
			return (0);
		else
			return (1);
	}
	if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) {
		if (alist_append(&COPY(rlmp), &dlmp,
		    sizeof (Rt_map *), AL_CNT_COPYREL) == 0) {
			if (!(lml->lm_flags & LML_FLG_TRC_WARN))
				return (0);
			else
				return (1);
		}
		FLAGS1(dlmp) |= FL1_RT_COPYTOOK;
	}

	/*
	 * If we are tracing (ldd), warn the user if
	 *	1) the size from the reference symbol differs from the
	 *	    copy definition.  We can only copy as much data as the
	 *	    reference (dynamic executables) entry allows.
	 *	2) the copy definition has STV_PROTECTED visibility.
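	 *
	 * An illustrative scenario (names hypothetical): a dynamic
	 * executable references a data item defined in a shared object,
	 *
	 *	extern int	foo_state;		(in libfoo.so.1)
	 *	int		use() { return (foo_state); }
	 *
	 * The executable reserves .bss space for foo_state, and an R_*_COPY
	 * relocation copies the initialized data in at startup.  Should
	 * libfoo.so.1 later grow the definition, only the smaller reference
	 * size can be copied - exactly case 1) above.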
	 */
	if (lml->lm_flags & LML_FLG_TRC_WARN) {
		if (rsym->st_size != dsym->st_size) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(rlmp), EC_XWORD(rsym->st_size),
			    NAME(dlmp), EC_XWORD(dsym->st_size));
			if (rsym->st_size > dsym->st_size)
				(void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA),
				    NAME(dlmp));
			else
				(void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC),
				    NAME(rlmp));
		}

		if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_PROT),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(dlmp));
		}
	}

	DBG_CALL(Dbg_reloc_apply_val(lml, ELF_DBG_RTLD, (Xword)radd,
	    (Xword)rc.r_size));
	return (1);
}

/*
 * Determine the symbol location of an address within a link-map.  Look for
 * the nearest symbol (whose value is less than or equal to the required
 * address).  This is the object specific part of dladdr().
 */
static void
elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags)
{
	ulong_t		ndx, cnt, base, _value;
	Sym		*sym, *_sym;
	const char	*str;
	int		_flags;
	uint_t		*dynaddr_ndx;
	uint_t		dynaddr_n = 0;
	ulong_t		value;

	/*
	 * If SUNWSYMTAB() is non-NULL, then it references a special version of
	 * the dynsym that starts with any local function symbols that exist in
	 * the library and then moves to the data held in SYMTAB().  In this
	 * case, SUNWSYMSZ tells us how long the symbol table is.  The
	 * availability of local function symbols will enhance the results
	 * we can provide.
	 *
	 * If SUNWSYMTAB() is non-NULL, then there might also be a
	 * SUNWSYMSORT() vector associated with it.  SUNWSYMSORT() contains
	 * an array of indices into SUNWSYMTAB, sorted by increasing
	 * address.  We can use this to do an O(log N) search instead of a
	 * brute force search.
	 *
	 * If SUNWSYMTAB() is NULL, then SYMTAB() references a dynsym that
	 * contains only global symbols.  In that case, the length of
	 * the symbol table comes from the nchain field of the related
	 * symbol lookup hash table.
	 */
	str = STRTAB(lmp);
	if (SUNWSYMSZ(lmp) == NULL) {
		sym = SYMTAB(lmp);
		/*
		 * If we don't have a .hash table there are no symbols
		 * to look at.
		 */
		if (HASH(lmp) == 0)
			return;
		cnt = HASH(lmp)[1];
	} else {
		sym = SUNWSYMTAB(lmp);
		cnt = SUNWSYMSZ(lmp) / SYMENT(lmp);
		dynaddr_ndx = SUNWSYMSORT(lmp);
		if (dynaddr_ndx != NULL)
			dynaddr_n = SUNWSYMSORTSZ(lmp) / SUNWSORTENT(lmp);
	}

	if (FLAGS(lmp) & FLG_RT_FIXED)
		base = 0;
	else
		base = ADDR(lmp);

	if (dynaddr_n > 0) {		/* Binary search */
		long	low = 0, low_bnd;
		long	high = dynaddr_n - 1, high_bnd;
		long	mid;
		Sym	*mid_sym;

		/*
		 * Note that SUNWSYMSORT only contains symbol types that
		 * supply memory addresses, so there's no need to check and
		 * filter out any other types.
		 */
		low_bnd = low;
		high_bnd = high;
		_sym = NULL;
		while (low <= high) {
			mid = (low + high) / 2;
			mid_sym = &sym[dynaddr_ndx[mid]];
			value = mid_sym->st_value + base;
			if (addr < value) {
				if ((sym[dynaddr_ndx[high]].st_value + base) >=
				    addr)
					high_bnd = high;
				high = mid - 1;
			} else if (addr > value) {
				if ((sym[dynaddr_ndx[low]].st_value + base) <=
				    addr)
					low_bnd = low;
				low = mid + 1;
			} else {
				_sym = mid_sym;
				_value = value;
				break;
			}
		}
		/*
		 * If the above didn't find it exactly, then we must
		 * return the closest symbol with a value that doesn't
		 * exceed the one we are looking for.  If that symbol exists,
		 * it will lie in the range bounded by low_bnd and
		 * high_bnd.  This is a linear search, but a short one.
		 */
		if (_sym == NULL) {
			for (mid = low_bnd; mid <= high_bnd; mid++) {
				mid_sym = &sym[dynaddr_ndx[mid]];
				value = mid_sym->st_value + base;
				if (addr >= value) {
					_sym = mid_sym;
					_value = value;
				} else {
					break;
				}
			}
		}
	} else {			/* Linear search */
		for (_value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) {
			/*
			 * Skip expected symbol types that are not functions
			 * or data:
			 *	- A symbol table starts with an undefined
			 *	    symbol in slot 0.  If we are using
			 *	    SUNWSYMTAB(), there will be a second
			 *	    undefined symbol right before the globals.
			 *	- The local part of SUNWSYMTAB() contains a
			 *	    series of function symbols.  Each section
			 *	    starts with an initial STT_FILE symbol.
			 */
			if ((sym->st_shndx == SHN_UNDEF) ||
			    (ELF_ST_TYPE(sym->st_info) == STT_FILE))
				continue;

			value = sym->st_value + base;
			if (value > addr)
				continue;
			if (value < _value)
				continue;

			_sym = sym;
			_value = value;

			/*
			 * Note, because we accept local and global symbols
			 * we could find a section symbol that matches the
			 * associated address, which means that the symbol
			 * name will be null.  In this case continue the
			 * search in case we can find a global symbol of
			 * the same value.
			 */
			if ((value == addr) &&
			    (ELF_ST_TYPE(sym->st_info) != STT_SECTION))
				break;
		}
	}

	_flags = flags & RTLD_DL_MASK;
	if (_sym) {
		if (_flags == RTLD_DL_SYMENT)
			*info = (void *)_sym;
		else if (_flags == RTLD_DL_LINKMAP)
			*info = (void *)lmp;

		dlip->dli_sname = str + _sym->st_name;
		dlip->dli_saddr = (void *)_value;
	} else {
		/*
		 * addr lies between the beginning of the mapped segment and
		 * the first global symbol.  We have no symbol to return
		 * and the caller requires one.  We use _START_, the base
		 * address of the mapping.
		 */

		if (_flags == RTLD_DL_SYMENT) {
			/*
			 * An actual symbol struct is needed, so we
			 * construct one for _START_.  To do this in a
			 * fully accurate way requires a different symbol
			 * for each mapped segment.  This requires the
			 * use of dynamic memory and a mutex.  That's too much
			 * plumbing for a fringe case of limited importance.
			 *
			 * Fortunately, we can simplify:
			 *	- Only the st_size and st_info fields are
			 *	    useful outside of the linker internals.
			 *	    The others reference things that outside
			 *	    code cannot see, and can be set to 0.
			 *	- It's just a label and there is no size
			 *	    to report.  So, the size should be 0.
			 * This means that only st_info needs a non-zero
			 * (constant) value.  A static struct will suffice.
			 * It must be const (readonly) so the caller can't
			 * change its meaning for subsequent callers.
			 */
			static const Sym fsym = { 0, 0, 0,
			    ELF_ST_INFO(STB_LOCAL, STT_OBJECT) };
			*info = (void *)&fsym;
		}

		dlip->dli_sname = MSG_ORIG(MSG_SYM_START);
		dlip->dli_saddr = (void *)ADDR(lmp);
	}
}

static void
elf_lazy_cleanup(Alist *alp)
{
	Rt_map	**lmpp;
	Aliste	off;

	/*
	 * Cleanup any link-maps added to this dynamic list and free it.
	 */
	for (ALIST_TRAVERSE(alp, off, lmpp))
		FLAGS(*lmpp) &= ~FLG_RT_DLSYM;
	free(alp);
}

/*
 * This routine is called upon to search for a symbol from the dependencies of
 * the initial link-map.  To maintain lazy loading's goal of reducing the
 * number of objects mapped, any symbol search is first carried out using the
 * objects that already exist in the process (either on a link-map list or
 * handle).  If a symbol can't be found, and lazy dependencies are still
 * pending, this routine loads the dependencies in an attempt to locate the
 * symbol.
 *
 * Only new objects are inspected as we will have already inspected presently
 * loaded objects before calling this routine.  However, a new object may not
 * be new - although the di_lmp might be zero, the object may have been mapped
 * as someone else's dependency.  Thus there's a possibility of some symbol
 * search duplication.
 */
Sym *
elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo)
{
	Sym		*sym = 0;
	Alist		*alist = 0;
	Aliste		off;
	Rt_map		**lmpp, *lmp = slp->sl_imap;
	const char	*name = slp->sl_name;

	if (alist_append(&alist, &lmp, sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0)
		return (0);
	FLAGS(lmp) |= FLG_RT_DLSYM;

	for (ALIST_TRAVERSE(alist, off, lmpp)) {
		uint_t	cnt = 0;
		Slookup	sl = *slp;
		Dyninfo	*dip;

		/*
		 * Loop through the DT_NEEDED entries examining each object for
		 * the symbol.  If the symbol is not found the object is in
		 * turn added to the alist, so that its DT_NEEDED entries may
		 * be examined.
		 */
		lmp = *lmpp;
		for (dip = DYNINFO(lmp); cnt < DYNINFOCNT(lmp); cnt++, dip++) {
			Rt_map	*nlmp;

			if (((dip->di_flags & FLG_DI_NEEDED) == 0) ||
			    dip->di_info)
				continue;

			/*
			 * If this entry defines a lazy dependency try loading
			 * it.  If the file can't be loaded, consider this
			 * non-fatal and continue the search (lazy loaded
			 * dependencies need not exist and their loading should
			 * only be fatal if called from a relocation).
			 *
			 * If the file is already loaded and relocated we must
			 * still inspect it for symbols, even though it might
			 * have already been searched.  This lazy load operation
			 * might have promoted the permissions of the object,
			 * and thus made the object applicable for this symbol
			 * search, whereas before the object might have been
			 * skipped.
			 */
			if ((nlmp = elf_lazy_load(lmp, cnt, name)) == 0)
				continue;

			/*
			 * If this object isn't yet a part of the dynamic list
			 * then inspect it for the symbol.  If the symbol isn't
			 * found add the object to the dynamic list so that we
			 * can inspect its dependencies.
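			 *
			 * As a hedged illustration: for a process whose
			 * pending lazy dependencies are libA.so.1 and
			 * libB.so.1 (names hypothetical), a dlsym() request
			 * that fails over the presently loaded objects
			 * arrives here; libA.so.1 is loaded and searched,
			 * its own pending lazy dependencies are appended to
			 * the list, and so on, until the symbol is found or
			 * the candidates are exhausted.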
			 */
			if (FLAGS(nlmp) & FLG_RT_DLSYM)
				continue;

			sl.sl_imap = nlmp;
			if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, binfo))
				break;

			/*
			 * Some dlsym() operations are already traversing a
			 * link-map (dlopen(0)), and thus there's no need to
			 * build our own dynamic dependency list.
			 */
			if ((sl.sl_flags & LKUP_NODESCENT) == 0) {
				if (alist_append(&alist, &nlmp,
				    sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) {
					elf_lazy_cleanup(alist);
					return (0);
				}
				FLAGS(nlmp) |= FLG_RT_DLSYM;
			}
		}
		if (sym)
			break;
	}

	elf_lazy_cleanup(alist);
	return (sym);
}

/*
 * Warning message for bad r_offset.
 */
void
elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset,
    ulong_t rsymndx)
{
	const char	*name = (char *)0;
	Lm_list		*lml = LIST(lmp);
	int		trace;

	if ((lml->lm_flags & LML_FLG_TRC_ENABLE) &&
	    (((rtld_flags & RT_FL_SILENCERR) == 0) ||
	    (lml->lm_flags & LML_FLG_TRC_VERBOSE)))
		trace = 1;
	else
		trace = 0;

	if ((trace == 0) && (DBG_ENABLED == 0))
		return;

	if (rsymndx) {
		Sym	*symref = (Sym *)((ulong_t)SYMTAB(lmp) +
		    (rsymndx * SYMENT(lmp)));

		if (ELF_ST_BIND(symref->st_info) != STB_LOCAL)
			name = (char *)(STRTAB(lmp) + symref->st_name);
	}

	if (name == 0)
		name = MSG_ORIG(MSG_STR_EMPTY);

	if (trace) {
		const char	*rstr;

		rstr = _conv_reloc_type((uint_t)rtype);
		(void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name,
		    EC_ADDR(roffset));
		return;
	}

	Dbg_reloc_error(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, name);
}

/*
 * Resolve a static TLS relocation.
 */
long
elf_static_tls(Rt_map *lmp, Sym *sym, void *rel, uchar_t rtype, char *name,
    ulong_t roffset, long value)
{
	Lm_list	*lml = LIST(lmp);

	/*
	 * Relocations against a static TLS block have limited support once
	 * process initialization has completed.  Any error condition should be
	 * discovered by testing for DF_STATIC_TLS as part of loading an object,
	 * however individual relocations are tested in case the dynamic flag
	 * had not been set when this object was built.
	 */
	if (PTTLS(lmp) == 0) {
		DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
		    _conv_reloc_type((uint_t)rtype), NAME(lmp),
		    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
		return (0);
	}

	/*
	 * If no static TLS has been set aside for this object, determine if
	 * any can be obtained.  Enforce that any object using static TLS is
	 * non-deletable.
	 */
	if (TLSSTATOFF(lmp) == 0) {
		FLAGS1(lmp) |= FL1_RT_TLSSTAT;
		MODE(lmp) |= RTLD_NODELETE;

		if (tls_assign(lml, lmp, PTTLS(lmp)) == 0) {
			DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
			    M_REL_SHT_TYPE, rel, NULL, name));
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
			    _conv_reloc_type((uint_t)rtype), NAME(lmp),
			    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
			return (0);
		}
	}

	/*
	 * Typically, a static TLS offset is maintained as a symbol's value.
	 * For local symbols that are not a part of the dynamic symbol table,
	 * the TLS relocation points to a section symbol, and the static TLS
	 * offset was deposited in the associated GOT table.  Make sure the GOT
	 * is cleared, so that the value isn't reused in do_reloc().
	 */
	if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) {
		if ((ELF_ST_TYPE(sym->st_info) == STT_SECTION)) {
			value = *(long *)roffset;
			*(long *)roffset = 0;
		} else {
			value = sym->st_value;
		}
	}
	return (-(TLSSTATOFF(lmp) - value));
}
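
/*
 * An illustrative note (not from the original source): the condition guarded
 * against above typically arises with initial-exec TLS.  An object compiled
 * as
 *
 *	static __thread int	tvar;
 *	int			get(void) { return (tvar); }
 *
 * under the initial-exec model expects tvar to live within the static TLS
 * reservation.  Loaded at process startup this is satisfied via tls_assign();
 * brought in later by dlopen(3C), it can succeed only while backup static
 * TLS remains, hence the DF_STATIC_TLS test at load time and the
 * per-relocation checks above.
 */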