/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Object file dependent support for ELF objects.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/procfs.h>
#include	<sys/mman.h>
#include	<sys/debug.h>
#include	<string.h>
#include	<limits.h>
#include	<dlfcn.h>
#include	<debug.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"

/*
 * Default and secure dependency search paths.
 */
static Pnode		elf_dflt_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB_64),	0,	MSG_PTH_LIB_64_SIZE,
	    LA_SER_DEFAULT,		0,	&elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB_64),	0,	MSG_PTH_USRLIB_64_SIZE,
	    LA_SER_DEFAULT,		0,	0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB),	0,	MSG_PTH_LIB_SIZE,
	    LA_SER_DEFAULT,		0,	&elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB),	0,	MSG_PTH_USRLIB_SIZE,
	    LA_SER_DEFAULT,		0,	0 }
#endif
};

static Pnode		elf_secure_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE_64),	0,	MSG_PTH_LIBSE_64_SIZE,
	    LA_SER_SECURE,		0,	&elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE_64), 0,
	    MSG_PTH_USRLIBSE_64_SIZE,
	    LA_SER_SECURE,		0,	0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE),	0,	MSG_PTH_LIBSE_SIZE,
	    LA_SER_SECURE,		0,	&elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE),	0,	MSG_PTH_USRLIBSE_SIZE,
	    LA_SER_SECURE,		0,	0 }
#endif
};

/*
 * Defines for local functions.
 */
static Pnode	*elf_fix_name(const char *, Rt_map *, uint_t);
static int	elf_are_u(Rej_desc *);
static void	elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int);
static ulong_t	elf_entry_pt(void);
static char	*elf_get_so(const char *, const char *);
static Rt_map	*elf_map_so(Lm_list *, Aliste, const char *, const char *, int);
static int	elf_needed(Lm_list *, Aliste, Rt_map *);
static void	elf_unmap_so(Rt_map *);
static int	elf_verify_vers(const char *, Rt_map *, Rt_map *);

/*
 * Functions and data accessed through indirect pointers.
 */
Fct elf_fct = {
	elf_are_u,
	elf_entry_pt,
	elf_map_so,
	elf_unmap_so,
	elf_needed,
	lookup_sym,
	elf_reloc,
	elf_dflt_dirs,
	elf_secure_dirs,
	elf_fix_name,
	elf_get_so,
	elf_dladdr,
	dlsym_handle,
	elf_verify_vers,
	elf_set_prot
};
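
/*
 * Note: ld.so.1 keeps one such vector of functions per supported object
 * format.  Generic link-map code dispatches through the vector of whichever
 * format claimed a file (each format's "are you?" entry point is tried in
 * turn), which is how ELF and a.out support coexist behind one interface.
 * The a.out format, where supported, supplies a parallel table.
 */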

/*
 * Redefine NEEDED name if necessary.
 */
static Pnode *
elf_fix_name(const char *name, Rt_map *clmp, uint_t orig)
{
	/*
	 * For ABI compliance, if we are asked for ld.so.1, then really give
	 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1).
	 */
	if (((*name == '/') &&
	    /* BEGIN CSTYLED */
#if	defined(_ELF64)
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) ||
#else
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) ||
#endif
	    (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) {
		/* END CSTYLED */
		Pnode	*pnp;

		DBG_CALL(Dbg_file_fixname(LIST(clmp), name,
		    MSG_ORIG(MSG_PTH_LIBSYS)));
		if (((pnp = calloc(sizeof (Pnode), 1)) == 0) ||
		    ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) {
			if (pnp)
				free(pnp);
			return (0);
		}
		pnp->p_len = MSG_PTH_LIBSYS_SIZE;
		pnp->p_orig = (orig & PN_SER_MASK);
		return (pnp);
	}

	return (expand_paths(clmp, name, orig, 0));
}

/*
 * Determine if we have been given an ELF file and if so determine if the file
 * is compatible.  Returns 1 if true, else 0 and sets the reject descriptor
 * with associated error information.
 */
static int
elf_are_u(Rej_desc *rej)
{
	Ehdr	*ehdr;

	/*
	 * Determine if we're an elf file.  If not simply return, we don't set
	 * any rejection information as this test allows us to scroll through
	 * the objects we support (ELF, AOUT).
	 */
	if (fmap->fm_fsize < sizeof (Ehdr) ||
	    fmap->fm_maddr[EI_MAG0] != ELFMAG0 ||
	    fmap->fm_maddr[EI_MAG1] != ELFMAG1 ||
	    fmap->fm_maddr[EI_MAG2] != ELFMAG2 ||
	    fmap->fm_maddr[EI_MAG3] != ELFMAG3) {
		return (0);
	}

	/*
	 * Check class and encoding.
	 */
	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;
	if (ehdr->e_ident[EI_CLASS] != M_CLASS) {
		rej->rej_type = SGS_REJ_CLASS;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS];
		return (0);
	}
	if (ehdr->e_ident[EI_DATA] != M_DATA) {
		rej->rej_type = SGS_REJ_DATA;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA];
		return (0);
	}
	if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) &&
	    (ehdr->e_type != ET_DYN)) {
		rej->rej_type = SGS_REJ_TYPE;
		rej->rej_info = (uint_t)ehdr->e_type;
		return (0);
	}

	/*
	 * Verify machine specific flags, and hardware capability requirements.
	 */
	if ((elf_mach_flags_check(rej, ehdr) == 0) ||
	    ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0)))
		return (0);

	/*
	 * Verify ELF version.  ??? is this too restrictive ???
	 */
	if (ehdr->e_version > EV_CURRENT) {
		rej->rej_type = SGS_REJ_VERSION;
		rej->rej_info = (uint_t)ehdr->e_version;
		return (0);
	}
	return (1);
}
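
/*
 * For illustration: on a 32-bit link-map list, an ELFCLASS64 file fails the
 * M_CLASS test above and is rejected with rej_type set to SGS_REJ_CLASS and
 * rej_info holding the offending e_ident[EI_CLASS] value.  Callers such as
 * the load_one()/file_notfound() path typically convert the descriptor into
 * a user visible diagnostic.
 */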

/*
 * The runtime linker employs lazy loading to provide the libraries needed for
 * debugging, preloading .o's and dldump().  As these are seldom used, the
 * standard startup of ld.so.1 doesn't initialize all the information necessary
 * to perform plt relocation on ld.so.1's link-map.  The first time lazy
 * loading is called we get here to perform these initializations:
 *
 *  o	elf_needed() is called to set up the DYNINFO() indexes for each lazy
 *	dependency.  Typically, for all other objects, this is called during
 *	analyze_so(), but as ld.so.1 is self-contained we skip this processing.
 *
 *  o	For intel, ld.so.1's JMPSLOT relocations need relative updates.  These
 *	are by default skipped thus delaying all relative relocation processing
 *	on every invocation of ld.so.1.
 */
int
elf_rtld_load()
{
	Lm_list	*lml = &lml_rtld;
	Rt_map	*lmp = lml->lm_head;

	if (lml->lm_flags & LML_FLG_PLTREL)
		return (1);

	/*
	 * As we need to refer to the DYNINFO() information, insure that it has
	 * been initialized.
	 */
	if (elf_needed(lml, ALIST_OFF_DATA, lmp) == 0)
		return (0);

#if	defined(__i386)
	/*
	 * This is a kludge to give ld.so.1 a performance benefit on i386.
	 * It's based around two factors.
	 *
	 *  o	JMPSLOT relocations (PLT's) actually need a relative relocation
	 *	applied to the GOT entry so that they can find PLT0.
	 *
	 *  o	ld.so.1 does not exercise *any* PLT's before it has made a call
	 *	to elf_lazy_load().  This is because all dynamic dependencies
	 *	are recorded as lazy dependencies.
	 */
	(void) elf_reloc_relacount((ulong_t)JMPREL(lmp),
	    (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp),
	    (ulong_t)ADDR(lmp));
#endif

	lml->lm_flags |= LML_FLG_PLTREL;
	return (1);
}

/*
 * Lazy load an object.
 */
Rt_map *
elf_lazy_load(Rt_map *clmp, Slookup *slp, uint_t ndx, const char *sym)
{
	Rt_map		*nlmp, *hlmp;
	Dyninfo		*dip = &DYNINFO(clmp)[ndx], *pdip;
	uint_t		flags = 0;
	Pnode		*pnp;
	const char	*name;
	Lm_list		*lml = LIST(clmp);
	Lm_cntl		*lmc;
	Aliste		lmco;

	/*
	 * If this dependency has already been processed, we're done.
	 */
	if (((nlmp = (Rt_map *)dip->di_info) != 0) ||
	    (dip->di_flags & FLG_DI_LDD_DONE))
		return (nlmp);

	/*
	 * If we're running under ldd(1), indicate that this dependency has
	 * been processed (see test above).  It doesn't matter whether the
	 * object is successfully loaded or not, this flag simply ensures that
	 * we don't repeatedly attempt to load an object that has already
	 * failed to load.  To do so would create multiple failure diagnostics
	 * for the same object under ldd(1).
	 */
	if (lml->lm_flags & LML_FLG_TRC_ENABLE)
		dip->di_flags |= FLG_DI_LDD_DONE;

	/*
	 * Determine the initial dependency name.
	 */
	name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val;
	DBG_CALL(Dbg_file_lazyload(clmp, name, sym));

	/*
	 * If this object needs to establish its own group, make sure a handle
	 * is created.
	 */
	if (dip->di_flags & FLG_DI_GROUP)
		flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE);

	/*
	 * Lazy dependencies are identified as DT_NEEDED entries with a
	 * DF_P1_LAZYLOAD flag in the previous DT_POSFLAG_1 element.  The
	 * dynamic information element that corresponds to the DT_POSFLAG_1
	 * entry is free, and thus used to store the present entrance
	 * identifier.  This identifier is used to prevent multiple attempts
	 * to load a failed lazy loadable dependency within the same runtime
	 * linker operation.  However, future attempts to reload this
	 * dependency are still possible.
	 */
	if (ndx && (pdip = dip - 1) && (pdip->di_flags & FLG_DI_POSFLAG1))
		pdip->di_info = (void *)slp->sl_id;

	/*
	 * Expand the requested name if necessary.
	 */
	if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0)
		return (0);

	/*
	 * Provided the object on the head of the link-map list has completed
	 * its relocation, create a new link-map control list for this request.
	 */
	hlmp = lml->lm_head;
	if (FLAGS(hlmp) & FLG_RT_RELOCED) {
		if ((lmc = alist_append(&lml->lm_lists, 0, sizeof (Lm_cntl),
		    AL_CNT_LMLISTS)) == 0) {
			remove_pnode(pnp);
			return (0);
		}
		lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists);
	} else {
		lmc = 0;
		lmco = ALIST_OFF_DATA;
	}
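
	/*
	 * Note: an Aliste is a byte offset into the underlying alist rather
	 * than an element index, hence the pointer subtraction above, and the
	 * use of ALIST_OFF_DATA (the offset of the first datum) when the
	 * existing control list is reused.
	 */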

	/*
	 * Load the associated object.
	 */
	dip->di_info = nlmp =
	    load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0);

	/*
	 * Remove any expanded pathname infrastructure.  Reduce the pending
	 * lazy dependency count of the caller, together with the link-map
	 * lists count of objects that still have lazy dependencies pending.
	 */
	remove_pnode(pnp);
	if (--LAZY(clmp) == 0)
		LIST(clmp)->lm_lazy--;

	/*
	 * Finish processing the objects associated with this request, and
	 * create an association between the caller and this dependency.
	 */
	if (nlmp && ((bind_one(clmp, nlmp, BND_NEEDED) == 0) ||
	    (analyze_lmc(lml, lmco, nlmp) == 0) ||
	    (relocate_lmc(lml, lmco, clmp, nlmp) == 0)))
		dip->di_info = nlmp = 0;

	/*
	 * If this lazyload has failed, and we've created a new link-map
	 * control list to which this request has added objects, then remove
	 * all the objects that have been associated to this request.
	 */
	if ((nlmp == 0) && lmc && lmc->lc_head)
		remove_lmc(lml, clmp, lmc, lmco, name);

	/*
	 * Finally, remove any link-map control list that was created.
	 */
	if (lmc)
		remove_cntl(lml, lmco);

	/*
	 * If this lazy loading failed, record the fact, and bump the lazy
	 * counts.
	 */
	if (nlmp == 0) {
		dip->di_flags |= FLG_DI_LAZYFAIL;
		if (LAZY(clmp)++ == 0)
			LIST(clmp)->lm_lazy++;
	}

	return (nlmp);
}

/*
 * Return the entry point of the ELF executable.
 */
static ulong_t
elf_entry_pt(void)
{
	return (ENTRY(lml_main.lm_head));
}

/*
 * Unmap a given ELF shared object from the address space.
 */
static void
elf_unmap_so(Rt_map *lmp)
{
	caddr_t	addr;
	size_t	size;
	Mmap	*mmaps;

	/*
	 * If this link map represents a relocatable object concatenation, then
	 * the image was simply generated in allocated memory.  Free the
	 * memory.
	 *
	 * Note: the memory was originally allocated in the libelf:_elf_outmap
	 * routine and would normally have been free'd in elf_outsync(), but
	 * because we 'interpose' on that routine the memory wasn't free'd at
	 * that time.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC) {
		free((void *)ADDR(lmp));
		return;
	}

	/*
	 * If padding was enabled via rtld_db, then we have at least one page
	 * in front of the image - and possibly a trailing page.
	 * Unmap the front page first:
	 */
	if (PADSTART(lmp) != ADDR(lmp)) {
		addr = (caddr_t)M_PTRUNC(PADSTART(lmp));
		size = ADDR(lmp) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap any trailing padding.
	 */
	if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) >
	    M_PROUND(ADDR(lmp) + MSIZE(lmp))) {
		addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp));
		size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap all mapped segments.
	 */
	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++)
		(void) munmap(mmaps->m_vaddr, mmaps->m_msize);
}

/*
 * Determine if a dependency requires a particular version and if so verify
 * that the version exists in the dependency.
 */
static int
elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp)
{
	Verneed		*vnd = VERNEED(clmp);
	int		_num, num = VERNEEDNUM(clmp);
	char		*cstrs = (char *)STRTAB(clmp);
	Lm_list		*lml = LIST(clmp);

	/*
	 * Traverse the caller's version needed information and determine if
	 * any specific versions are required from the dependency.
	 */
	DBG_CALL(Dbg_ver_need_title(LIST(clmp), NAME(clmp)));
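
	/*
	 * Note: Verneed and Vernaux records are variable length; vn_next and
	 * vna_next are byte offsets from the current record to the next one,
	 * so both lists below are walked with byte arithmetic rather than
	 * array indexing.
	 */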
	for (_num = 1; _num <= num; _num++,
	    vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) {
		Half		cnt = vnd->vn_cnt;
		Vernaux		*vnap;
		char		*nstrs, *need;

		/*
		 * Determine if a needed entry matches this dependency.
		 */
		need = (char *)(cstrs + vnd->vn_file);
		if (strcmp(name, need) != 0)
			continue;

		if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) &&
		    ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0))
			(void) printf(MSG_INTL(MSG_LDD_VER_FIND), name);

		/*
		 * Validate that each version required actually exists in the
		 * dependency.
		 */
		nstrs = (char *)STRTAB(nlmp);

		for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt;
		    cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) {
			char		*version, *define;
			Verdef		*vdf = VERDEF(nlmp);
			ulong_t		_num, num = VERDEFNUM(nlmp);
			int		found = 0;

			version = (char *)(cstrs + vnap->vna_name);
			DBG_CALL(Dbg_ver_need_entry(lml, 0, need, version));

			for (_num = 1; _num <= num; _num++,
			    vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) {
				Verdaux		*vdap;

				if (vnap->vna_hash != vdf->vd_hash)
					continue;

				vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux);
				define = (char *)(nstrs + vdap->vda_name);
				if (strcmp(version, define) != 0)
					continue;

				found++;
				break;
			}

			/*
			 * If we're being traced print out any matched version
			 * when the verbose (-v) option is in effect.  Always
			 * print any unmatched versions.
			 */
			if (lml->lm_flags & LML_FLG_TRC_ENABLE) {
				/* BEGIN CSTYLED */
				if (found) {
				    if (!(lml->lm_flags & LML_FLG_TRC_VERBOSE))
					continue;

				    (void) printf(MSG_ORIG(MSG_LDD_VER_FOUND),
					need, version, NAME(nlmp));
				} else {
				    if (rtld_flags & RT_FL_SILENCERR)
					continue;

				    (void) printf(MSG_INTL(MSG_LDD_VER_NFOUND),
					need, version);
				}
				/* END CSTYLED */
				continue;
			}

			/*
			 * If the version hasn't been found then this is a
			 * candidate for a fatal error condition.  Weak
			 * version definition requirements are silently
			 * ignored.  Also, if the image inspected for a version
			 * definition has no versioning recorded at all then
			 * silently ignore this (this provides better backward
			 * compatibility to old images created prior to
			 * versioning being available).  Both of these skipped
			 * diagnostics are available under tracing (see above).
			 */
			if ((found == 0) && (num != 0) &&
			    (!(vnap->vna_flags & VER_FLG_WEAK))) {
				eprintf(lml, ERR_FATAL,
				    MSG_INTL(MSG_VER_NFOUND), need, version,
				    NAME(clmp));
				return (0);
			}
		}
	}
	DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD));
	return (1);
}
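
/*
 * For illustration, a lazily loaded dependency appears in the .dynamic array
 * as a DT_POSFLAG_1 entry carrying DF_P1_LAZYLOAD immediately followed by the
 * DT_NEEDED entry it qualifies, e.g. (hypothetical object):
 *
 *	[ndx]	d_tag		d_un
 *	[3]	POSFLAG_1	0x1		[ LAZY ]
 *	[4]	NEEDED		0x123		libfoo.so.1
 *
 * elf_needed() below records the flag when it sees the DT_POSFLAG_1 element
 * and applies it to the DT_NEEDED element that follows.
 */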

/*
 * Search through the dynamic section for DT_NEEDED entries and perform one
 * of two functions.  If only the first argument is specified then load the
 * defined shared object, otherwise add the link map representing the defined
 * shared object to the dlopen list.
 */
static int
elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp)
{
	Dyn		*dyn, *pdyn;
	ulong_t		ndx = 0;
	uint_t		lazy, flags;
	Word		lmflags = lml->lm_flags;
	Word		lmtflags = lml->lm_tflags;

	/*
	 * Process each shared object on needed list.
	 */
	if (DYN(clmp) == 0)
		return (1);

	for (dyn = (Dyn *)DYN(clmp), pdyn = NULL; dyn->d_tag != DT_NULL;
	    pdyn = dyn++, ndx++) {
		Dyninfo	*dip = &DYNINFO(clmp)[ndx];
		Rt_map	*nlmp = 0;
		char	*name;
		int	silent = 0;
		Pnode	*pnp;

		switch (dyn->d_tag) {
		case DT_POSFLAG_1:
			dip->di_flags |= FLG_DI_POSFLAG1;
			continue;
		case DT_NEEDED:
		case DT_USED:
			lazy = flags = 0;
			dip->di_flags |= FLG_DI_NEEDED;

			if (pdyn && (pdyn->d_tag == DT_POSFLAG_1)) {
				if ((pdyn->d_un.d_val & DF_P1_LAZYLOAD) &&
				    ((lmtflags & LML_TFLG_NOLAZYLD) == 0)) {
					dip->di_flags |= FLG_DI_LAZY;
					lazy = 1;
				}
				if (pdyn->d_un.d_val & DF_P1_GROUPPERM) {
					dip->di_flags |= FLG_DI_GROUP;
					flags =
					    (FLG_RT_SETGROUP | FLG_RT_HANDLE);
				}
			}

			name = (char *)STRTAB(clmp) + dyn->d_un.d_val;

			/*
			 * NOTE, libc.so.1 can't be lazy loaded.  Although a
			 * lazy position flag won't be produced when a RTLDINFO
			 * .dynamic entry is found (introduced with the UPM in
			 * Solaris 10), it was possible to mark libc for lazy
			 * loading on previous releases.  To reduce the
			 * overhead of testing for this occurrence, only carry
			 * out this check for the first object on the link-map
			 * list (there aren't many applications built without
			 * libc).
			 */
			if (lazy && (lml->lm_head == clmp) &&
			    (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0))
				lazy = 0;

			/*
			 * Don't bring in lazy loaded objects yet unless we've
			 * been asked to attempt to load all available objects
			 * (crle(1) sets LD_FLAGS=loadavail).  Even under
			 * RTLD_NOW we don't process this - RTLD_NOW will cause
			 * relocation processing which in turn might trigger
			 * lazy loading, but it's possible that the object has
			 * a lazy loaded file with no bindings (i.e., it should
			 * never have been a dependency in the first place).
			 */
			if (lazy) {
				if ((lmflags & LML_FLG_LOADAVAIL) == 0) {
					LAZY(clmp)++;
					lazy = flags = 0;
					continue;
				}

				/*
				 * Silence any error messages - see description
				 * under elf_lookup_filtee().
				 */
				if ((rtld_flags & RT_FL_SILENCERR) == 0) {
					rtld_flags |= RT_FL_SILENCERR;
					silent = 1;
				}
			}
			break;
		case DT_AUXILIARY:
			dip->di_flags |= FLG_DI_AUXFLTR;
			continue;
		case DT_SUNW_AUXILIARY:
			dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR);
			continue;
		case DT_FILTER:
			dip->di_flags |= FLG_DI_STDFLTR;
			continue;
		case DT_SUNW_FILTER:
			dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR);
			continue;
		default:
			continue;
		}

		DBG_CALL(Dbg_file_needed(clmp, name));

		/*
		 * If we're running under ldd(1), indicate that this dependency
		 * has been processed.  It doesn't matter whether the object is
		 * successfully loaded or not, this flag simply ensures that we
		 * don't repeatedly attempt to load an object that has already
		 * failed to load.  To do so would create multiple failure
		 * diagnostics for the same object under ldd(1).
		 */
		if (lml->lm_flags & LML_FLG_TRC_ENABLE)
			dip->di_flags |= FLG_DI_LDD_DONE;

		/*
		 * Establish the object's name, load it and establish a binding
		 * with the caller.
		 */
		if (((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) ||
		    ((nlmp = load_one(lml, lmco, pnp, clmp, MODE(clmp),
		    flags, 0)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0))
			nlmp = 0;

		/*
		 * Clean up any infrastructure, including the removal of the
		 * error suppression state, if it had been previously set in
		 * this routine.
		 */
		if (pnp)
			remove_pnode(pnp);
		if (silent)
			rtld_flags &= ~RT_FL_SILENCERR;

		if ((dip->di_info = (void *)nlmp) == 0) {
			/*
			 * If the object could not be mapped, continue if error
			 * suppression is established or we're here with
			 * ldd(1).
			 */
			if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags &
			    (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE)))
				continue;
			else
				return (0);
		}
	}

	if (LAZY(clmp))
		lml->lm_lazy++;

	return (1);
}

static int
elf_map_check(Lm_list *lml, const char *name, caddr_t vaddr, Off size)
{
	prmap_t		*maps, *_maps;
	int		pfd, num, _num;
	caddr_t		eaddr = vaddr + size;
	int		err;

	/*
	 * If memory reservations have been established for alternative
	 * objects, determine if this object falls within the reservation; if
	 * it does no further checking is required.
	 */
	if (rtld_flags & RT_FL_MEMRESV) {
		Rtc_head	*head = (Rtc_head *)config->c_bgn;

		if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) &&
		    (eaddr <= (caddr_t)(uintptr_t)head->ch_resend))
			return (0);
	}

	/*
	 * Determine the mappings presently in use by this process.
	 */
	if ((pfd = pr_open(lml)) == FD_UNAVAIL)
		return (1);
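
	/*
	 * Note: PIOCNMAP and PIOCMAP belong to the older ioctl(2) based /proc
	 * interface.  PIOCNMAP yields the number of active mappings, and
	 * PIOCMAP then fills an array of prmap_t structures that is
	 * terminated by a zeroed entry - hence the (num + 1) allocation
	 * below.
	 */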
	if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		return (1);
	}

	if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0)
		return (1);

	if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		free(maps);
		return (1);
	}

	/*
	 * Determine if the supplied address clashes with any of the present
	 * process mappings.
	 */
	for (_num = 0, _maps = maps; _num < num; _num++, _maps++) {
		caddr_t		_eaddr = _maps->pr_vaddr + _maps->pr_size;
		Rt_map		*lmp;
		const char	*str;

		if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr))
			continue;

		/*
		 * We have a memory clash.  See if one of the known dynamic
		 * dependency mappings represents this space so as to provide
		 * the user a more meaningful message.
		 */
		if ((lmp = _caller(vaddr, 0)) != 0)
			str = NAME(lmp);
		else
			str = MSG_INTL(MSG_STR_UNKNOWN);

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name,
		    EC_NATPTR(vaddr), EC_OFF(size), str);
		return (1);
	}
	free(maps);
	return (0);
}

/*
 * Obtain a memory reservation.  On newer systems, both MAP_ANON and MAP_ALIGN
 * are used to obtain an aligned reservation from anonymous memory.  If
 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard
 * reservation using the file as backing.
 */
static Am_ret
elf_map_reserve(Lm_list *lml, const char *name, caddr_t *maddr, Off msize,
    int mperm, int fd, Xword align)
{
	Am_ret	amret;
	int	mflag = MAP_PRIVATE | MAP_NORESERVE;

#if defined(MAP_ALIGN)
	if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) {
		mflag |= MAP_ALIGN;
		*maddr = (caddr_t)align;
	}
#endif
	if ((amret = anon_map(lml, maddr, msize, PROT_NONE, mflag)) == AM_ERROR)
		return (amret);

	if (amret == AM_OK)
		return (AM_OK);

	/*
	 * If an anonymous memory request failed (which should only be the
	 * case if it is unsupported on the system we're running on), establish
	 * the initial mapping directly from the file.
	 */
	*maddr = 0;
	if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE,
	    fd, 0)) == MAP_FAILED) {
		int	err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name,
		    strerror(err));
		return (AM_ERROR);
	}
	return (AM_NOSUP);
}
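
/*
 * Note on the MAP_ALIGN usage above: when MAP_ALIGN is set, the address
 * argument to mmap(2) carries the required alignment itself rather than a
 * placement hint, which is why *maddr is loaded with the alignment value
 * before the anon_map() request is made.
 */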

static void *
elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag,
    int fd, Off foff)
{
#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	static int	notd = 0;

	/*
	 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate
	 * flag.
	 */
	if (notd == 0) {
		if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC)
			mflag |= MAP_TEXT;
		else
			mflag |= MAP_INITDATA;
	}
#endif
	if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED)
		return (0);

#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	if ((notd == 0) && (errno == EINVAL)) {
		/*
		 * MAP_TEXT and MAP_INITDATA may not be supported on this
		 * platform, try again without.
		 */
		notd = 1;
		mflag &= ~(MAP_TEXT | MAP_INITDATA);

		return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff));
	}
#endif
	return (MAP_FAILED);
}

/*
 * Map in a file.
 */
static caddr_t
elf_map_it(
	Lm_list		*lml,		/* link-map list */
	const char	*name,		/* actual name stored for pathname */
	Off		fsize,		/* total mapping claim of the file */
	Ehdr		*ehdr,		/* ELF header of file */
	Phdr		*fphdr,		/* first loadable Phdr */
	Phdr		*lphdr,		/* last loadable Phdr */
	Phdr		**rrphdr,	/* return first Phdr in reservation */
	caddr_t		*rraddr,	/* return start of reservation */
	Off		*rrsize,	/* return total size of reservation */
	int		fixed,		/* image is resolved to a fixed addr */
	int		fd,		/* image's file descriptor */
	Xword		align,		/* image segments' maximum alignment */
	Mmap		*mmaps,		/* mmap information array and */
	uint_t		*mmapcnt)	/* mapping count */
{
	caddr_t		raddr;		/* reservation address */
	Off		rsize;		/* reservation size */
	Phdr		*phdr;		/* working program header pointer */
	caddr_t		maddr;		/* working mmap address */
	caddr_t		faddr;		/* working file address */
	size_t		padsize;	/* object padding requirement */
	size_t		padpsize = 0;	/* padding size rounded to next page */
	size_t		padmsize = 0;	/* padding size rounded for alignment */
	int		skipfseg;	/* skip mapping first segment */
	int		mperm;		/* segment permissions */
	Am_ret		amret = AM_NOSUP;

	/*
	 * If padding is required extend both the front and rear of the image.
	 * To insure the image itself is mapped at the correct alignment the
	 * initial padding is rounded up to the nearest page.  Once the image
	 * is mapped the excess can be pruned to the nearest page required for
	 * the actual padding itself.
	 */
	if ((padsize = r_debug.rtd_objpad) != 0) {
		padpsize = M_PROUND(padsize);
		if (fixed)
			padmsize = padpsize;
		else
			padmsize = S_ROUND(padsize, align);
	}
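
	/*
	 * For illustration, assuming 8K pages, an object padding request
	 * (r_debug.rtd_objpad) of 0x100 gives padpsize = M_PROUND(0x100) =
	 * 0x2000, while a non-fixed object with a maximum segment alignment
	 * of 0x10000 gets padmsize = S_ROUND(0x100, 0x10000) = 0x10000.
	 */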

	/*
	 * Determine the initial permissions used to map in the first segment.
	 * If this segment's memsz is greater than its filesz then the
	 * difference must be zeroed.  Make sure this segment is writable.
	 */
	mperm = 0;
	if (fphdr->p_flags & PF_R)
		mperm |= PROT_READ;
	if (fphdr->p_flags & PF_X)
		mperm |= PROT_EXEC;
	if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz))
		mperm |= PROT_WRITE;

	/*
	 * Determine whether or not to let the system reserve address space
	 * based on whether this is a dynamic executable (addresses in object
	 * are fixed) or a shared object (addresses in object are relative to
	 * the object's base).
	 */
	if (fixed) {
		/*
		 * Determine the reservation address and size, and insure that
		 * this reservation isn't already in use.
		 */
		faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr);
		raddr = maddr - padpsize;
		rsize = fsize + padpsize + padsize;

		if (lml_main.lm_head) {
			if (elf_map_check(lml, name, raddr, rsize) != 0)
				return (0);
		}

		/*
		 * As this is a fixed image, all segments must be individually
		 * mapped.
		 */
		skipfseg = 0;

	} else {
		size_t	esize;

		/*
		 * If this isn't a fixed image, reserve enough address space
		 * for the entire image to be mapped.  The amount of
		 * reservation is the range between the beginning of the
		 * first, and end of the last loadable segment, together with
		 * any padding, plus the alignment of the first segment.
		 *
		 * The optimal reservation is made as a no-reserve mapping
		 * from anonymous memory.  Each segment is then mapped into
		 * this reservation.  If the anonymous mapping capability
		 * isn't available, the reservation is obtained from the file
		 * itself.  In this case the first segment of the image is
		 * mapped as part of the reservation, thus only the following
		 * segments need to be remapped.
		 */
		rsize = fsize + padmsize + padsize;
		if ((amret = elf_map_reserve(lml, name, &raddr, rsize, mperm,
		    fd, align)) == AM_ERROR)
			return (0);
		maddr = raddr + padmsize;
		faddr = (caddr_t)S_ROUND((Off)maddr, align);

		/*
		 * If this reservation has been obtained from anonymous memory,
		 * then all segments must be individually mapped.  Otherwise,
		 * the first segment heads the reservation.
		 */
		if (amret == AM_OK)
			skipfseg = 0;
		else
			skipfseg = 1;

		/*
		 * For backward compatibility (where MAP_ALIGN isn't
		 * available), insure the alignment of the reservation is
		 * adequate for this object, and if not remap the object to
		 * obtain the correct alignment.
		 */
		if (faddr != maddr) {
			(void) munmap(raddr, rsize);

			rsize += align;
			if ((amret = elf_map_reserve(lml, name, &raddr, rsize,
			    mperm, fd, align)) == AM_ERROR)
				return (0);

			maddr = faddr = (caddr_t)S_ROUND((Off)(raddr +
			    padpsize), align);

			esize = maddr - raddr + padpsize;

			/*
			 * As this image has been realigned, the first segment
			 * of the file needs to be remapped to its correct
			 * location.
			 */
			skipfseg = 0;
		} else
			esize = padmsize - padpsize;

		/*
		 * If this reservation included padding, remove any excess for
		 * the start of the image (the padding was adjusted to insure
		 * the image was aligned appropriately).
		 */
		if (esize) {
			(void) munmap(raddr, esize);
			raddr += esize;
			rsize -= esize;
		}
	}

	/*
	 * At this point we know the initial location of the image, and its
	 * size.  Pass these back to the caller for inclusion in the link-map
	 * that will eventually be created.
	 */
	*rraddr = raddr;
	*rrsize = rsize;

	/*
	 * The first loadable segment is now pointed to by maddr.  This
	 * segment will eventually contain the elf header and program headers,
	 * so reset the program header.  Pass this back to the caller for
	 * inclusion in the link-map so it can be used for later unmapping
	 * operations.
	 */
	/* LINTED */
	*rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff);

	/*
	 * If padding is required at the front of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 * Otherwise this reservation is backed by the file, or in the case of
	 * a fixed image, doesn't yet exist.  Map the padding so that it is
	 * suitably protected (PROT_NONE), and insure the first segment of the
	 * file is mapped to its correct location.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			if (dz_map(lml, raddr, padpsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);

			skipfseg = 0;
		}
		rsize -= padpsize;
	}

	/*
	 * Map individual segments.  For a fixed image, these will each be
	 * unique mappings.  For a reservation these will fill in the
	 * reservation.
	 */
	for (phdr = fphdr; phdr <= lphdr;
	    phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) {
		caddr_t	addr;
		Off	mlen, flen;
		size_t	size;

		/*
		 * Skip non-loadable segments or segments that don't occupy
		 * any memory.
		 */
		if (((phdr->p_type != PT_LOAD) &&
		    (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0))
			continue;

		/*
		 * Establish this segment's address relative to our base.
		 */
		addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr +
		    (fixed ? 0 : faddr)));

		/*
		 * Determine the mapping protection from the segment
		 * attributes.  Also determine the etext address from the last
		 * loadable segment which has permissions but no write access.
		 */
		mperm = 0;
		if (phdr->p_flags) {
			if (phdr->p_flags & PF_R)
				mperm |= PROT_READ;
			if (phdr->p_flags & PF_X)
				mperm |= PROT_EXEC;
			if (phdr->p_flags & PF_W)
				mperm |= PROT_WRITE;
			else
				fmap->fm_etext = phdr->p_vaddr + phdr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
		}

		/*
		 * Determine the type of mapping required.
		 */
		if (phdr->p_type == PT_SUNWBSS) {
			/*
			 * Potentially, we can defer the loading of any SUNWBSS
			 * segment, depending on whether the symbols it
			 * provides have been bound to.  In this manner, large
			 * segments that are interposed upon between shared
			 * libraries may not require mapping.  Note, that the
			 * mapping information is recorded in our mapping
			 * descriptor at this time.
			 */
			mlen = phdr->p_memsz;
			flen = 0;

		} else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) {
			/*
			 * If this segment has no backing file and no flags
			 * specified, then it defines a reservation.  At this
			 * point all standard loadable segments will have been
			 * processed.  The segment reservation is mapped
			 * directly from /dev/null.
			 */
			if (nu_map(lml, (caddr_t)addr, phdr->p_memsz, PROT_NONE,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else if (phdr->p_filesz == 0) {
			/*
			 * If this segment has no backing file then it defines
			 * a nobits segment and is mapped directly from
			 * /dev/zero.
			 */
			if (dz_map(lml, (caddr_t)addr, phdr->p_memsz, mperm,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else {
			Off	foff;

			/*
			 * This mapping originates from the file.  Determine
			 * the file offset to which the mapping will be
			 * directed (must be aligned) and how much to map
			 * (might be more than the file in the case of .bss).
			 */
			foff = M_PTRUNC((ulong_t)phdr->p_offset);
			mlen = phdr->p_memsz + (phdr->p_offset - foff);
			flen = phdr->p_filesz + (phdr->p_offset - foff);
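
			/*
			 * For illustration, assuming 8K pages, a segment with
			 * p_offset = 0x2234, p_filesz = 0x1000 and p_memsz =
			 * 0x3000 gives foff = 0x2000, flen = 0x1234 and
			 * mlen = 0x3234; the bytes between filesz and memsz
			 * are zeroed further below.
			 */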

			/*
			 * If this is a non-fixed, non-anonymous mapping, and
			 * no padding is involved, then the first loadable
			 * segment is already part of the initial reservation.
			 * In this case there is no need to remap this segment.
			 */
			if ((skipfseg == 0) || (phdr != fphdr)) {
				int phdr_mperm = mperm;

				/*
				 * If this segment's memsz is greater than its
				 * filesz then the difference must be zeroed.
				 * Make sure this segment is writable.
				 */
				if (phdr->p_memsz > phdr->p_filesz)
					mperm |= PROT_WRITE;

				if (elf_map_textdata((caddr_t)addr, flen,
				    mperm, phdr_mperm,
				    (MAP_FIXED | MAP_PRIVATE), fd, foff) ==
				    MAP_FAILED) {
					int	err = errno;
					eprintf(lml, ERR_FATAL,
					    MSG_INTL(MSG_SYS_MMAP), name,
					    strerror(err));
					return (0);
				}
			}

			/*
			 * If the memory occupancy of the segment overflows the
			 * definition in the file, we need to "zero out" the
			 * end of the mapping we've established, and if
			 * necessary, map some more space from /dev/zero.
			 * Note, zero'ed memory must end on a double word
			 * boundary to satisfy zero().
			 */
			if (phdr->p_memsz > phdr->p_filesz) {
				caddr_t	zaddr;
				size_t	zlen, zplen;
				Off	fend;

				foff = (Off)(phdr->p_vaddr + phdr->p_filesz +
				    (fixed ? 0 : faddr));
				zaddr = (caddr_t)M_PROUND(foff);
				zplen = (size_t)(zaddr - foff);

				fend = (Off)S_DROUND((size_t)(phdr->p_vaddr +
				    phdr->p_memsz + (fixed ? 0 : faddr)));
				zlen = (size_t)(fend - foff);

				/*
				 * Determine whether the number of bytes that
				 * must be zero'ed overflows to the next page.
				 * If not, simply clear the exact bytes
				 * (filesz to memsz) from this page.
				 * Otherwise, clear the remaining bytes of this
				 * page, and map any following pages from
				 * /dev/zero.
				 */
				if (zlen < zplen)
					zero((caddr_t)foff, (long)zlen);
				else {
					zero((caddr_t)foff, (long)zplen);

					if ((zlen = (fend - (Off)zaddr)) > 0) {
						if (dz_map(lml, zaddr, zlen,
						    mperm,
						    MAP_FIXED | MAP_PRIVATE) ==
						    MAP_FAILED)
							return (0);
					}
				}
			}
		}

		/*
		 * Unmap anything from the last mapping address to this one and
		 * update the mapping claim pointer.
		 */
		if ((fixed == 0) && ((size = addr - maddr) != 0)) {
			(void) munmap(maddr, size);
			rsize -= size;
		}

		/*
		 * Retain this segment's mapping information.
		 */
		mmaps[*mmapcnt].m_vaddr = addr;
		mmaps[*mmapcnt].m_msize = mlen;
		mmaps[*mmapcnt].m_fsize = flen;
		mmaps[*mmapcnt].m_perm = mperm;
		(*mmapcnt)++;

		maddr = addr + M_PROUND(mlen);
		rsize -= M_PROUND(mlen);
	}

	/*
	 * If padding is required at the end of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			/*
			 * maddr is currently page aligned from the last
			 * segment mapping.
			 */
			if (dz_map(lml, maddr, padsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);
		}
		maddr += padsize;
		rsize -= padsize;
	}

	/*
	 * Unmap any final reservation.
	 */
	if ((fixed == 0) && (rsize != 0))
		(void) munmap(maddr, rsize);

	return (faddr);
}

/*
 * A null symbol interpreter.  Used if a filter has no associated filtees.
 */
/* ARGSUSED0 */
static Sym *
elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	return ((Sym *)0);
}

/*
 * Disable filtee use.
 */
static void
elf_disable_filtee(Rt_map *lmp, Dyninfo *dip)
{
	dip->di_info = 0;

	if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) {
		/*
		 * If this is an object filter, free the filtee's duplication.
		 */
		if (OBJFLTRNDX(lmp) != FLTR_DISABLED) {
			free(REFNAME(lmp));
			REFNAME(lmp) = (char *)0;
			OBJFLTRNDX(lmp) = FLTR_DISABLED;

			/*
			 * Indicate that this filtee is no longer available.
			 */
			if (dip->di_flags & FLG_DI_STDFLTR)
				SYMINTP(lmp) = elf_null_find_sym;

		}
	} else if (dip->di_flags & FLG_DI_STDFLTR) {
		/*
		 * Indicate that this standard filtee is no longer available.
		 */
		if (SYMSFLTRCNT(lmp))
			SYMSFLTRCNT(lmp)--;
	} else {
		/*
		 * Indicate that this auxiliary filtee is no longer available.
		 */
		if (SYMAFLTRCNT(lmp))
			SYMAFLTRCNT(lmp)--;
	}
	dip->di_flags &= ~MSK_DI_FILTER;
}
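
/*
 * Background for the filtering code below: a standard filter (DT_FILTER)
 * exposes only the interfaces of its filtees, so a symbol that can't be found
 * in any filtee is an error, whereas an auxiliary filter (DT_AUXILIARY) falls
 * back to its own definition when a filtee is missing or doesn't supply the
 * symbol.  Either form may also be applied per-symbol (DT_SUNW_FILTER and
 * DT_SUNW_AUXILIARY) via .SUNW_syminfo information.
 */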

/*
 * Find symbol interpreter - filters.
 * This function is called when the symbols from a shared object should
 * be resolved from the shared object's filtees instead of from within itself.
 *
 * A symbol name of 0 is used to trigger filtee loading.
 */
static Sym *
_elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	const char	*name = slp->sl_name, *filtees;
	Rt_map		*clmp = slp->sl_cmap;
	Rt_map		*ilmp = slp->sl_imap;
	Pnode		*pnp, **pnpp;
	int		any;
	Dyninfo		*dip = &DYNINFO(ilmp)[ndx];
	Lm_list		*lml = LIST(ilmp);

	/*
	 * Indicate that the filter has been used.  If a binding already exists
	 * to the caller, indicate that this object is referenced.  This
	 * insures we don't generate false unreferenced diagnostics from
	 * ldd -u/U or debugging.  Don't create a binding regardless, as this
	 * filter may have been dlopen()'ed.
	 */
	if (name && (ilmp != clmp)) {
		Word	tracing = (LIST(clmp)->lm_flags &
		    (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED));

		if (tracing || DBG_ENABLED) {
			Bnd_desc	*bdp;
			Aliste		idx;

			FLAGS1(ilmp) |= FL1_RT_USED;

			if ((tracing & LML_FLG_TRC_UNREF) || DBG_ENABLED) {
				for (APLIST_TRAVERSE(CALLERS(ilmp), idx, bdp)) {
					if (bdp->b_caller == clmp) {
						bdp->b_flags |= BND_REFER;
						break;
					}
				}
			}
		}
	}

	/*
	 * If this is the first call to process this filter, establish the
	 * filtee list.  If a configuration file exists, determine if any
	 * filtee associations for this filter, and its filtee reference, are
	 * defined.  Otherwise, process the filtee reference.  Any token
	 * expansion is also completed at this point (i.e., $PLATFORM).
	 */
	filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val;
	if (dip->di_info == 0) {
		if (rtld_flags2 & RT_FL2_FLTCFG)
			dip->di_info = elf_config_flt(lml, PATHNAME(ilmp),
			    filtees);

		if (dip->di_info == 0) {
			DBG_CALL(Dbg_file_filter(lml, NAME(ilmp), filtees, 0));
			if ((lml->lm_flags &
			    (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) &&
			    ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0))
				(void) printf(MSG_INTL(MSG_LDD_FIL_FILTER),
				    NAME(ilmp), filtees);

			if ((dip->di_info = (void *)expand_paths(ilmp,
			    filtees, PN_SER_FILTEE, 0)) == 0) {
				elf_disable_filtee(ilmp, dip);
				return ((Sym *)0);
			}
		}
	}

	/*
	 * Traverse the filtee list, dlopen()'ing any objects specified and
	 * using their group handle to lookup the symbol.
	 */
	for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp;
	    pnpp = &pnp->p_next, pnp = *pnpp) {
		int	mode;
		Grp_hdl	*ghp;
		Rt_map	*nlmp = 0;

		if (pnp->p_len == 0)
			continue;

		/*
		 * Establish the mode of the filtee from the filter.  As
		 * filtees are loaded via a dlopen(), make sure that RTLD_GROUP
		 * is set and the filtees aren't global.  It would be nice to
		 * have RTLD_FIRST used here also, but as filters got out long
		 * before RTLD_FIRST was introduced it's a little too late now.
		 */
		mode = MODE(ilmp) | RTLD_GROUP;
		mode &= ~RTLD_GLOBAL;

		/*
		 * Insure that any auxiliary filter can locate symbols from its
		 * caller.
		 */
		if (dip->di_flags & FLG_DI_AUXFLTR)
			mode |= RTLD_PARENT;

		/*
		 * Process any hardware capability directory.  Establish a new
		 * link-map control list from which to analyze any newly added
		 * objects.
		 */
		if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) {
			Lm_cntl	*lmc;
			Aliste	lmco;

			if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
				if ((lmc = alist_append(&lml->lm_lists, 0,
				    sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0)
					return ((Sym *)0);
				lmco = (Aliste)((char *)lmc -
				    (char *)lml->lm_lists);
			} else {
				lmc = 0;
				lmco = ALIST_OFF_DATA;
			}

			pnp = hwcap_filtees(pnpp, lmco, lmc, dip, ilmp, filtees,
			    mode, (FLG_RT_HANDLE | FLG_RT_HWCAP));

			/*
			 * Now that any hardware capability objects have been
			 * processed, remove any link-map control list.
			 */
			if (lmc)
				remove_cntl(lml, lmco);
		}

		if (pnp->p_len == 0)
			continue;

		/*
		 * Process an individual filtee.
		 */
		if (pnp->p_info == 0) {
			const char	*filtee = pnp->p_name;
			int		audit = 0;

			DBG_CALL(Dbg_file_filtee(lml, NAME(ilmp), filtee, 0));

			ghp = 0;

			/*
			 * Determine if the reference link map is already
			 * loaded.  As an optimization compare the filtee with
			 * our interpreter.  The most common filter is
			 * libdl.so.1, which is a filter on ld.so.1.
			 */
#if	defined(_ELF64)
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) {
#else
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) {
#endif
				/*
				 * Create an association between ld.so.1 and
				 * the filter.  As an optimization, a handle
				 * for ld.so.1 itself (required for the
				 * dlopen() family filtering mechanism)
				 * shouldn't search any dependencies of
				 * ld.so.1.  Omitting GPD_ADDEPS prevents the
				 * addition of any ld.so.1 dependencies to this
				 * handle.
				 */
				nlmp = lml_rtld.lm_head;
				if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp,
				    (GPH_LDSO | GPH_FIRST | GPH_FILTEE),
				    (GPD_DLSYM | GPD_RELOC), GPD_PARENT)) == 0)
					nlmp = 0;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp)
					pnp->p_info = (void *)ghp;

				/*
				 * Audit the filter/filtee established.  Ignore
				 * any return from the auditor, as we can't
				 * allow the auditor to filter out ld.so.1,
				 * otherwise nothing is going to work.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER))
					(void) audit_objfilter(ilmp, filtees,
					    nlmp, 0);

			} else {
				Rej_desc	rej = { 0 };
				Lm_cntl		*lmc;
				Aliste		lmco;

				/*
				 * Establish a new link-map control list from
				 * which to analyze any newly added objects.
				 */
				if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
					if ((lmc =
					    alist_append(&lml->lm_lists, 0,
					    sizeof (Lm_cntl),
					    AL_CNT_LMLISTS)) == 0)
						return ((Sym *)0);
					lmco = (Aliste)((char *)lmc -
					    (char *)lml->lm_lists);
				} else {
					lmc = 0;
					lmco = ALIST_OFF_DATA;
				}

				/*
				 * Load the filtee.  Note, an auditor can
				 * provide an alternative name.
				 */
				if ((nlmp = load_path(lml, lmco, &(pnp->p_name),
				    ilmp, mode, FLG_RT_HANDLE, &ghp, 0,
				    &rej)) == 0) {
					file_notfound(LIST(ilmp), filtee, ilmp,
					    FLG_RT_HANDLE, &rej);
					remove_rej(&rej);
				}
				filtee = pnp->p_name;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp) {
					ghp->gh_flags |= GPH_FILTEE;
					pnp->p_info = (void *)ghp;
				}

				/*
				 * Audit the filter/filtee established.  A
				 * return of 0 indicates the auditor wishes to
				 * ignore this filtee.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)) {
					if (audit_objfilter(ilmp, filtees,
					    nlmp, 0) == 0) {
						audit = 1;
						nlmp = 0;
					}
				}

				/*
				 * Finish processing the objects associated
				 * with this request.  Create an association
				 * between this object and the originating
				 * filter to provide sufficient information to
				 * tear down this filtee if necessary.
				 */
				if (nlmp && ghp &&
				    ((analyze_lmc(lml, lmco, nlmp) == 0) ||
				    (relocate_lmc(lml, lmco, ilmp, nlmp) == 0)))
					nlmp = 0;

				/*
				 * If the filtee has been successfully
				 * processed, then create an association
				 * between the filter and filtee.  This
				 * association provides sufficient information
				 * to tear down the filter and filtee if
				 * necessary.
				 */
				DBG_CALL(Dbg_file_hdl_title(DBG_HDL_ADD));
				if (nlmp && ghp &&
				    (hdl_add(ghp, ilmp, GPD_FILTER) == 0))
					nlmp = 0;

				/*
				 * If this filtee loading has failed, and we've
				 * created a new link-map control list to which
				 * this request has added objects, then remove
				 * all the objects that have been associated to
				 * this request.
				 */
				if ((nlmp == 0) && lmc && lmc->lc_head)
					remove_lmc(lml, clmp, lmc, lmco, name);

				/*
				 * Remove any link-map control list that was
				 * created.
				 */
				if (lmc)
					remove_cntl(lml, lmco);
			}

			/*
			 * Generate a diagnostic if the filtee couldn't be
			 * loaded, null out the pnode entry, and continue
			 * the search.  Otherwise, retain this group handle
			 * for future symbol searches.
			 */
			if (nlmp == 0) {
				DBG_CALL(Dbg_file_filtee(lml, 0, filtee,
				    audit));

				pnp->p_info = 0;
				pnp->p_len = 0;
				continue;
			}
		}

		ghp = (Grp_hdl *)pnp->p_info;

		/*
		 * If we're just here to trigger filtee loading skip the symbol
		 * lookup so we'll continue looking for additional filtees.
		 */
		if (name) {
			Grp_desc	*gdp;
			Sym		*sym = 0;
			Aliste		idx;
			Slookup		sl = *slp;

			sl.sl_flags |= LKUP_FIRST;
			any++;

			/*
			 * Look for the symbol in the handle's dependencies.
			 */
			for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
				if ((gdp->gd_flags & GPD_DLSYM) == 0)
					continue;

				/*
				 * If our parent is a dependency don't look at
				 * it (otherwise we are in a recursive loop).
				 * This situation can occur with auxiliary
				 * filters if the filtee has a dependency on
				 * the filter.  This dependency isn't necessary
				 * as auxiliary filters are opened RTLD_PARENT,
				 * but users may still unknowingly add an
				 * explicit dependency to the parent.
				 */
				if ((sl.sl_imap = gdp->gd_depend) == ilmp)
					continue;

				if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp,
				    binfo)) != 0) ||
				    (ghp->gh_flags & GPH_FIRST))
					break;
			}

			/*
			 * If a symbol has been found, indicate the binding
			 * and return the symbol.
			 */
			if (sym) {
				*binfo |= DBG_BINFO_FILTEE;
				return (sym);
			}
		}

		/*
		 * If this object is tagged to terminate filtee processing
		 * we're done.
		 */
		if (FLAGS1(ghp->gh_ownlmp) & FL1_RT_ENDFILTE)
			break;
	}

	/*
	 * If we're just here to trigger filtee loading then we're done.
	 */
	if (name == 0)
		return ((Sym *)0);

	/*
	 * If no filtees have been found for a filter, clean up any Pnode
	 * structures and disable their search completely.  For auxiliary
	 * filters we can reselect the symbol search function so that we never
	 * enter this routine again for this object.  For standard filters we
	 * use the null symbol routine.
	 */
	if (any == 0) {
		remove_pnode((Pnode *)dip->di_info);
		elf_disable_filtee(ilmp, dip);
		return ((Sym *)0);
	}

	return ((Sym *)0);
}

/*
 * Focal point for disabling error messages for auxiliary filters.  As an
 * auxiliary filter allows for filtee use, but provides a fallback should a
 * filtee not exist (or fail to load), any errors generated as a consequence
 * of trying to load the filtees are typically suppressed.  Setting
 * RT_FL_SILENCERR suppresses errors generated by eprintf(), but insures a
 * debug diagnostic is produced.  ldd(1) employs printf(), and here, the
 * selection of whether to print a diagnostic in regards to auxiliary filters
 * is a little more complex.
 *
 *  .	The determination of whether to produce an ldd message, or a fatal
 *	error message is driven by LML_FLG_TRC_ENABLE.
 *  .	More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN,
 *	(ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s),
 *	and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u).
 *
 *  .	If the calling object is lddstub, then several classes of message are
 *	suppressed.  The user isn't trying to diagnose lddstub, this is simply
 *	a stub executable employed to preload a user specified library against.
 *
 *  .	If RT_FL_SILENCERR is in effect then any generic ldd() messages should
 *	be suppressed.  All detailed ldd messages should still be produced.
 */
Sym *
elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	Sym	*sym;
	Dyninfo	*dip = &DYNINFO(slp->sl_imap)[ndx];
	int	silent = 0;

	/*
	 * Make sure this entry is still acting as a filter.  We may have
	 * tried to process this previously, and disabled it if the filtee
	 * couldn't be processed.  However, other entries may provide
	 * different filtees that are yet to be completed.
	 */
	if (dip->di_flags == 0)
		return ((Sym *)0);

	/*
	 * Indicate whether an error message is required should this filtee
	 * not be found, based on the type of filter.
	 */
	if ((dip->di_flags & FLG_DI_AUXFLTR) &&
	    ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) {
		rtld_flags |= RT_FL_SILENCERR;
		silent = 1;
	}

	sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx);

	if (silent)
		rtld_flags &= ~RT_FL_SILENCERR;

	return (sym);
}

/*
 * Compute the elf hash value (as defined in the ELF access library).
 * The form of the hash table is:
 *
 *	|--------------|
 *	| # of buckets |
 *	|--------------|
 *	| # of chains  |
 *	|--------------|
 *	|   bucket[]   |
 *	|--------------|
 *	|   chain[]    |
 *	|--------------|
 */
ulong_t
elf_hash(const char *name)
{
	uint_t	hval = 0;

	while (*name) {
		uint_t	g;
		hval = (hval << 4) + *name++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return ((ulong_t)hval);
}
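
/*
 * For illustration, elf_hash("ab") evaluates as: hval = 'a' = 0x61 after the
 * first iteration, then hval = (0x61 << 4) + 'b' = 0x672; neither iteration
 * sets the top nibble, so no folding occurs and the result is 0x672.
 */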

/*
 * If the flag argument has LKUP_SPEC set, we treat undefined symbols of type
 * function specially in the executable - if they have a value, even though
 * undefined, we use that value.  This allows us to associate all references
 * to a function's address to a single place in the process: the plt entry
 * for that function in the executable.  Calls to lookup from plt binding
 * routines do NOT set LKUP_SPEC in the flag.
 */
Sym *
elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	const char	*name = slp->sl_name;
	Rt_map		*ilmp = slp->sl_imap;
	ulong_t		hash = slp->sl_hash;
	uint_t		ndx, htmp, buckets, *chainptr;
	Sym		*sym, *symtabptr;
	char		*strtabptr, *strtabname;
	uint_t		flags1;
	Syminfo		*sip;

	/*
	 * If we're only here to establish a symbol's index, skip the
	 * diagnostic used to trace a symbol search.
	 */
	if ((slp->sl_flags & LKUP_SYMNDX) == 0)
		DBG_CALL(Dbg_syms_lookup(ilmp, name, MSG_ORIG(MSG_STR_ELF)));

	if (HASH(ilmp) == 0)
		return ((Sym *)0);
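
	/*
	 * The SysV hash table layout shown above means HASH(ilmp)[0] is the
	 * bucket count, HASH(ilmp)[1] the chain count, bucket[i] lives at
	 * HASH(ilmp)[2 + i], and the chain array starts at
	 * HASH(ilmp)[2 + buckets] - hence the offsets used below.
	 */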
		 * The hidden bit supports their versioning
		 * scheme, which allows multiple incompatible functions
		 * with the same name to exist at different versions
		 * within an object.  The Solaris linker does not support this
		 * mechanism, or the model of interface evolution that
		 * it allows, but we honor the hidden bit in GNU ld
		 * produced objects in order to interoperate with them.
		 */
		if ((VERSYM(ilmp) != NULL) &&
		    ((VERSYM(ilmp)[ndx] & 0x8000) != 0)) {
			DBG_CALL(Dbg_syms_ignore_gnuver(ilmp, name,
			    ndx, VERSYM(ilmp)[ndx]));
			if ((ndx = chainptr[ndx]) != 0)
				continue;
			return ((Sym *)0);
		}

		/*
		 * If we're only here to establish a symbol's index, we're
		 * done.
		 */
		if (slp->sl_flags & LKUP_SYMNDX)
			return (sym);

		/*
		 * If we find a match and the symbol is defined, return the
		 * symbol pointer and the link map in which it was found.
		 */
		if (sym->st_shndx != SHN_UNDEF) {
			*dlmp = ilmp;
			*binfo |= DBG_BINFO_FOUND;
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			break;

		/*
		 * If we find a match and the symbol is undefined, the
		 * symbol type is a function, and the value of the symbol
		 * is non-zero, then this is a special case.  This allows
		 * the resolution of a function address to the plt[] entry.
		 * See SPARC ABI, Dynamic Linking, Function Addresses for
		 * more details.
		 */
		} else if ((slp->sl_flags & LKUP_SPEC) &&
		    (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) &&
		    (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) {
			*dlmp = ilmp;
			*binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR);
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			return (sym);
		}

		/*
		 * Undefined symbol.
		 */
		return ((Sym *)0);
	}

	/*
	 * We've found a match.  Determine if the defining object contains
	 * symbol binding information.
	 */
	if ((sip = SYMINFO(ilmp)) != 0)
		sip += ndx;

	/*
	 * If this definition is a singleton, and we haven't followed a default
	 * symbol search knowing that we're looking for a singleton (presumably
	 * because the symbol definition has been changed since the referring
	 * object was built), then reject this binding so that the caller can
	 * fall back to a standard symbol search.
	 */
	if ((ELF_ST_VISIBILITY(sym->st_other) == STV_SINGLETON) &&
	    (((slp->sl_flags & LKUP_STANDARD) == 0) ||
	    (((slp->sl_flags & LKUP_SINGLETON) == 0) &&
	    (LIST(ilmp)->lm_flags & LML_FLG_GROUPSEXIST)))) {
		DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name,
		    DBG_BNDREJ_SINGLE));
		*binfo |= BINFO_REJSINGLE;
		*binfo &= ~DBG_BINFO_MSK;
		return ((Sym *)0);
	}

	/*
	 * If this is a direct binding request, but the symbol definition has
	 * disabled directly binding to it (presumably because the symbol
	 * definition has been changed since the referring object was built),
	 * indicate this failure so that the caller can fall back to a standard
	 * symbol search.
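	 * (A definition typically acquires SYMINFO_FLG_NOEXTDIRECT at
	 * link-edit time, for example via a NODIRECT declaration in a
	 * link-editor mapfile, or object-wide with `-B nodirect'.)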
	 */
	if (sip && (slp->sl_flags & LKUP_DIRECT) &&
	    (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) {
		DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name,
		    DBG_BNDREJ_NODIR));
		*binfo |= BINFO_REJDIRECT;
		*binfo &= ~DBG_BINFO_MSK;
		return ((Sym *)0);
	}

	/*
	 * Determine whether this object is acting as a filter.
	 */
	if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0)
		return (sym);

	/*
	 * Determine if this object offers per-symbol filtering, and if so,
	 * whether this symbol references a filtee.
	 */
	if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) {
		/*
		 * If this is a standard filter reference, and no standard
		 * filtees remain to be inspected, we're done.  If this is an
		 * auxiliary filter reference, and no auxiliary filtees remain,
		 * we'll fall through in case any object filtering is
		 * available.
		 */
		if ((sip->si_flags & SYMINFO_FLG_FILTER) &&
		    (SYMSFLTRCNT(ilmp) == 0))
			return ((Sym *)0);

		if ((sip->si_flags & SYMINFO_FLG_FILTER) ||
		    ((sip->si_flags & SYMINFO_FLG_AUXILIARY) &&
		    SYMAFLTRCNT(ilmp))) {
			Sym	*fsym;

			/*
			 * This symbol has an associated filtee.  Look up the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return an error, otherwise fall through to
			 * catch any object filtering that may be available.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    sip->si_boundto)) != 0)
				return (fsym);
			if (sip->si_flags & SYMINFO_FLG_FILTER)
				return ((Sym *)0);
		}
	}

	/*
	 * Determine if this object provides global filtering.
	 */
	if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) {
		Sym	*fsym;

		if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) {
			/*
			 * This object has an associated filtee.  Look up the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return an error, otherwise return the symbol
			 * within the filter itself.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    OBJFLTRNDX(ilmp))) != 0)
				return (fsym);
		}

		if (flags1 & FL1_RT_OBJSFLTR)
			return ((Sym *)0);
	}
	return (sym);
}

/*
 * Create a new Rt_map structure for an ELF object and initialize
 * all values.
 */
Rt_map *
elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld,
    ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry,
    ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt)
{
	Rt_map	*lmp;
	ulong_t	base, fltr = 0, audit = 0, cfile = 0, crle = 0;
	Xword	rpath = 0;
	Ehdr	*ehdr = (Ehdr *)addr;

	DBG_CALL(Dbg_file_elf(lml, pname, (ulong_t)ld, addr, msize, entry,
	    lml->lm_lmidstr, lmco));

	/*
	 * Allocate space for the link-map and private elf information.  Once
	 * these are allocated and initialized, we can use remove_so(0, lmp) to
	 * tear down the link-map should any failures occur.
	 */
	if ((lmp = calloc(sizeof (Rt_map), 1)) == 0)
		return (0);
	if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) {
		free(lmp);
		return (0);
	}

	/*
	 * All fields not filled in were set to 0 by calloc.
	 */
	ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname;
	DYN(lmp) = ld;
	ADDR(lmp) = addr;
	MSIZE(lmp) = msize;
	ENTRY(lmp) = (Addr)entry;
	SYMINTP(lmp) = elf_find_sym;
	ETEXT(lmp) = etext;
	FCT(lmp) = &elf_fct;
	LIST(lmp) = lml;
	PADSTART(lmp) = paddr;
	PADIMLEN(lmp) = padimsize;
	THREADID(lmp) = rt_thr_self();
	OBJFLTRNDX(lmp) = FLTR_DISABLED;
	SORTVAL(lmp) = -1;

	MMAPS(lmp) = mmaps;
	MMAPCNT(lmp) = mmapcnt;
	ASSERT(mmapcnt != 0);

	/*
	 * If this is a shared object, add the base address to each address.
	 * If this is an executable, use the address as is.
	 */
	if (ehdr->e_type == ET_EXEC) {
		base = 0;
		FLAGS(lmp) |= FLG_RT_FIXED;
	} else
		base = addr;

	/*
	 * Fill in the rest of the link-map entries with information from the
	 * file's dynamic structure.
	 */
	if (ld) {
		uint_t		dynndx = 0;
		Xword		pltpadsz = 0;
		Rti_desc	*rti;

		/* CSTYLED */
		for ( ; ld->d_tag != DT_NULL; ++ld, dynndx++) {
			switch ((Xword)ld->d_tag) {
			case DT_SYMTAB:
				SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMTAB:
				SUNWSYMTAB(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSZ:
				SUNWSYMSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_STRTAB:
				STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SYMENT:
				SYMENT(lmp) = ld->d_un.d_val;
				break;
			case DT_FEATURE_1:
				ld->d_un.d_val |= DTF_1_PARINIT;
				if (ld->d_un.d_val & DTF_1_CONFEXP)
					crle = 1;
				break;
			case DT_MOVESZ:
				MOVESZ(lmp) = ld->d_un.d_val;
				FLAGS(lmp) |= FLG_RT_MOVE;
				break;
			case DT_MOVEENT:
				MOVEENT(lmp) = ld->d_un.d_val;
				break;
			case DT_MOVETAB:
				MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_REL:
			case DT_RELA:
				/*
				 * At this time, ld.so.1 can only handle one
				 * type of relocation per object.
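				 * (A given object supplies either DT_REL or
				 * DT_RELA entries, never both: x86 objects,
				 * for example, use REL format relocations
				 * while SPARC objects use RELA, so both tags
				 * can safely share the one REL(lmp) field.)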
				 */
				REL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_RELSZ:
			case DT_RELASZ:
				RELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_RELENT:
			case DT_RELAENT:
				RELENT(lmp) = ld->d_un.d_val;
				break;
			case DT_RELCOUNT:
			case DT_RELACOUNT:
				RELACOUNT(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_TEXTREL:
				FLAGS1(lmp) |= FL1_RT_TEXTREL;
				break;
			case DT_HASH:
				HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTGOT:
				PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTRELSZ:
				PLTRELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_JMPREL:
				JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_INIT:
				if (ld->d_un.d_ptr != NULL)
					INIT(lmp) =
					    (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_FINI:
				if (ld->d_un.d_ptr != NULL)
					FINI(lmp) =
					    (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_INIT_ARRAY:
				INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_INIT_ARRAYSZ:
				INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_FINI_ARRAY:
				FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_FINI_ARRAYSZ:
				FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_PREINIT_ARRAY:
				PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_PREINIT_ARRAYSZ:
				PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_RPATH:
			case DT_RUNPATH:
				rpath = ld->d_un.d_val;
				break;
			case DT_FILTER:
				fltr = ld->d_un.d_val;
				OBJFLTRNDX(lmp) = dynndx;
				FLAGS1(lmp) |= FL1_RT_OBJSFLTR;
				break;
			case DT_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					fltr = ld->d_un.d_val;
					OBJFLTRNDX(lmp) = dynndx;
				}
				FLAGS1(lmp) |= FL1_RT_OBJAFLTR;
				break;
			case DT_SUNW_FILTER:
				SYMSFLTRCNT(lmp)++;
				FLAGS1(lmp) |= FL1_RT_SYMSFLTR;
				break;
			case DT_SUNW_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					SYMAFLTRCNT(lmp)++;
				}
				FLAGS1(lmp) |= FL1_RT_SYMAFLTR;
				break;
			case DT_DEPAUDIT:
				if (!(rtld_flags & RT_FL_NOAUDIT))
					audit = ld->d_un.d_val;
				break;
			case DT_CONFIG:
				cfile = ld->d_un.d_val;
				break;
			case DT_DEBUG:
				/*
				 * DT_DEBUG entries are only created in
				 * dynamic objects that require an interpreter
				 * (i.e., all dynamic executables and some
				 * shared objects), and provide for a
				 * hand-shake with debuggers.  This entry is
				 * initialized to zero by the link-editor.  If
				 * a debugger is monitoring us and has updated
				 * this entry, set the debugger flag, and
				 * finish initializing the debugging structure
				 * (see setup() also).  Switch off any
				 * configuration object use as most debuggers
				 * can't handle fixed dynamic executables as
				 * dependencies, and we can't handle requests
				 * like object padding for alternative objects.
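				 * (Illustrative sketch, not part of ld.so.1
				 * itself: a debugger typically recovers the
				 * r_debug structure through this same entry
				 * once we have initialized it below,
				 *
				 *	Dyn *dyn;
				 *	struct r_debug *rd;
				 *	for (dyn = dynamic;
				 *	    dyn->d_tag != DT_NULL; dyn++)
				 *		if (dyn->d_tag == DT_DEBUG)
				 *			rd = (struct r_debug *)
				 *			    dyn->d_un.d_ptr;
				 *
				 * where `dynamic' is the debugger's own
				 * handle on the target's .dynamic section.)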
				 */
				if (ld->d_un.d_ptr)
					rtld_flags |=
					    (RT_FL_DEBUGGER | RT_FL_NOOBJALT);
				ld->d_un.d_ptr = (Addr)&r_debug;
				break;
			case DT_VERNEED:
				VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_VERNEEDNUM:
				/* LINTED */
				VERNEEDNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_VERDEF:
				VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base);
				break;
			case DT_VERDEFNUM:
				/* LINTED */
				VERDEFNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_VERSYM:
				/*
				 * The Solaris ld does not produce DT_VERSYM,
				 * but the GNU ld does, in order to support
				 * their style of versioning, which differs
				 * from ours in some ways, while using the
				 * same data structures.  The presence of
				 * DT_VERSYM therefore means that GNU
				 * versioning rules apply to the given file.
				 * If DT_VERSYM is not present, then Solaris
				 * versioning rules apply.
				 */
				VERSYM(lmp) = (Versym *)(ld->d_un.d_ptr + base);
				break;
			case DT_BIND_NOW:
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				break;
			case DT_FLAGS:
				FLAGS2(lmp) |= FL2_RT_DTFLAGS;
				if (ld->d_un.d_val & DF_SYMBOLIC)
					FLAGS1(lmp) |= FL1_RT_SYMBOLIC;
				if (ld->d_un.d_val & DF_TEXTREL)
					FLAGS1(lmp) |= FL1_RT_TEXTREL;
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				/*
				 * Capture any static TLS use, and enforce that
				 * this object be non-deletable.
				 */
				if (ld->d_un.d_val & DF_STATIC_TLS) {
					FLAGS1(lmp) |= FL1_RT_TLSSTAT;
					MODE(lmp) |= RTLD_NODELETE;
				}
				break;
			case DT_FLAGS_1:
				if (ld->d_un.d_val & DF_1_DISPRELPND)
					FLAGS1(lmp) |= FL1_RT_DISPREL;
				if (ld->d_un.d_val & DF_1_GROUP)
					FLAGS(lmp) |=
					    (FLG_RT_SETGROUP | FLG_RT_HANDLE);
				if ((ld->d_un.d_val & DF_1_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				if (ld->d_un.d_val & DF_1_NODELETE)
					MODE(lmp) |= RTLD_NODELETE;
				if (ld->d_un.d_val & DF_1_INITFIRST)
					FLAGS(lmp) |= FLG_RT_INITFRST;
				if (ld->d_un.d_val & DF_1_NOOPEN)
					FLAGS(lmp) |= FLG_RT_NOOPEN;
				if (ld->d_un.d_val & DF_1_LOADFLTR)
					FLAGS(lmp) |= FLG_RT_LOADFLTR;
				if (ld->d_un.d_val & DF_1_NODUMP)
					FLAGS(lmp) |= FLG_RT_NODUMP;
				if (ld->d_un.d_val & DF_1_CONFALT)
					crle = 1;
				if (ld->d_un.d_val & DF_1_DIRECT)
					FLAGS1(lmp) |= FL1_RT_DIRECT;
				if (ld->d_un.d_val & DF_1_NODEFLIB)
					FLAGS1(lmp) |= FL1_RT_NODEFLIB;
				if (ld->d_un.d_val & DF_1_ENDFILTEE)
					FLAGS1(lmp) |= FL1_RT_ENDFILTE;
				if (ld->d_un.d_val & DF_1_TRANS)
					FLAGS(lmp) |= FLG_RT_TRANS;
#ifndef	EXPAND_RELATIVE
				if (ld->d_un.d_val & DF_1_ORIGIN)
					FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
				/*
				 * Global auditing is only meaningful when
				 * specified by the initiating object of the
				 * process - typically the dynamic executable.
				 * If this is the initiating object, its link-
				 * map will not yet have been added to the
				 * link-map list, and consequently the
				 * link-map list is empty.  (see setup()).
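				 * (DF_1_GLOBAUDIT is typically requested at
				 * link-edit time with ld's -z globalaudit
				 * option, extending the object's DT_DEPAUDIT
				 * auditors to cover every object in the
				 * process, much as LD_AUDIT does.)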
				 */
				if (ld->d_un.d_val & DF_1_GLOBAUDIT) {
					if (lml_main.lm_head == 0)
						FLAGS1(lmp) |= FL1_RT_GLOBAUD;
					else
						DBG_CALL(Dbg_audit_ignore(lmp));
				}

				/*
				 * If this object identifies itself as an
				 * interposer, but relocation processing has
				 * already started, then demote it.  It's too
				 * late to guarantee complete interposition.
				 */
				/* BEGIN CSTYLED */
				if (ld->d_un.d_val &
				    (DF_1_INTERPOSE | DF_1_SYMINTPOSE)) {
				    if (lml->lm_flags & LML_FLG_STARTREL) {
					DBG_CALL(Dbg_util_intoolate(lmp));
					if (lml->lm_flags & LML_FLG_TRC_ENABLE)
					    (void) printf(
						MSG_INTL(MSG_LDD_REL_ERR2),
						NAME(lmp));
				    } else if (ld->d_un.d_val & DF_1_INTERPOSE)
					FLAGS(lmp) |= FLG_RT_OBJINTPO;
				    else
					FLAGS(lmp) |= FLG_RT_SYMINTPO;
				}
				/* END CSTYLED */
				break;
			case DT_SYMINFO:
				SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_SYMINENT:
				SYMINENT(lmp) = ld->d_un.d_val;
				break;
			case DT_PLTPAD:
				PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTPADSZ:
				pltpadsz = ld->d_un.d_val;
				break;
			case DT_SUNW_RTLDINF:
				/*
				 * Maintain a list of RTLDINFO structures.
				 * Typically, libc is the only supplier, and
				 * only one structure is provided.  However,
				 * multiple suppliers and multiple structures
				 * are supported.  For example, one structure
				 * may provide thread_init, and another
				 * structure may provide atexit reservations.
				 */
				if ((rti = alist_append(&lml->lm_rti, 0,
				    sizeof (Rti_desc), AL_CNT_RTLDINFO)) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				rti->rti_lmp = lmp;
				rti->rti_info = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SORTENT:
				SUNWSORTENT(lmp) = ld->d_un.d_val;
				break;
			case DT_SUNW_SYMSORT:
				SUNWSYMSORT(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSORTSZ:
				SUNWSYMSORTSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_DEPRECATED_SPARC_REGISTER:
			case M_DT_REGISTER:
				FLAGS(lmp) |= FLG_RT_REGSYMS;
				break;
			case M_DT_PLTRESERVE:
				PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr +
				    base);
				break;
			}
		}

		if (PLTPAD(lmp)) {
			if (pltpadsz == (Xword)0)
				PLTPAD(lmp) = 0;
			else
				PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) +
				    pltpadsz);
		}

		/*
		 * Allocate a Dynamic Info structure.
		 */
		if ((DYNINFO(lmp) = calloc((size_t)dynndx,
		    sizeof (Dyninfo))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
		DYNINFOCNT(lmp) = dynndx;
	}

	/*
	 * A dynsym contains only global symbols.  We want to have
	 * a version of it that also includes local functions, so that
	 * dladdr() will be able to report names for local functions
	 * when used to generate a stack trace for a stripped file.
	 * This version of the dynsym is provided via DT_SUNW_SYMTAB.
	 *
	 * In producing DT_SUNW_SYMTAB, ld uses a non-obvious trick
	 * in order to avoid having two copies of the global
	 * symbols held in DT_SYMTAB:  The local symbols are placed in
	 * a separate section from the globals in the dynsym, but the
	 * linker conspires to put the data for these two sections adjacent
	 * to each other.  DT_SUNW_SYMTAB points at the top of the local
	 * symbols, and DT_SUNW_SYMSZ is the combined length of both tables.
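	 *
	 * Pictorially, with both tags present:
	 *
	 *	DT_SUNW_SYMTAB -> | local symbols  |   (.ldynsym data)
	 *	DT_SYMTAB ------> | global symbols |   (.dynsym data)
	 *
	 * where DT_SUNW_SYMSZ spans both regions.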
	 *
	 * If the two sections are not adjacent, then something went wrong
	 * at link time.  We use ASSERT to kill the process if this is
	 * a debug build.  In a production build, we will silently ignore
	 * the presence of the .ldynsym and proceed.  We can detect this
	 * situation by checking to see that DT_SYMTAB lies in
	 * the range given by DT_SUNW_SYMTAB/DT_SUNW_SYMSZ.
	 */
	if ((SUNWSYMTAB(lmp) != NULL) &&
	    (((char *)SYMTAB(lmp) <= (char *)SUNWSYMTAB(lmp)) ||
	    (((char *)SYMTAB(lmp) >=
	    (SUNWSYMSZ(lmp) + (char *)SUNWSYMTAB(lmp)))))) {
		ASSERT(0);
		SUNWSYMTAB(lmp) = NULL;
		SUNWSYMSZ(lmp) = 0;
	}

	/*
	 * If configuration file use hasn't been disabled, and a configuration
	 * file hasn't already been set via an environment variable, see if any
	 * application specific configuration file is specified.  An LD_CONFIG
	 * setting is used first, but if this image was generated via crle(1)
	 * then a default configuration file is a fall-back.
	 */
	if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) {
		if (cfile)
			config->c_name = (const char *)(cfile +
			    (char *)STRTAB(lmp));
		else if (crle) {
			rtld_flags |= RT_FL_CONFAPP;
#ifndef	EXPAND_RELATIVE
			FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
		}
	}

	if (rpath)
		RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp));
	if (fltr) {
		/*
		 * If this object is a global filter, duplicate the filtee
		 * string name(s) so that REFNAME() is available in core files.
		 * This kludge was useful for debuggers at one point, but only
		 * when the filtee name was an individual full path.
		 */
		if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
	}

	if (rtld_flags & RT_FL_RELATIVE)
		FLAGS1(lmp) |= FL1_RT_RELATIVE;

	/*
	 * For Intel ABI compatibility.  It's possible that a JMPREL can be
	 * specified without any other relocations (e.g. a dynamic executable
	 * normally only contains .plt relocations).  If this is the case then
	 * no REL, RELSZ or RELENT will have been created.  For us to be able
	 * to traverse the .plt relocations under LD_BIND_NOW we need to know
	 * the RELENT for these relocations.  Refer to elf_reloc() for more
	 * details.
	 */
	if (!RELENT(lmp) && JMPREL(lmp))
		RELENT(lmp) = sizeof (Rel);

	/*
	 * Establish any per-object auditing.  If we're establishing `main's
	 * link-map, it's too early to go searching for audit objects so just
	 * hold the object name for later (see setup()).
	 */
	if (audit) {
		char	*cp = audit + (char *)STRTAB(lmp);

		if (*cp) {
			if (((AUDITORS(lmp) =
			    calloc(1, sizeof (Audit_desc))) == 0) ||
			    ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) {
				remove_so(0, lmp);
				return (0);
			}
			if (lml_main.lm_head) {
				if (audit_setup(lmp, AUDITORS(lmp), 0) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags;
				lml->lm_flags |= LML_FLG_LOCAUDIT;
			}
		}
	}

	if ((CONDVAR(lmp) = rt_cond_create()) == 0) {
		remove_so(0, lmp);
		return (0);
	}
	if (oname && ((append_alias(lmp, oname, 0)) == 0)) {
		remove_so(0, lmp);
		return (0);
	}

	/*
	 * Add the mapped object to the end of the link-map list.
	 */
	lm_append(lml, lmco, lmp);
	return (lmp);
}

/*
 * Assign hardware/software capabilities.
 */
void
cap_assign(Cap *cap, Rt_map *lmp)
{
	while (cap->c_tag != CA_SUNW_NULL) {
		switch (cap->c_tag) {
		case CA_SUNW_HW_1:
			HWCAP(lmp) = cap->c_un.c_val;
			break;
		case CA_SUNW_SF_1:
			SFCAP(lmp) = cap->c_un.c_val;
		}
		cap++;
	}
}

/*
 * Map in an ELF object.
 * Takes an open file descriptor for the object to map and its pathname;
 * returns a pointer to a Rt_map structure for this object, or 0 on error.
 */
static Rt_map *
elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname,
    int fd)
{
	int		i;		/* general temporary */
	Off		memsize = 0;	/* total memory size of pathname */
	Off		mentry;		/* entry point */
	Ehdr		*ehdr;		/* ELF header of ld.so */
	Phdr		*phdr;		/* first Phdr in file */
	Phdr		*phdr0;		/* Saved first Phdr in file */
	Phdr		*pptr;		/* working Phdr */
	Phdr		*fph = 0;	/* first loadable Phdr */
	Phdr		*lph;		/* last loadable Phdr */
	Phdr		*lfph = 0;	/* last loadable (filesz != 0) Phdr */
	Phdr		*lmph = 0;	/* last loadable (memsz != 0) Phdr */
	Phdr		*swph = 0;	/* program header for SUNWBSS */
	Phdr		*tlph = 0;	/* program header for PT_TLS */
	Phdr		*unwindph = 0;	/* program header for PT_SUNW_UNWIND */
	Cap		*cap = 0;	/* program header for SUNWCAP */
	Dyn		*mld = 0;	/* DYNAMIC structure for pathname */
	size_t		size;		/* size of elf and program headers */
	caddr_t		faddr = 0;	/* mapping address of pathname */
	Rt_map		*lmp;		/* link map created */
	caddr_t		paddr;		/* start of padded image */
	Off		plen;		/* size of image including padding */
	Half		etype;
	int		fixed;
	Mmap		*mmaps;
	uint_t		mmapcnt = 0;
	Xword		align = 0;

	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;

	/*
	 * If this is a relocatable object then special processing is required.
	 */
	if ((etype = ehdr->e_type) == ET_REL)
		return (elf_obj_file(lml, lmco, pname, fd));

	/*
	 * If this isn't a dynamic executable or shared object we can't process
	 * it.  If this is a dynamic executable then all addresses are fixed.
	 */
	if (etype == ET_EXEC) {
		fixed = 1;
	} else if (etype == ET_DYN) {
		fixed = 0;
	} else {
		Conv_inv_buf_t inv_buf;

		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname,
		    conv_ehdr_type(etype, 0, &inv_buf));
		return (0);
	}

	/*
	 * If our original mapped page was not large enough to hold all the
	 * program headers, remap them.
	 */
	size = (size_t)((char *)ehdr->e_phoff +
	    (ehdr->e_phnum * ehdr->e_phentsize));
	if (size > fmap->fm_fsize) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}
	if (size > fmap->fm_msize) {
		fmap_setup();
		if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ,
		    fmap->fm_mflags, fd, 0)) == MAP_FAILED) {
			int	err = errno;
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname,
			    strerror(err));
			return (0);
		}
		fmap->fm_msize = size;
		/* LINTED */
		ehdr = (Ehdr *)fmap->fm_maddr;
	}
	/* LINTED */
	phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize);

	/*
	 * Get entry point.
	 */
	mentry = ehdr->e_entry;

	/*
	 * Point at program headers and perform some basic validation.
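	 * The loop below records the first and last loadable entries, and
	 * rejects an object whose PT_LOAD entries are not sorted by
	 * ascending p_vaddr, an ordering the remainder of the mapping
	 * logic depends upon.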
	 */
	for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
	    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
		if ((pptr->p_type == PT_LOAD) ||
		    (pptr->p_type == PT_SUNWBSS)) {

			if (fph == 0) {
				fph = pptr;
			/* LINTED argument lph is initialized in first pass */
			} else if (pptr->p_vaddr <= lph->p_vaddr) {
				eprintf(lml, ERR_ELF,
				    MSG_INTL(MSG_GEN_INVPRGHDR), pname);
				return (0);
			}

			lph = pptr;

			if (pptr->p_memsz)
				lmph = pptr;
			if (pptr->p_filesz)
				lfph = pptr;
			if (pptr->p_type == PT_SUNWBSS)
				swph = pptr;
			if (pptr->p_align > align)
				align = pptr->p_align;

		} else if (pptr->p_type == PT_DYNAMIC) {
			mld = (Dyn *)(pptr->p_vaddr);
		} else if ((pptr->p_type == PT_TLS) && pptr->p_memsz) {
			tlph = pptr;
		} else if (pptr->p_type == PT_SUNWCAP) {
			cap = (Cap *)(pptr->p_vaddr);
		} else if (pptr->p_type == PT_SUNW_UNWIND) {
			unwindph = pptr;
		}
	}

#if defined(MAP_ALIGN)
	/*
	 * Make sure the maximum page alignment is a power of 2 >= the default
	 * segment alignment, for use with MAP_ALIGN.
	 */
	align = S_ROUND(align, M_SEGM_ALIGN);
#endif

	/*
	 * We'd better have at least one loadable segment, together with some
	 * specified file and memory size.
	 */
	if ((fph == 0) || (lmph == 0) || (lfph == 0)) {
		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname);
		return (0);
	}

	/*
	 * Check that the file's size accounts for the loadable sections
	 * we're going to map in (failure to do this may cause spurious
	 * bus errors if we're given a truncated file).
	 */
	if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}

	/*
	 * Memsize must be page rounded so that if we add object padding
	 * at the end it will start at the beginning of a page.
	 */
	plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) -
	    M_PTRUNC((ulong_t)fph->p_vaddr));

	/*
	 * Determine if an existing mapping is acceptable.
	 */
	if (interp && (lml->lm_flags & LML_FLG_BASELM) &&
	    (strcmp(pname, interp->i_name) == 0)) {
		/*
		 * If this is the interpreter then it has already been mapped
		 * and we have the address so don't map it again.  Note that
		 * the common occurrence of a reference to the interpreter
		 * (libdl -> ld.so.1) will have been caught during filter
		 * initialization (see elf_lookup_filtee()).  However, some
		 * ELF implementations are known to record libc.so.1 as the
		 * interpreter, and thus this test catches this behavior.
		 */
		paddr = faddr = interp->i_faddr;

	} else if ((fixed == 0) && (r_debug.rtd_objpad == 0) &&
	    (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) &&
	    (fph == lph) && (fph->p_filesz == fph->p_memsz) &&
	    (((Xword)fmap->fm_maddr % align) == 0)) {
		size_t	rsize;

		/*
		 * If the file contains a single segment, and the mapping
		 * required has already been established from the initial fmap
		 * mapping, then we don't need to do anything more.  Reset the
		 * fmap address so that any later files start a new fmap.  This
		 * is really an optimization for filters, such as libdl.so,
		 * libthread, etc. that are constructed to be a single text
		 * segment.
		 */
		paddr = faddr = fmap->fm_maddr;

		/*
		 * Free any unused mapping by assigning the fmap buffer to the
		 * unused region.  fmap_setup() will unmap this area and
		 * establish defaults for future mappings.
		 */
		rsize = M_PROUND(fph->p_filesz);
		fmap->fm_maddr += rsize;
		fmap->fm_msize -= rsize;
		fmap_setup();
	}

	/*
	 * Allocate a mapping array to retain mapped segment information.
	 */
	if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0)
		return (0);

	/*
	 * If we're reusing an existing mapping, determine the object's etext
	 * address.  Otherwise map the file (which will calculate the etext
	 * address as part of the mapping process).
	 */
	if (faddr) {
		caddr_t	base;

		if (fixed)
			base = 0;
		else
			base = faddr;

		/* LINTED */
		phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize);

		for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
		    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
			if (pptr->p_type != PT_LOAD)
				continue;

			mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base);
			mmaps[mmapcnt].m_msize = pptr->p_memsz;
			mmaps[mmapcnt].m_fsize = pptr->p_filesz;
			mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC);
			mmapcnt++;

			if (!(pptr->p_flags & PF_W)) {
				fmap->fm_etext = (ulong_t)pptr->p_vaddr +
				    (ulong_t)pptr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
			}
		}
	} else {
		/*
		 * Map the file.
		 */
		if (!(faddr = elf_map_it(lml, pname, memsize, ehdr, fph, lph,
		    &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt)))
			return (0);
	}

	/*
	 * Calculate absolute base addresses and entry points.
	 */
	if (!fixed) {
		if (mld)
			/* LINTED */
			mld = (Dyn *)((Off)mld + faddr);
		if (cap)
			/* LINTED */
			cap = (Cap *)((Off)cap + faddr);
		mentry += (Off)faddr;
	}

	/*
	 * Create new link map structure for newly mapped shared object.
	 */
	if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr,
	    fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps,
	    mmapcnt))) {
		(void) munmap((caddr_t)faddr, memsize);
		return (0);
	}

	/*
	 * Encourage the system to begin paging in the ELF information we'll
	 * be processing.
	 */
	if (REL(lmp)) {
		(void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) +
		    (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp),
		    MADV_WILLNEED);
	}

	/*
	 * If this shared object contains any special segments, record them.
	 */
	if (swph) {
		FLAGS(lmp) |= FLG_RT_SUNWBSS;
		SUNWBSS(lmp) = phdr + (swph - phdr0);
	}
	if (tlph && (tls_assign(lml, lmp, (phdr + (tlph - phdr0))) == 0)) {
		remove_so(lml, lmp);
		return (0);
	}

	if (unwindph)
		PTUNWIND(lmp) = phdr + (unwindph - phdr0);

	if (cap)
		cap_assign(cap, lmp);

	return (lmp);
}

/*
 * Function to correct protection settings.  Segments are all mapped initially
 * with permissions as given in the segment header.  We need to turn on write
 * permissions on a text segment if there are any relocations against that
 * segment, and then turn write permissions back off again before returning
 * control to the user.  This function turns the permission on or off depending
 * on the value of the argument.
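 *
 * A sketch of the typical calling pattern (elf_reloc() is the caller in
 * this file's orbit; the exact surrounding logic there may differ):
 *
 *	if ((FLAGS1(lmp) & FL1_RT_TEXTREL) &&
 *	    (elf_set_prot(lmp, PROT_WRITE) == 0))
 *		return (0);
 *	... apply relocations ...
 *	if (FLAGS1(lmp) & FL1_RT_TEXTREL)
 *		(void) elf_set_prot(lmp, 0);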
 */
int
elf_set_prot(Rt_map *lmp, int permission)
{
	Mmap	*mmaps;

	/*
	 * If this is an allocated image (i.e., a relocatable object) we can't
	 * mprotect() anything.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC)
		return (1);

	DBG_CALL(Dbg_file_prot(lmp, permission));

	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) {
		if (mmaps->m_perm & PROT_WRITE)
			continue;

		if (mprotect(mmaps->m_vaddr, mmaps->m_msize,
		    (mmaps->m_perm | permission)) == -1) {
			int	err = errno;
			eprintf(LIST(lmp), ERR_FATAL, MSG_INTL(MSG_SYS_MPROT),
			    NAME(lmp), strerror(err));
			return (0);
		}
	}
	return (1);
}

/*
 * Build the full pathname of a shared object from the given directory name
 * and filename.
 */
static char *
elf_get_so(const char *dir, const char *file)
{
	static char	pname[PATH_MAX];

	(void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file);
	return (pname);
}

/*
 * The copy relocation is recorded in a copy structure which will be applied
 * after all other relocations are carried out.  This provides for copying data
 * that must be relocated itself (i.e., pointers in shared objects).  This
 * structure also provides a means of binding RTLD_GROUP dependencies to any
 * copy relocations that have been taken from any group members.
 *
 * If the size of the .bss area available for the copy information is not the
 * same as the source of the data, inform the user if we're under ldd(1)
 * control (this checking was only established in 5.3, so by only issuing an
 * error via ldd(1) we maintain the standard set by previous releases).
 */
int
elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym,
    Rt_map *dlmp, const void *dadd)
{
	Rel_copy	rc;
	Lm_list		*lml = LIST(rlmp);

	rc.r_name = name;
	rc.r_rsym = rsym;		/* the new reference symbol and its */
	rc.r_rlmp = rlmp;		/*	associated link-map */
	rc.r_dlmp = dlmp;		/* the defining link-map */
	rc.r_dsym = dsym;		/* the original definition */
	rc.r_radd = radd;
	rc.r_dadd = dadd;

	if (rsym->st_size > dsym->st_size)
		rc.r_size = (size_t)dsym->st_size;
	else
		rc.r_size = (size_t)rsym->st_size;

	if (alist_append(&COPY_R(dlmp), &rc, sizeof (Rel_copy),
	    AL_CNT_COPYREL) == 0) {
		if (!(lml->lm_flags & LML_FLG_TRC_WARN))
			return (0);
		else
			return (1);
	}
	if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) {
		if (aplist_append(&COPY_S(rlmp), dlmp,
		    AL_CNT_COPYREL) == NULL) {
			if (!(lml->lm_flags & LML_FLG_TRC_WARN))
				return (0);
			else
				return (1);
		}
		FLAGS1(dlmp) |= FL1_RT_COPYTOOK;
	}

	/*
	 * If we are tracing (ldd), warn the user if
	 *	1) the size from the reference symbol differs from the
	 *	   copy definition.  We can only copy as much data as the
	 *	   reference (dynamic executable's) entry allows.
	 *	2) the copy definition has STV_PROTECTED visibility.
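	 *
	 * For example, a dynamic executable that references a data item
	 * such as environ from libc receives an R_*_COPY relocation: the
	 * data is copied into the executable's .bss, and all other objects
	 * are then bound to that copy.  The size check matters because the
	 * copy destination's size is fixed at the executable's link-edit.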
	 */
	if (lml->lm_flags & LML_FLG_TRC_WARN) {
		if (rsym->st_size != dsym->st_size) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(rlmp), EC_XWORD(rsym->st_size),
			    NAME(dlmp), EC_XWORD(dsym->st_size));
			if (rsym->st_size > dsym->st_size)
				(void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA),
				    NAME(dlmp));
			else
				(void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC),
				    NAME(rlmp));
		}

		if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_PROT),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(dlmp));
		}
	}

	DBG_CALL(Dbg_reloc_apply_val(lml, ELF_DBG_RTLD, (Xword)radd,
	    (Xword)rc.r_size));
	return (1);
}

/*
 * Determine the symbol location of an address within a link-map.  Look for
 * the nearest symbol (whose value is less than or equal to the required
 * address).  This is the object specific part of dladdr().
 */
static void
elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags)
{
	ulong_t		ndx, cnt, base, _value;
	Sym		*sym, *_sym = NULL;
	const char	*str;
	int		_flags;
	uint_t		*dynaddr_ndx;
	uint_t		dynaddr_n = 0;
	ulong_t		value;

	/*
	 * If SUNWSYMTAB() is non-NULL, then it references a special version of
	 * the dynsym that starts with any local function symbols that exist in
	 * the library and then moves to the data held in SYMTAB().  In this
	 * case, SUNWSYMSZ tells us how long the symbol table is.  The
	 * availability of local function symbols will enhance the results
	 * we can provide.
	 *
	 * If SUNWSYMTAB() is non-NULL, then there might also be a
	 * SUNWSYMSORT() vector associated with it.  SUNWSYMSORT() contains
	 * an array of indices into SUNWSYMTAB, sorted by increasing
	 * address.  We can use this to do an O(log N) search instead of a
	 * brute force search.
	 *
	 * If SUNWSYMTAB() is NULL, then SYMTAB() references a dynsym that
	 * contains only global symbols.  In that case, the length of
	 * the symbol table comes from the nchain field of the related
	 * symbol lookup hash table.
	 */
	str = STRTAB(lmp);
	if (SUNWSYMSZ(lmp) == NULL) {
		sym = SYMTAB(lmp);
		/*
		 * If we don't have a .hash table there are no symbols
		 * to look at.
		 */
		if (HASH(lmp) == 0)
			return;
		cnt = HASH(lmp)[1];
	} else {
		sym = SUNWSYMTAB(lmp);
		cnt = SUNWSYMSZ(lmp) / SYMENT(lmp);
		dynaddr_ndx = SUNWSYMSORT(lmp);
		if (dynaddr_ndx != NULL)
			dynaddr_n = SUNWSYMSORTSZ(lmp) / SUNWSORTENT(lmp);
	}

	if (FLAGS(lmp) & FLG_RT_FIXED)
		base = 0;
	else
		base = ADDR(lmp);

	if (dynaddr_n > 0) {		/* Binary search */
		long	low = 0, low_bnd;
		long	high = dynaddr_n - 1, high_bnd;
		long	mid;
		Sym	*mid_sym;

		/*
		 * Note that SUNWSYMSORT only contains symbol types that
		 * supply memory addresses, so there's no need to check and
		 * filter out any other types.
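		 *
		 * The search below also maintains [low_bnd, high_bnd], the
		 * narrowest range known to contain the nearest symbol whose
		 * value does not exceed addr; if no exact match is found, a
		 * short linear scan over that range completes the lookup.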
		 */
		low_bnd = low;
		high_bnd = high;
		while (low <= high) {
			mid = (low + high) / 2;
			mid_sym = &sym[dynaddr_ndx[mid]];
			value = mid_sym->st_value + base;
			if (addr < value) {
				if ((sym[dynaddr_ndx[high]].st_value + base) >=
				    addr)
					high_bnd = high;
				high = mid - 1;
			} else if (addr > value) {
				if ((sym[dynaddr_ndx[low]].st_value + base) <=
				    addr)
					low_bnd = low;
				low = mid + 1;
			} else {
				_sym = mid_sym;
				_value = value;
				break;
			}
		}
		/*
		 * If the above didn't find it exactly, then we must
		 * return the closest symbol with a value that doesn't
		 * exceed the one we are looking for.  If that symbol exists,
		 * it will lie in the range bounded by low_bnd and
		 * high_bnd.  This is a linear search, but a short one.
		 */
		if (_sym == NULL) {
			for (mid = low_bnd; mid <= high_bnd; mid++) {
				mid_sym = &sym[dynaddr_ndx[mid]];
				value = mid_sym->st_value + base;
				if (addr >= value) {
					_sym = mid_sym;
					_value = value;
				} else {
					break;
				}
			}
		}
	} else {			/* Linear search */
		for (_value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) {
			/*
			 * Skip expected symbol types that are not functions
			 * or data:
			 *	- A symbol table starts with an undefined
			 *	  symbol in slot 0.  If we are using
			 *	  SUNWSYMTAB(), there will be a second
			 *	  undefined symbol right before the globals.
			 *	- The local part of SUNWSYMTAB() contains a
			 *	  series of function symbols.  Each section
			 *	  starts with an initial STT_FILE symbol.
			 */
			if ((sym->st_shndx == SHN_UNDEF) ||
			    (ELF_ST_TYPE(sym->st_info) == STT_FILE))
				continue;

			value = sym->st_value + base;
			if (value > addr)
				continue;
			if (value < _value)
				continue;

			_sym = sym;
			_value = value;

			/*
			 * Note that because we accept local and global
			 * symbols we could find a section symbol that
			 * matches the associated address, which means that
			 * the symbol name will be null.  In this case
			 * continue the search in case we can find a global
			 * symbol of the same value.
			 */
			if ((value == addr) &&
			    (ELF_ST_TYPE(sym->st_info) != STT_SECTION))
				break;
		}
	}

	_flags = flags & RTLD_DL_MASK;
	if (_sym) {
		if (_flags == RTLD_DL_SYMENT)
			*info = (void *)_sym;
		else if (_flags == RTLD_DL_LINKMAP)
			*info = (void *)lmp;

		dlip->dli_sname = str + _sym->st_name;
		dlip->dli_saddr = (void *)_value;
	} else {
		/*
		 * addr lies between the beginning of the mapped segment and
		 * the first global symbol.  We have no symbol to return
		 * and the caller requires one.  We use _START_, the base
		 * address of the mapping.
		 */

		if (_flags == RTLD_DL_SYMENT) {
			/*
			 * An actual symbol struct is needed, so we
			 * construct one for _START_.  To do this in a
			 * fully accurate way requires a different symbol
			 * for each mapped segment.  This requires the
			 * use of dynamic memory and a mutex.  That's too much
			 * plumbing for a fringe case of limited importance.
			 *
			 * Fortunately, we can simplify:
			 *	- Only the st_size and st_info fields are
			 *	  useful outside of the linker internals.
			 *	  The others reference things that outside
			 *	  code cannot see, and can be set to 0.
			 *	- It's just a label and there is no size
			 *	  to report.  So, the size should be 0.
			 * This means that only st_info needs a non-zero
			 * (constant) value.  A static struct will suffice.
			 * It must be const (readonly) so the caller can't
			 * change its meaning for subsequent callers.
			 */
			static const Sym fsym = { 0, 0, 0,
			    ELF_ST_INFO(STB_LOCAL, STT_OBJECT) };
			*info = (void *)&fsym;
		}

		dlip->dli_sname = MSG_ORIG(MSG_SYM_START);
		dlip->dli_saddr = (void *)ADDR(lmp);
	}
}

static void
elf_lazy_cleanup(APlist *alp)
{
	Rt_map	*lmp;
	Aliste	idx;

	/*
	 * Clean up any link-maps added to this dynamic list and free it.
	 */
	for (APLIST_TRAVERSE(alp, idx, lmp))
		FLAGS(lmp) &= ~FLG_RT_TMPLIST;
	free(alp);
}

/*
 * This routine is called as a last fall-back to search for a symbol from a
 * standard relocation.  To maintain lazy loading's goal of reducing the
 * number of objects mapped, any symbol search is first carried out using the
 * objects that already exist in the process (either on a link-map list or
 * handle).  If a symbol can't be found, and lazy dependencies are still
 * pending, this routine loads the dependencies in an attempt to locate the
 * symbol.
 *
 * Only new objects are inspected as we will have already inspected presently
 * loaded objects before calling this routine.  However, a new object may not
 * be new - although the di_lmp might be zero, the object may have been mapped
 * as someone else's dependency.  Thus there's a possibility of some symbol
 * search duplication.
 */
Sym *
elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo)
{
	Sym		*sym = 0;
	APlist		*alist = NULL;
	Aliste		idx;
	Rt_map		*lmp1, *lmp = slp->sl_imap;
	const char	*name = slp->sl_name;

	/*
	 * Generate a local list of new objects to process.  This list can grow
	 * as each object supplies its own lazy dependencies.
	 */
	if (aplist_append(&alist, lmp, AL_CNT_LAZYFIND) == NULL)
		return (NULL);
	FLAGS(lmp) |= FLG_RT_TMPLIST;

	for (APLIST_TRAVERSE(alist, idx, lmp1)) {
		uint_t	cnt = 0;
		Slookup	sl = *slp;
		Dyninfo	*dip, *pdip;

		/*
		 * Discard any relocation index from further symbol searches.
		 * This index will have already been used to trigger any
		 * necessary lazy-loads, and it might be because one of these
		 * lazy loads has failed that we're here performing this
		 * fallback.  By removing the relocation index we don't try
		 * to perform the same failed lazy loading activity again.
		 */
		sl.sl_rsymndx = 0;

		/*
		 * Loop through the lazy DT_NEEDED entries examining each
		 * object for the required symbol.  If the symbol is not
		 * found, the object is in turn added to the local alist, so
		 * that the object's lazy DT_NEEDED entries can be examined.
		 */
		lmp = lmp1;
		for (dip = DYNINFO(lmp), pdip = NULL; cnt < DYNINFOCNT(lmp);
		    cnt++, pdip = dip++) {
			Rt_map *nlmp;

			if (((dip->di_flags & FLG_DI_LAZY) == 0) ||
			    dip->di_info)
				continue;

			/*
			 * If this object has already failed to lazy load, and
			 * we're still processing the same runtime linker
			 * operation that produced the failure, don't bother
			 * to try and load the object again.
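			 * (The preceding FLG_DI_POSFLAG1 entry's di_info
			 * field is overloaded to record the ld_entry_cnt in
			 * effect when the failure occurred; a matching count
			 * below means we are still within that same
			 * operation.)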
			 */
			if ((dip->di_flags & FLG_DI_LAZYFAIL) && pdip &&
			    (pdip->di_flags & FLG_DI_POSFLAG1)) {
				if (pdip->di_info == (void *)ld_entry_cnt)
					continue;

				dip->di_flags &= ~FLG_DI_LAZYFAIL;
				pdip->di_info = NULL;
			}

			/*
			 * Try loading this lazy dependency.  If the object
			 * can't be loaded, consider this non-fatal and
			 * continue the search.  Lazy loaded dependencies need
			 * not exist, and their loading should only turn out
			 * to be fatal if they are required to satisfy a
			 * relocation.
			 *
			 * If the file is already loaded and relocated, we
			 * must still inspect it for symbols, even though it
			 * might have already been searched.  This lazy load
			 * operation might have promoted the permissions of
			 * the object, and thus made the object applicable
			 * for this symbol search, whereas before the object
			 * might have been skipped.
			 */
			if ((nlmp = elf_lazy_load(lmp, &sl, cnt, name)) == 0)
				continue;

			/*
			 * If this object isn't yet a part of the dynamic
			 * list, then inspect it for the symbol.  If the
			 * symbol isn't found, add the object to the dynamic
			 * list so that we can inspect its dependencies.
			 */
			if (FLAGS(nlmp) & FLG_RT_TMPLIST)
				continue;

			sl.sl_imap = nlmp;
			if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, binfo))
				break;

			/*
			 * Some dlsym() operations are already traversing a
			 * link-map (dlopen(0)), and thus there's no need to
			 * build our own dynamic dependency list.
			 */
			if ((sl.sl_flags & LKUP_NODESCENT) == 0) {
				if (aplist_append(&alist, nlmp,
				    AL_CNT_LAZYFIND) == 0) {
					elf_lazy_cleanup(alist);
					return (0);
				}
				FLAGS(nlmp) |= FLG_RT_TMPLIST;
			}
		}
		if (sym)
			break;
	}

	elf_lazy_cleanup(alist);
	return (sym);
}

/*
 * Warning message for bad r_offset.
 */
void
elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset,
    ulong_t rsymndx)
{
	const char	*name = (char *)0;
	Lm_list		*lml = LIST(lmp);
	int		trace;

	if ((lml->lm_flags & LML_FLG_TRC_ENABLE) &&
	    (((rtld_flags & RT_FL_SILENCERR) == 0) ||
	    (lml->lm_flags & LML_FLG_TRC_VERBOSE)))
		trace = 1;
	else
		trace = 0;

	if ((trace == 0) && (DBG_ENABLED == 0))
		return;

	if (rsymndx) {
		Sym	*symref = (Sym *)((ulong_t)SYMTAB(lmp) +
		    (rsymndx * SYMENT(lmp)));

		if (ELF_ST_BIND(symref->st_info) != STB_LOCAL)
			name = (char *)(STRTAB(lmp) + symref->st_name);
	}

	if (name == 0)
		name = MSG_ORIG(MSG_STR_EMPTY);

	if (trace) {
		const char *rstr;

		rstr = _conv_reloc_type((uint_t)rtype);
		(void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name,
		    EC_ADDR(roffset));
		return;
	}

	Dbg_reloc_error(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, name);
}

/*
 * Resolve a static TLS relocation.
 */
long
elf_static_tls(Rt_map *lmp, Sym *sym, void *rel, uchar_t rtype, char *name,
    ulong_t roffset, long value)
{
	Lm_list	*lml = LIST(lmp);

	/*
	 * Relocations against a static TLS block have limited support once
	 * process initialization has completed.  Any error condition should
	 * be discovered by testing for DF_STATIC_TLS as part of loading an
	 * object; however, individual relocations are tested in case the
	 * dynamic flag had not been set when this object was built.
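	 *
	 * (For example, an object built from code using the initial-exec
	 * TLS model, e.g. a file containing
	 *
	 *	static __thread int	tlsvar;
	 *
	 * binds tlsvar into the static TLS block, and the link-editor is
	 * expected to record DF_STATIC_TLS in that object's DT_FLAGS.)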
	 */
	if (PTTLS(lmp) == 0) {
		DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
		    _conv_reloc_type((uint_t)rtype), NAME(lmp),
		    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
		return (0);
	}

	/*
	 * If no static TLS has been set aside for this object, determine if
	 * any can be obtained.  Enforce that any object using static TLS is
	 * non-deletable.
	 */
	if (TLSSTATOFF(lmp) == 0) {
		FLAGS1(lmp) |= FL1_RT_TLSSTAT;
		MODE(lmp) |= RTLD_NODELETE;

		if (tls_assign(lml, lmp, PTTLS(lmp)) == 0) {
			DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
			    M_REL_SHT_TYPE, rel, NULL, name));
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
			    _conv_reloc_type((uint_t)rtype), NAME(lmp),
			    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
			return (0);
		}
	}

	/*
	 * Typically, a static TLS offset is maintained as a symbol's value.
	 * For local symbols that are not a part of the dynamic symbol table,
	 * the TLS relocation points to a section symbol, and the static TLS
	 * offset was deposited in the associated GOT table.  Make sure the
	 * GOT is cleared, so that the value isn't reused in do_reloc().
	 */
	if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) {
		if ((ELF_ST_TYPE(sym->st_info) == STT_SECTION)) {
			value = *(long *)roffset;
			*(long *)roffset = 0;
		} else {
			value = sym->st_value;
		}
	}
	return (-(TLSSTATOFF(lmp) - value));
}

/*
 * If the symbol is not found and the reference was not to a weak symbol,
 * report an error.  Weak references may be unresolved.
 */
int
elf_reloc_error(Rt_map *lmp, const char *name, void *rel, uint_t binfo)
{
	Lm_list	*lml = LIST(lmp);

	/*
	 * Under crle(1), relocation failures are ignored.
	 */
	if (lml->lm_flags & LML_FLG_IGNRELERR)
		return (1);

	/*
	 * Under ldd(1), unresolved references are reported.  However, if the
	 * original reference is EXTERN or PARENT these references are ignored
	 * unless ldd's -p option is in effect.
	 */
	if (lml->lm_flags & LML_FLG_TRC_WARN) {
		if (((binfo & DBG_BINFO_REF_MSK) == 0) ||
		    ((lml->lm_flags & LML_FLG_TRC_NOPAREXT) != 0)) {
			(void) printf(MSG_INTL(MSG_LDD_SYM_NFOUND),
			    demangle(name), NAME(lmp));
		}
		return (1);
	}

	/*
	 * Otherwise, the unresolved reference is fatal.
	 */
	DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel,
	    NULL, name));
	eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
	    demangle(name));

	return (0);
}