/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 */

/*
 * Object file dependent support for ELF objects.
 */

#include <stdio.h>
#include <sys/procfs.h>
#include <sys/mman.h>
#include <sys/debug.h>
#include <string.h>
#include <limits.h>
#include <dlfcn.h>
#include <debug.h>
#include <conv.h>
#include "_rtld.h"
#include "_audit.h"
#include "_elf.h"
#include "msg.h"

/*
 * Default and secure dependency search paths.
 */
static Pnode elf_dflt_dirs[] = {
#if defined(_ELF64)
#ifndef SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB_64), 0, MSG_PTH_LIB_64_SIZE,
	    LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB_64), 0, MSG_PTH_USRLIB_64_SIZE,
	    LA_SER_DEFAULT, 0, 0 }
#else
#ifndef SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB), 0, MSG_PTH_LIB_SIZE,
	    LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB), 0, MSG_PTH_USRLIB_SIZE,
	    LA_SER_DEFAULT, 0, 0 }
#endif
};

static Pnode elf_secure_dirs[] = {
#if defined(_ELF64)
#ifndef SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE_64), 0, MSG_PTH_LIBSE_64_SIZE,
	    LA_SER_SECURE, 0, &elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE_64), 0,
	    MSG_PTH_USRLIBSE_64_SIZE,
	    LA_SER_SECURE, 0, 0 }
#else
#ifndef SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE), 0, MSG_PTH_LIBSE_SIZE,
	    LA_SER_SECURE, 0, &elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE), 0, MSG_PTH_USRLIBSE_SIZE,
	    LA_SER_SECURE, 0, 0 }
#endif
};

/*
 * Defines for local functions.
 */
static Pnode	*elf_fix_name(const char *, Rt_map *, uint_t);
static int	elf_are_u(Rej_desc *);
static void	elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int);
static ulong_t	elf_entry_pt(void);
static char	*elf_get_so(const char *, const char *);
static Rt_map	*elf_map_so(Lm_list *, Aliste, const char *, const char *,
		    int, int *);
static int	elf_needed(Lm_list *, Aliste, Rt_map *, int *);
static void	elf_unmap_so(Rt_map *);
static int	elf_verify_vers(const char *, Rt_map *, Rt_map *);

/*
 * Functions and data accessed through indirect pointers.
 */
Fct elf_fct = {
	elf_are_u,
	elf_entry_pt,
	elf_map_so,
	elf_unmap_so,
	elf_needed,
	lookup_sym,
	elf_reloc,
	elf_dflt_dirs,
	elf_secure_dirs,
	elf_fix_name,
	elf_get_so,
	elf_dladdr,
	dlsym_handle,
	elf_verify_vers,
	elf_set_prot
};
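
/*
 * Note (editorial): generic rtld code never calls the routines above
 * directly; it reaches them through the Fct vector recorded with each
 * link-map, which is what allows an alternative object format handler
 * (AOUT) to supply an equivalent table of its own.
 */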

/*
 * Redefine NEEDED name if necessary.
 */
static Pnode *
elf_fix_name(const char *name, Rt_map *clmp, uint_t orig)
{
	/*
	 * For ABI compliance, if we are asked for ld.so.1, then really give
	 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1).
	 */
	if (((*name == '/') &&
	/* BEGIN CSTYLED */
#if defined(_ELF64)
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) ||
#else
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) ||
#endif
	    (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) {
		/* END CSTYLED */
		Pnode	*pnp;

		DBG_CALL(Dbg_file_fixname(LIST(clmp), name,
		    MSG_ORIG(MSG_PTH_LIBSYS)));
		if (((pnp = calloc(sizeof (Pnode), 1)) == 0) ||
		    ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) {
			if (pnp)
				free(pnp);
			return (0);
		}
		pnp->p_len = MSG_PTH_LIBSYS_SIZE;
		return (pnp);
	}

	return (expand_paths(clmp, name, orig, 0));
}

/*
 * Determine if we have been given an ELF file and if so determine if the file
 * is compatible.  Returns 1 if true, else 0 and sets the reject descriptor
 * with associated error information.
 */
static int
elf_are_u(Rej_desc *rej)
{
	Ehdr	*ehdr;

	/*
	 * Determine if we're an elf file.  If not simply return, we don't set
	 * any rejection information as this test allows us to scroll through
	 * the objects we support (ELF, AOUT).
	 */
	if (fmap->fm_fsize < sizeof (Ehdr) ||
	    fmap->fm_maddr[EI_MAG0] != ELFMAG0 ||
	    fmap->fm_maddr[EI_MAG1] != ELFMAG1 ||
	    fmap->fm_maddr[EI_MAG2] != ELFMAG2 ||
	    fmap->fm_maddr[EI_MAG3] != ELFMAG3) {
		return (0);
	}

	/*
	 * Check class and encoding.
	 */
	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;
	if (ehdr->e_ident[EI_CLASS] != M_CLASS) {
		rej->rej_type = SGS_REJ_CLASS;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS];
		return (0);
	}
	if (ehdr->e_ident[EI_DATA] != M_DATA) {
		rej->rej_type = SGS_REJ_DATA;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA];
		return (0);
	}
	if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) &&
	    (ehdr->e_type != ET_DYN)) {
		rej->rej_type = SGS_REJ_TYPE;
		rej->rej_info = (uint_t)ehdr->e_type;
		return (0);
	}

	/*
	 * Verify machine specific flags, and hardware capability requirements.
	 */
	if ((elf_mach_flags_check(rej, ehdr) == 0) ||
	    ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0)))
		return (0);

	/*
	 * Verify ELF version.  ??? is this too restrictive ???
	 */
	if (ehdr->e_version > EV_CURRENT) {
		rej->rej_type = SGS_REJ_VERSION;
		rej->rej_info = (uint_t)ehdr->e_version;
		return (0);
	}
	return (1);
}
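
/*
 * For example (illustrative): should the 32-bit ld.so.1 be handed a
 * 64-bit object, the EI_CLASS test above fails and the caller receives
 * a reject descriptor of SGS_REJ_CLASS whose rej_info carries
 * ELFCLASS64.  The silent rejection of non-ELF files, by contrast,
 * lets the caller offer the same mapping to the next supported object
 * format (AOUT).
 */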

/*
 * The runtime linker employs lazy loading to provide the libraries needed for
 * debugging, preloading .o's and dldump().  As these are seldom used, the
 * standard startup of ld.so.1 doesn't initialize all the information necessary
 * to perform plt relocation on ld.so.1's link-map.  The first time lazy
 * loading is called we get here to perform these initializations:
 *
 *  o	elf_needed() is called to set up the DYNINFO() indexes for each lazy
 *	dependency.  Typically, for all other objects, this is called during
 *	analyze_so(), but as ld.so.1 is self-contained we skip this processing.
 *
 *  o	For intel, ld.so.1's JMPSLOT relocations need relative updates.  These
 *	are by default skipped thus delaying all relative relocation processing
 *	on every invocation of ld.so.1.
 */
int
elf_rtld_load()
{
	Lm_list	*lml = &lml_rtld;
	Rt_map	*lmp = lml->lm_head;

	if (lml->lm_flags & LML_FLG_PLTREL)
		return (1);

	/*
	 * As we need to refer to the DYNINFO() information, insure that it has
	 * been initialized.
	 */
	if (elf_needed(lml, ALIST_OFF_DATA, lmp, NULL) == 0)
		return (0);

#if defined(__i386)
	/*
	 * This is a kludge to give ld.so.1 a performance benefit on i386.
	 * It's based around two factors.
	 *
	 *  o	JMPSLOT relocations (PLT's) actually need a relative relocation
	 *	applied to the GOT entry so that they can find PLT0.
	 *
	 *  o	ld.so.1 does not exercise *any* PLT's before it has made a call
	 *	to elf_lazy_load().  This is because all dynamic dependencies
	 *	are recorded as lazy dependencies.
	 */
	(void) elf_reloc_relacount((ulong_t)JMPREL(lmp),
	    (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp),
	    (ulong_t)ADDR(lmp));
#endif

	lml->lm_flags |= LML_FLG_PLTREL;
	return (1);
}
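
/*
 * A lazy dependency appears in the dynamic section as a DT_POSFLAG_1
 * entry immediately preceding the DT_NEEDED entry it governs, e.g.:
 *
 *	DT_POSFLAG_1	DF_P1_LAZYLOAD
 *	DT_NEEDED	libfoo.so.1	(illustrative name)
 *
 * elf_needed() records this pairing as FLG_DI_LAZY in the DYNINFO()
 * element of the DT_NEEDED entry, and elf_lazy_load() below is later
 * handed that element's index when a binding to the dependency is
 * first required.
 */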

/*
 * Lazy load an object.
 */
Rt_map *
elf_lazy_load(Rt_map *clmp, Slookup *slp, uint_t ndx, const char *sym,
    int *in_nfavl)
{
	Rt_map		*nlmp, *hlmp;
	Dyninfo		*dip = &DYNINFO(clmp)[ndx], *pdip;
	uint_t		flags = 0;
	Pnode		*pnp;
	const char	*name;
	Lm_list		*lml = LIST(clmp);
	Lm_cntl		*lmc;
	Aliste		lmco;

	/*
	 * If this dependency has already been processed, we're done.
	 */
	if (((nlmp = (Rt_map *)dip->di_info) != 0) ||
	    (dip->di_flags & FLG_DI_LDD_DONE))
		return (nlmp);

	/*
	 * If we're running under ldd(1), indicate that this dependency has
	 * been processed (see test above).  It doesn't matter whether the
	 * object is successfully loaded or not, this flag simply ensures that
	 * we don't repeatedly attempt to load an object that has already
	 * failed to load.  To do so would create multiple failure diagnostics
	 * for the same object under ldd(1).
	 */
	if (lml->lm_flags & LML_FLG_TRC_ENABLE)
		dip->di_flags |= FLG_DI_LDD_DONE;

	/*
	 * Determine the initial dependency name.
	 */
	name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val;
	DBG_CALL(Dbg_file_lazyload(clmp, name, sym));

	/*
	 * If this object needs to establish its own group, make sure a handle
	 * is created.
	 */
	if (dip->di_flags & FLG_DI_GROUP)
		flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE);

	/*
	 * Lazy dependencies are identified as DT_NEEDED entries with a
	 * DF_P1_LAZYLOAD flag in the previous DT_POSFLAG_1 element.  The
	 * dynamic information element that corresponds to the DT_POSFLAG_1
	 * entry is free, and thus used to store the present entrance
	 * identifier.  This identifier is used to prevent multiple attempts
	 * to load a failed lazy loadable dependency within the same runtime
	 * linker operation.  However, future attempts to reload this
	 * dependency are still possible.
	 */
	if (ndx && (pdip = dip - 1) && (pdip->di_flags & FLG_DI_POSFLAG1))
		pdip->di_info = (void *)slp->sl_id;

	/*
	 * Expand the requested name if necessary.
	 */
	if ((pnp = elf_fix_name(name, clmp, 0)) == 0)
		return (0);

	/*
	 * Provided the object on the head of the link-map has completed its
	 * relocation, create a new link-map control list for this request.
	 */
	hlmp = lml->lm_head;
	if (FLAGS(hlmp) & FLG_RT_RELOCED) {
		if ((lmc = alist_append(&lml->lm_lists, 0, sizeof (Lm_cntl),
		    AL_CNT_LMLISTS)) == 0) {
			remove_pnode(pnp);
			return (0);
		}
		lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists);
	} else {
		lmc = 0;
		lmco = ALIST_OFF_DATA;
	}

	/*
	 * Load the associated object.
	 */
	dip->di_info = nlmp =
	    load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0, in_nfavl);

	/*
	 * Remove any expanded pathname infrastructure.  Reduce the pending
	 * lazy dependency count of the caller, together with the link-map
	 * lists count of objects that still have lazy dependencies pending.
	 */
	remove_pnode(pnp);
	if (--LAZY(clmp) == 0)
		LIST(clmp)->lm_lazy--;

	/*
	 * Finish processing the objects associated with this request, and
	 * create an association between the caller and this dependency.
	 */
	if (nlmp && ((bind_one(clmp, nlmp, BND_NEEDED) == 0) ||
	    (analyze_lmc(lml, lmco, nlmp, in_nfavl) == 0) ||
	    (relocate_lmc(lml, lmco, clmp, nlmp, in_nfavl) == 0)))
		dip->di_info = nlmp = 0;

	/*
	 * If this lazyload has failed, and we've created a new link-map
	 * control list to which this request has added objects, then remove
	 * all the objects that have been associated to this request.
	 */
	if ((nlmp == 0) && lmc && lmc->lc_head)
		remove_lmc(lml, clmp, lmc, lmco, name);

	/*
	 * Finally, remove any link-map control list that was created.
	 */
	if (lmc)
		remove_cntl(lml, lmco);

	/*
	 * If this lazy loading failed, record the fact, and bump the lazy
	 * counts.
	 */
	if (nlmp == 0) {
		dip->di_flags |= FLG_DI_LAZYFAIL;
		if (LAZY(clmp)++ == 0)
			LIST(clmp)->lm_lazy++;
	}

	return (nlmp);
}

/*
 * Return the entry point of the ELF executable.
 */
static ulong_t
elf_entry_pt(void)
{
	return (ENTRY(lml_main.lm_head));
}

/*
 * Unmap a given ELF shared object from the address space.
 */
static void
elf_unmap_so(Rt_map *lmp)
{
	caddr_t	addr;
	size_t	size;
	Mmap	*mmaps;

	/*
	 * If this link map represents a relocatable object concatenation,
	 * then the image was simply generated in allocated memory.  Free the
	 * memory.
	 *
	 * Note: the memory was originally allocated in the libelf:_elf_outmap
	 * routine and would normally have been free'd in elf_outsync(), but
	 * because we 'interpose' on that routine the memory wasn't free'd at
	 * that time.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC) {
		free((void *)ADDR(lmp));
		return;
	}

	/*
	 * If padding was enabled via rtld_db, then we have at least one page
	 * in front of the image - and possibly a trailing page.
	 * Unmap the front page first:
	 */
	if (PADSTART(lmp) != ADDR(lmp)) {
		addr = (caddr_t)M_PTRUNC(PADSTART(lmp));
		size = ADDR(lmp) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap any trailing padding.
	 */
	if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) >
	    M_PROUND(ADDR(lmp) + MSIZE(lmp))) {
		addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp));
		size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap all mapped segments.
	 */
	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++)
		(void) munmap(mmaps->m_vaddr, mmaps->m_msize);
}
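
/*
 * The MMAPS() array traversed above is terminated by an element whose
 * m_vaddr is null; each element records the address, memory size, file
 * size and permissions that elf_map_it() below established for one
 * segment of the object.
 */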

/*
 * Determine if a dependency requires a particular version and if so verify
 * that the version exists in the dependency.
 */
static int
elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp)
{
	Verneed		*vnd = VERNEED(clmp);
	int		_num, num = VERNEEDNUM(clmp);
	char		*cstrs = (char *)STRTAB(clmp);
	Lm_list		*lml = LIST(clmp);

	/*
	 * Traverse the caller's version needed information and determine if
	 * any specific versions are required from the dependency.
	 */
	DBG_CALL(Dbg_ver_need_title(LIST(clmp), NAME(clmp)));
	for (_num = 1; _num <= num; _num++,
	    vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) {
		Half		cnt = vnd->vn_cnt;
		Vernaux		*vnap;
		char		*nstrs, *need;

		/*
		 * Determine if a needed entry matches this dependency.
		 */
		need = (char *)(cstrs + vnd->vn_file);
		if (strcmp(name, need) != 0)
			continue;

		if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) &&
		    ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0))
			(void) printf(MSG_INTL(MSG_LDD_VER_FIND), name);

		/*
		 * Validate that each version required actually exists in the
		 * dependency.
		 */
		nstrs = (char *)STRTAB(nlmp);

		for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt;
		    cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) {
			char		*version, *define;
			Verdef		*vdf = VERDEF(nlmp);
			ulong_t		_num, num = VERDEFNUM(nlmp);
			int		found = 0;

			/*
			 * Skip validation of versions that are marked
			 * INFO.  This optimization is used for versions
			 * that are inherited by another version.  Verification
			 * of the inheriting version is sufficient.
			 *
			 * Such versions are recorded in the object for the
			 * benefit of VERSYM entries that refer to them.  This
			 * provides a purely diagnostic benefit.
			 */
			if (vnap->vna_flags & VER_FLG_INFO)
				continue;

			version = (char *)(cstrs + vnap->vna_name);
			DBG_CALL(Dbg_ver_need_entry(lml, 0, need, version));

			for (_num = 1; _num <= num; _num++,
			    vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) {
				Verdaux		*vdap;

				if (vnap->vna_hash != vdf->vd_hash)
					continue;

				vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux);
				define = (char *)(nstrs + vdap->vda_name);
				if (strcmp(version, define) != 0)
					continue;

				found++;
				break;
			}

			/*
			 * If we're being traced print out any matched version
			 * when the verbose (-v) option is in effect.  Always
			 * print any unmatched versions.
			 */
			if (lml->lm_flags & LML_FLG_TRC_ENABLE) {
				/* BEGIN CSTYLED */
				if (found) {
				    if (!(lml->lm_flags & LML_FLG_TRC_VERBOSE))
					continue;

				    (void) printf(MSG_ORIG(MSG_LDD_VER_FOUND),
					need, version, NAME(nlmp));
				} else {
				    if (rtld_flags & RT_FL_SILENCERR)
					continue;

				    (void) printf(MSG_INTL(MSG_LDD_VER_NFOUND),
					need, version);
				}
				/* END CSTYLED */
				continue;
			}

			/*
			 * If the version hasn't been found then this is a
			 * candidate for a fatal error condition.  Weak
			 * version definition requirements are silently
			 * ignored.  Also, if the image inspected for a version
			 * definition has no versioning recorded at all then
			 * silently ignore this (this provides better backward
			 * compatibility to old images created prior to
			 * versioning being available).  Both of these skipped
			 * diagnostics are available under tracing (see above).
			 */
			if ((found == 0) && (num != 0) &&
			    (!(vnap->vna_flags & VER_FLG_WEAK))) {
				eprintf(lml, ERR_FATAL,
				    MSG_INTL(MSG_VER_NFOUND), need, version,
				    NAME(clmp));
				return (0);
			}
		}
	}
	DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD));
	return (1);
}
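
/*
 * Illustration: if the caller was built against libbar.so.1 requiring
 * version SUNW_1.2 (both illustrative names), its Verneed entry names
 * the file and carries a Vernaux record for SUNW_1.2.  The routine
 * above matches that record against the dependency's Verdef entries,
 * first by vna_hash/vd_hash and then by the version name itself, so a
 * dependency lacking SUNW_1.2 is caught at load time rather than at an
 * unresolved binding.
 */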

/*
 * Search through the dynamic section for DT_NEEDED entries and perform one
 * of two functions.  If only the first argument is specified then load the
 * defined shared object, otherwise add the link map representing the defined
 * shared object to the dlopen list.
 */
static int
elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp, int *in_nfavl)
{
	Dyn		*dyn, *pdyn;
	ulong_t		ndx = 0;
	uint_t		lazy, flags;
	Word		lmflags = lml->lm_flags;
	Word		lmtflags = lml->lm_tflags;

	/*
	 * Process each shared object on needed list.
	 */
	if (DYN(clmp) == 0)
		return (1);

	for (dyn = (Dyn *)DYN(clmp), pdyn = NULL; dyn->d_tag != DT_NULL;
	    pdyn = dyn++, ndx++) {
		Dyninfo	*dip = &DYNINFO(clmp)[ndx];
		Rt_map	*nlmp = 0;
		char	*name;
		int	silent = 0;
		Pnode	*pnp;

		switch (dyn->d_tag) {
		case DT_POSFLAG_1:
			dip->di_flags |= FLG_DI_POSFLAG1;
			continue;
		case DT_NEEDED:
		case DT_USED:
			lazy = flags = 0;
			dip->di_flags |= FLG_DI_NEEDED;

			if (pdyn && (pdyn->d_tag == DT_POSFLAG_1)) {
				if ((pdyn->d_un.d_val & DF_P1_LAZYLOAD) &&
				    ((lmtflags & LML_TFLG_NOLAZYLD) == 0)) {
					dip->di_flags |= FLG_DI_LAZY;
					lazy = 1;
				}
				if (pdyn->d_un.d_val & DF_P1_GROUPPERM) {
					dip->di_flags |= FLG_DI_GROUP;
					flags =
					    (FLG_RT_SETGROUP | FLG_RT_HANDLE);
				}
			}

			name = (char *)STRTAB(clmp) + dyn->d_un.d_val;

			/*
			 * NOTE, libc.so.1 can't be lazy loaded.  Although a
			 * lazy position flag won't be produced when a RTLDINFO
			 * .dynamic entry is found (introduced with the UPM in
			 * Solaris 10), it was possible to mark libc for lazy
			 * loading on previous releases.  To reduce the
			 * overhead of testing for this occurrence, only carry
			 * out this check for the first object on the link-map
			 * list (there aren't many applications built without
			 * libc).
			 */
			if (lazy && (lml->lm_head == clmp) &&
			    (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0))
				lazy = 0;

			/*
			 * Don't bring in lazy loaded objects yet unless we've
			 * been asked to attempt to load all available objects
			 * (crle(1) sets LD_FLAGS=loadavail).  Even under
			 * RTLD_NOW we don't process this - RTLD_NOW will cause
			 * relocation processing which in turn might trigger
			 * lazy loading, but it's possible that the object has
			 * a lazy loaded file with no bindings (i.e., it should
			 * never have been a dependency in the first place).
			 */
			if (lazy) {
				if ((lmflags & LML_FLG_LOADAVAIL) == 0) {
					LAZY(clmp)++;
					lazy = flags = 0;
					continue;
				}

				/*
				 * Silence any error messages - see description
				 * under elf_lookup_filtee().
				 */
				if ((rtld_flags & RT_FL_SILENCERR) == 0) {
					rtld_flags |= RT_FL_SILENCERR;
					silent = 1;
				}
			}
			break;
		case DT_AUXILIARY:
			dip->di_flags |= FLG_DI_AUXFLTR;
			continue;
		case DT_SUNW_AUXILIARY:
			dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR);
			continue;
		case DT_FILTER:
			dip->di_flags |= FLG_DI_STDFLTR;
			continue;
		case DT_SUNW_FILTER:
			dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR);
			continue;
		default:
			continue;
		}

		DBG_CALL(Dbg_file_needed(clmp, name));

		/*
		 * If we're running under ldd(1), indicate that this dependency
		 * has been processed.  It doesn't matter whether the object is
		 * successfully loaded or not, this flag simply ensures that we
		 * don't repeatedly attempt to load an object that has already
		 * failed to load.  To do so would create multiple failure
		 * diagnostics for the same object under ldd(1).
		 */
		if (lml->lm_flags & LML_FLG_TRC_ENABLE)
			dip->di_flags |= FLG_DI_LDD_DONE;

		/*
		 * Establish the object's name, load it and establish a binding
		 * with the caller.
		 */
		if (((pnp = elf_fix_name(name, clmp, 0)) == 0) || ((nlmp =
		    load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0,
		    in_nfavl)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0))
			nlmp = 0;

		/*
		 * Clean up any infrastructure, including the removal of the
		 * error suppression state, if it had been previously set in
		 * this routine.
		 */
		if (pnp)
			remove_pnode(pnp);
		if (silent)
			rtld_flags &= ~RT_FL_SILENCERR;

		if ((dip->di_info = (void *)nlmp) == 0) {
			/*
			 * If the object could not be mapped, continue if error
			 * suppression is established or we're here with
			 * ldd(1).
			 */
			if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags &
			    (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE)))
				continue;
			else
				return (0);
		}
	}

	if (LAZY(clmp))
		lml->lm_lazy++;

	return (1);
}
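
/*
 * The DYNINFO() array referenced above parallels the object's dynamic
 * section: the Dyninfo element at index ndx describes the Dyn element
 * at the same index, which is how elf_lazy_load() can later recover
 * the dependency name from DYN(clmp)[ndx].d_un.d_val.
 */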

static int
elf_map_check(Lm_list *lml, const char *name, caddr_t vaddr, Off size)
{
	prmap_t		*maps, *_maps;
	int		pfd, num, _num;
	caddr_t		eaddr = vaddr + size;
	int		err;

	/*
	 * If memory reservations have been established for alternative
	 * objects, determine if this object falls within the reservation.
	 * If it does, no further checking is required.
	 */
	if (rtld_flags & RT_FL_MEMRESV) {
		Rtc_head	*head = (Rtc_head *)config->c_bgn;

		if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) &&
		    (eaddr <= (caddr_t)(uintptr_t)head->ch_resend))
			return (0);
	}

	/*
	 * Determine the mappings presently in use by this process.
	 */
	if ((pfd = pr_open(lml)) == FD_UNAVAIL)
		return (1);

	if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		return (1);
	}

	if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0)
		return (1);

	if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		free(maps);
		return (1);
	}

	/*
	 * Determine if the supplied address clashes with any of the present
	 * process mappings.
	 */
	for (_num = 0, _maps = maps; _num < num; _num++, _maps++) {
		caddr_t		_eaddr = _maps->pr_vaddr + _maps->pr_size;
		Rt_map		*lmp;
		const char	*str;

		if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr))
			continue;

		/*
		 * We have a memory clash.  See if one of the known dynamic
		 * dependency mappings represents this space so as to provide
		 * the user a more meaningful message.
		 */
		if ((lmp = _caller(vaddr, 0)) != 0)
			str = NAME(lmp);
		else
			str = MSG_INTL(MSG_STR_UNKNOWN);

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name,
		    EC_NATPTR(vaddr), EC_OFF(size), str);
		return (1);
	}
	free(maps);
	return (0);
}
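
/*
 * Illustrative scenario: should a fixed object request a range already
 * occupied by, say, a loaded libc.so.1, the loop above detects the
 * overlap, and _caller() lets the MSG_GEN_MAPINUSE diagnostic name the
 * clashing dependency rather than an anonymous address range.
 */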

/*
 * Obtain a memory reservation.  On newer systems, both MAP_ANON and MAP_ALIGN
 * are used to obtain an aligned reservation from anonymous memory.  If
 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard
 * reservation using the file as backing.
 */
static Am_ret
elf_map_reserve(Lm_list *lml, const char *name, caddr_t *maddr, Off msize,
    int mperm, int fd, Xword align)
{
	Am_ret	amret;
	int	mflag = MAP_PRIVATE | MAP_NORESERVE;

#if defined(MAP_ALIGN)
	if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) {
		mflag |= MAP_ALIGN;
		*maddr = (caddr_t)align;
	}
#endif
	if ((amret = anon_map(lml, maddr, msize, PROT_NONE, mflag)) == AM_ERROR)
		return (amret);

	if (amret == AM_OK)
		return (AM_OK);

	/*
	 * If an anonymous memory request failed (which should only be the
	 * case if it is unsupported on the system we're running on), establish
	 * the initial mapping directly from the file.
	 */
	*maddr = 0;
	if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE,
	    fd, 0)) == MAP_FAILED) {
		int	err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name,
		    strerror(err));
		return (AM_ERROR);
	}
	return (AM_NOSUP);
}

static void *
elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag,
    int fd, Off foff)
{
#if defined(MAP_TEXT) && defined(MAP_INITDATA)
	static int	notd = 0;

	/*
	 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate
	 * flag.
	 */
	if (notd == 0) {
		if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC)
			mflag |= MAP_TEXT;
		else
			mflag |= MAP_INITDATA;
	}
#endif
	if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED)
		return (0);

#if defined(MAP_TEXT) && defined(MAP_INITDATA)
	if ((notd == 0) && (errno == EINVAL)) {
		/*
		 * MAP_TEXT and MAP_INITDATA may not be supported on this
		 * platform, try again without.
		 */
		notd = 1;
		mflag &= ~(MAP_TEXT | MAP_INITDATA);

		return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff));
	}
#endif
	return (MAP_FAILED);
}
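
/*
 * Together the two routines above implement the mapping strategy used
 * by elf_map_it() below: obtain one large PROT_NONE reservation
 * (anonymous and aligned when the system allows), then map each
 * segment over that reservation with MAP_FIXED, applying the
 * MAP_TEXT/MAP_INITDATA hints where they are supported.
 */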

/*
 * Map in a file.
 */
static caddr_t
elf_map_it(
	Lm_list		*lml,		/* link-map list */
	const char	*name,		/* actual name stored for pathname */
	Off		fsize,		/* total mapping claim of the file */
	Ehdr		*ehdr,		/* ELF header of file */
	Phdr		*fphdr,		/* first loadable Phdr */
	Phdr		*lphdr,		/* last loadable Phdr */
	Phdr		**rrphdr,	/* return first Phdr in reservation */
	caddr_t		*rraddr,	/* return start of reservation */
	Off		*rrsize,	/* return total size of reservation */
	int		fixed,		/* image is resolved to a fixed addr */
	int		fd,		/* image's file descriptor */
	Xword		align,		/* image segments' maximum alignment */
	Mmap		*mmaps,		/* mmap information array and */
	uint_t		*mmapcnt)	/* mapping count */
{
	caddr_t		raddr;		/* reservation address */
	Off		rsize;		/* reservation size */
	Phdr		*phdr;		/* working program header pointer */
	caddr_t		maddr;		/* working mmap address */
	caddr_t		faddr;		/* working file address */
	size_t		padsize;	/* object padding requirement */
	size_t		padpsize = 0;	/* padding size rounded to next page */
	size_t		padmsize = 0;	/* padding size rounded for alignment */
	int		skipfseg;	/* skip mapping first segment */
	int		mperm;		/* segment permissions */
	Am_ret		amret = AM_NOSUP;

	/*
	 * If padding is required extend both the front and rear of the image.
	 * To insure the image itself is mapped at the correct alignment the
	 * initial padding is rounded up to the nearest page.  Once the image
	 * is mapped the excess can be pruned to the nearest page required for
	 * the actual padding itself.
	 */
	if ((padsize = r_debug.rtd_objpad) != 0) {
		padpsize = M_PROUND(padsize);
		if (fixed)
			padmsize = padpsize;
		else
			padmsize = S_ROUND(padsize, align);
	}

	/*
	 * Determine the initial permissions used to map in the first segment.
	 * If this segment's memsz is greater than its filesz then the
	 * difference must be zeroed.  Make sure this segment is writable.
	 */
	mperm = 0;
	if (fphdr->p_flags & PF_R)
		mperm |= PROT_READ;
	if (fphdr->p_flags & PF_X)
		mperm |= PROT_EXEC;
	if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz))
		mperm |= PROT_WRITE;
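
	/*
	 * For a typical text segment (PF_R | PF_X) the above yields
	 * PROT_READ | PROT_EXEC.  A data segment whose p_memsz exceeds
	 * p_filesz picks up PROT_WRITE even if PF_W is clear, as the
	 * trailing .bss portion must be zeroed after mapping.
	 */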

	/*
	 * Determine whether or not to let the system reserve address space
	 * based on whether this is a dynamic executable (addresses in object
	 * are fixed) or a shared object (addresses in object are relative to
	 * the object's base).
	 */
	if (fixed) {
		/*
		 * Determine the reservation address and size, and insure that
		 * this reservation isn't already in use.
		 */
		faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr);
		raddr = maddr - padpsize;
		rsize = fsize + padpsize + padsize;

		if (lml_main.lm_head) {
			if (elf_map_check(lml, name, raddr, rsize) != 0)
				return (0);
		}

		/*
		 * As this is a fixed image, all segments must be individually
		 * mapped.
		 */
		skipfseg = 0;

	} else {
		size_t	esize;

		/*
		 * If this isn't a fixed image, reserve enough address space
		 * for the entire image to be mapped.  The amount of
		 * reservation is the range between the beginning of the
		 * first, and end of the last loadable segment, together with
		 * any padding, plus the alignment of the first segment.
		 *
		 * The optimal reservation is made as a no-reserve mapping
		 * from anonymous memory.  Each segment is then mapped into
		 * this reservation.  If the anonymous mapping capability
		 * isn't available, the reservation is obtained from the file
		 * itself.  In this case the first segment of the image is
		 * mapped as part of the reservation, thus only the following
		 * segments need to be remapped.
		 */
		rsize = fsize + padmsize + padsize;
		if ((amret = elf_map_reserve(lml, name, &raddr, rsize, mperm,
		    fd, align)) == AM_ERROR)
			return (0);
		maddr = raddr + padmsize;
		faddr = (caddr_t)S_ROUND((Off)maddr, align);

		/*
		 * If this reservation has been obtained from anonymous
		 * memory, then all segments must be individually mapped.
		 * Otherwise, the first segment heads the reservation.
		 */
		if (amret == AM_OK)
			skipfseg = 0;
		else
			skipfseg = 1;

		/*
		 * For backward compatibility (where MAP_ALIGN isn't
		 * available), insure the alignment of the reservation is
		 * adequate for this object, and if not remap the object to
		 * obtain the correct alignment.
		 */
		if (faddr != maddr) {
			(void) munmap(raddr, rsize);

			rsize += align;
			if ((amret = elf_map_reserve(lml, name, &raddr, rsize,
			    mperm, fd, align)) == AM_ERROR)
				return (0);

			maddr = faddr = (caddr_t)S_ROUND((Off)(raddr +
			    padpsize), align);

			esize = maddr - raddr + padpsize;

			/*
			 * As this image has been realigned, the first segment
			 * of the file needs to be remapped to its correct
			 * location.
			 */
			skipfseg = 0;
		} else
			esize = padmsize - padpsize;

		/*
		 * If this reservation included padding, remove any excess for
		 * the start of the image (the padding was adjusted to insure
		 * the image was aligned appropriately).
		 */
		if (esize) {
			(void) munmap(raddr, esize);
			raddr += esize;
			rsize -= esize;
		}
	}

	/*
	 * At this point we know the initial location of the image, and its
	 * size.  Pass these back to the caller for inclusion in the link-map
	 * that will eventually be created.
	 */
	*rraddr = raddr;
	*rrsize = rsize;

	/*
	 * The first loadable segment is now pointed to by maddr.  This
	 * segment will eventually contain the elf header and program headers,
	 * so reset the program header.  Pass this back to the caller for
	 * inclusion in the link-map so it can be used for later unmapping
	 * operations.
	 */
	/* LINTED */
	*rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff);

	/*
	 * If padding is required at the front of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 * Otherwise this reservation is backed by the file, or in the case of
	 * a fixed image, doesn't yet exist.  Map the padding so that it is
	 * suitably protected (PROT_NONE), and insure the first segment of the
	 * file is mapped to its correct location.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			if (dz_map(lml, raddr, padpsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);

			skipfseg = 0;
		}
		rsize -= padpsize;
	}

	/*
	 * Map individual segments.  For a fixed image, these will each be
	 * unique mappings.  For a reservation these will fill in the
	 * reservation.
	 */
	for (phdr = fphdr; phdr <= lphdr;
	    phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) {
		caddr_t	addr;
		Off	mlen, flen;
		size_t	size;

		/*
		 * Skip non-loadable segments or segments that don't occupy
		 * any memory.
		 */
		if (((phdr->p_type != PT_LOAD) &&
		    (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0))
			continue;

		/*
		 * Establish this segment's address relative to our base.
		 */
		addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr +
		    (fixed ? 0 : faddr)));

		/*
		 * Determine the mapping protection from the segment
		 * attributes.  Also determine the etext address from the last
		 * loadable segment which has permissions but no write access.
		 */
		mperm = 0;
		if (phdr->p_flags) {
			if (phdr->p_flags & PF_R)
				mperm |= PROT_READ;
			if (phdr->p_flags & PF_X)
				mperm |= PROT_EXEC;
			if (phdr->p_flags & PF_W)
				mperm |= PROT_WRITE;
			else
				fmap->fm_etext = phdr->p_vaddr +
				    phdr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
		}

		/*
		 * Determine the type of mapping required.
		 */
		if (phdr->p_type == PT_SUNWBSS) {
			/*
			 * Potentially, we can defer the loading of any SUNWBSS
			 * segment, depending on whether the symbols it
			 * provides have been bound to.  In this manner, large
			 * segments that are interposed upon between shared
			 * libraries may not require mapping.  Note, that the
			 * mapping information is recorded in our mapping
			 * descriptor at this time.
			 */
			mlen = phdr->p_memsz;
			flen = 0;

		} else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) {
			/*
			 * If this segment has no backing file and no flags
			 * specified, then it defines a reservation.  At this
			 * point all standard loadable segments will have been
			 * processed.  The segment reservation is mapped
			 * directly from /dev/null.
			 */
			if (nu_map(lml, (caddr_t)addr, phdr->p_memsz, PROT_NONE,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else if (phdr->p_filesz == 0) {
			/*
			 * If this segment has no backing file then it defines
			 * a nobits segment and is mapped directly from
			 * /dev/zero.
			 */
			if (dz_map(lml, (caddr_t)addr, phdr->p_memsz, mperm,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else {
			Off	foff;

			/*
			 * This mapping originates from the file.  Determine
			 * the file offset to which the mapping will be
			 * directed (must be aligned) and how much to map
			 * (might be more than the file in the case of .bss).
			 */
			foff = M_PTRUNC((ulong_t)phdr->p_offset);
			mlen = phdr->p_memsz + (phdr->p_offset - foff);
			flen = phdr->p_filesz + (phdr->p_offset - foff);

			/*
			 * If this is a non-fixed, non-anonymous mapping, and
			 * no padding is involved, then the first loadable
			 * segment is already part of the initial reservation.
			 * In this case there is no need to remap this segment.
			 */
			if ((skipfseg == 0) || (phdr != fphdr)) {
				int phdr_mperm = mperm;

				/*
				 * If this segment's memsz is greater than its
				 * filesz then the difference must be zeroed.
				 * Make sure this segment is writable.
				 */
				if (phdr->p_memsz > phdr->p_filesz)
					mperm |= PROT_WRITE;

				if (elf_map_textdata((caddr_t)addr, flen,
				    mperm, phdr_mperm,
				    (MAP_FIXED | MAP_PRIVATE), fd, foff) ==
				    MAP_FAILED) {
					int	err = errno;
					eprintf(lml, ERR_FATAL,
					    MSG_INTL(MSG_SYS_MMAP), name,
					    strerror(err));
					return (0);
				}
			}

			/*
			 * If the memory occupancy of the segment overflows the
			 * definition in the file, we need to "zero out" the
			 * end of the mapping we've established, and if
			 * necessary, map some more space from /dev/zero.
			 * Note, zero'ed memory must end on a double word
			 * boundary to satisfy zero().
			 */
			if (phdr->p_memsz > phdr->p_filesz) {
				caddr_t	zaddr;
				size_t	zlen, zplen;
				Off	fend;

				foff = (Off)(phdr->p_vaddr + phdr->p_filesz +
				    (fixed ? 0 : faddr));
				zaddr = (caddr_t)M_PROUND(foff);
				zplen = (size_t)(zaddr - foff);

				fend = (Off)S_DROUND((size_t)(phdr->p_vaddr +
				    phdr->p_memsz + (fixed ? 0 : faddr)));
				zlen = (size_t)(fend - foff);

				/*
				 * Determine whether the number of bytes that
				 * must be zero'ed overflow to the next page.
				 * If not, simply clear the exact bytes
				 * (filesz to memsz) from this page.
				 * Otherwise, clear the remaining bytes of this
				 * page, and map any following pages from
				 * /dev/zero.
				 */
				if (zlen < zplen)
					zero((caddr_t)foff, (long)zlen);
				else {
					zero((caddr_t)foff, (long)zplen);

					if ((zlen = (fend - (Off)zaddr)) > 0) {
						if (dz_map(lml, zaddr, zlen,
						    mperm,
						    MAP_FIXED | MAP_PRIVATE) ==
						    MAP_FAILED)
							return (0);
					}
				}
			}
		}

		/*
		 * Unmap anything from the last mapping address to this one
		 * and update the mapping claim pointer.
		 */
		if ((fixed == 0) && ((size = addr - maddr) != 0)) {
			(void) munmap(maddr, size);
			rsize -= size;
		}

		/*
		 * Retain this segment's mapping information.
		 */
		mmaps[*mmapcnt].m_vaddr = addr;
		mmaps[*mmapcnt].m_msize = mlen;
		mmaps[*mmapcnt].m_fsize = flen;
		mmaps[*mmapcnt].m_perm = mperm;
		(*mmapcnt)++;

		maddr = addr + M_PROUND(mlen);
		rsize -= M_PROUND(mlen);
	}

	/*
	 * If padding is required at the end of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			/*
			 * maddr is currently page aligned from the last
			 * segment mapping.
			 */
			if (dz_map(lml, maddr, padsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);
		}
		maddr += padsize;
		rsize -= padsize;
	}

	/*
	 * Unmap any final reservation.
	 */
	if ((fixed == 0) && (rsize != 0))
		(void) munmap(maddr, rsize);

	return (faddr);
}

/*
 * A null symbol interpreter.  Used if a filter has no associated filtees.
 */
/* ARGSUSED0 */
static Sym *
elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo, int *in_nfavl)
{
	return ((Sym *)0);
}

/*
 * Disable filtee use.
 */
static void
elf_disable_filtee(Rt_map *lmp, Dyninfo *dip)
{
	dip->di_info = 0;

	if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) {
		/*
		 * If this is an object filter, free the filtee's duplication.
		 */
		if (OBJFLTRNDX(lmp) != FLTR_DISABLED) {
			free(REFNAME(lmp));
			REFNAME(lmp) = (char *)0;
			OBJFLTRNDX(lmp) = FLTR_DISABLED;

			/*
			 * Indicate that this filtee is no longer available.
			 */
			if (dip->di_flags & FLG_DI_STDFLTR)
				SYMINTP(lmp) = elf_null_find_sym;
		}
	} else if (dip->di_flags & FLG_DI_STDFLTR) {
		/*
		 * Indicate that this standard filtee is no longer available.
		 */
		if (SYMSFLTRCNT(lmp))
			SYMSFLTRCNT(lmp)--;
	} else {
		/*
		 * Indicate that this auxiliary filtee is no longer available.
		 */
		if (SYMAFLTRCNT(lmp))
			SYMAFLTRCNT(lmp)--;
	}
	dip->di_flags &= ~MSK_DI_FILTER;
}
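
/*
 * The distinction drawn above follows the two filter flavors: a
 * standard filter (DT_FILTER) exists only to redirect bindings, so
 * once its filtees are gone symbol lookup is short-circuited through
 * elf_null_find_sym().  An auxiliary filter (DT_AUXILIARY) carries its
 * own fallback implementations, so lookup simply reverts to the filter
 * itself.
 */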

/*
 * Find symbol interpreter - filters.
 * This function is called when the symbols from a shared object should
 * be resolved from the shared object's filtees instead of from within itself.
 *
 * A symbol name of 0 is used to trigger filtee loading.
 */
static Sym *
_elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx,
    int *in_nfavl)
{
	const char	*name = slp->sl_name, *filtees;
	Rt_map		*clmp = slp->sl_cmap;
	Rt_map		*ilmp = slp->sl_imap;
	Pnode		*pnp, **pnpp;
	int		any;
	Dyninfo		*dip = &DYNINFO(ilmp)[ndx];
	Lm_list		*lml = LIST(ilmp);

	/*
	 * Indicate that the filter has been used.  If a binding already exists
	 * to the caller, indicate that this object is referenced.  This
	 * insures we don't generate false unreferenced diagnostics from
	 * ldd -u/U or debugging.  Don't create a binding regardless, as this
	 * filter may have been dlopen()'ed.
	 */
	if (name && (ilmp != clmp)) {
		Word	tracing = (LIST(clmp)->lm_flags &
		    (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED));

		if (tracing || DBG_ENABLED) {
			Bnd_desc	*bdp;
			Aliste		idx;

			FLAGS1(ilmp) |= FL1_RT_USED;

			if ((tracing & LML_FLG_TRC_UNREF) || DBG_ENABLED) {
				for (APLIST_TRAVERSE(CALLERS(ilmp), idx, bdp)) {
					if (bdp->b_caller == clmp) {
						bdp->b_flags |= BND_REFER;
						break;
					}
				}
			}
		}
	}

	/*
	 * If this is the first call to process this filter, establish the
	 * filtee list.  If a configuration file exists, determine if any
	 * filtee associations for this filter, and its filtee reference, are
	 * defined.  Otherwise, process the filtee reference.  Any token
	 * expansion is also completed at this point (i.e., $PLATFORM).
	 */
	filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val;
	if (dip->di_info == 0) {
		if (rtld_flags2 & RT_FL2_FLTCFG)
			dip->di_info = elf_config_flt(lml, PATHNAME(ilmp),
			    filtees);

		if (dip->di_info == 0) {
			DBG_CALL(Dbg_file_filter(lml, NAME(ilmp), filtees, 0));
			if ((lml->lm_flags &
			    (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) &&
			    ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0))
				(void) printf(MSG_INTL(MSG_LDD_FIL_FILTER),
				    NAME(ilmp), filtees);

			if ((dip->di_info = (void *)expand_paths(ilmp,
			    filtees, 0, 0)) == 0) {
				elf_disable_filtee(ilmp, dip);
				return ((Sym *)0);
			}
		}
	}

	/*
	 * Traverse the filtee list, dlopen()'ing any objects specified and
	 * using their group handle to lookup the symbol.
	 */
	for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp;
	    pnpp = &pnp->p_next, pnp = *pnpp) {
		int	mode;
		Grp_hdl	*ghp;
		Rt_map	*nlmp = 0;

		if (pnp->p_len == 0)
			continue;

		/*
		 * Establish the mode of the filtee from the filter.  As
		 * filtees are loaded via a dlopen(), make sure that RTLD_GROUP
		 * is set and the filtees aren't global.  It would be nice to
		 * have RTLD_FIRST used here also, but as filters got out long
		 * before RTLD_FIRST was introduced it's a little too late now.
		 */
		mode = MODE(ilmp) | RTLD_GROUP;
		mode &= ~RTLD_GLOBAL;

		/*
		 * Insure that any auxiliary filter can locate symbols from its
		 * caller.
		 */
		if (dip->di_flags & FLG_DI_AUXFLTR)
			mode |= RTLD_PARENT;

		/*
		 * Process any hardware capability directory.  Establish a new
		 * link-map control list from which to analyze any newly added
		 * objects.
		 */
		if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) {
			Lm_cntl	*lmc;
			Aliste	lmco;

			if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
				if ((lmc = alist_append(&lml->lm_lists, 0,
				    sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0)
					return ((Sym *)0);
				lmco = (Aliste)((char *)lmc -
				    (char *)lml->lm_lists);
			} else {
				lmc = 0;
				lmco = ALIST_OFF_DATA;
			}

			pnp = hwcap_filtees(pnpp, lmco, lmc, dip, ilmp, filtees,
			    mode, (FLG_RT_HANDLE | FLG_RT_HWCAP), in_nfavl);

			/*
			 * Now that any hardware capability objects have been
			 * processed, remove any link-map control list.
			 */
			if (lmc)
				remove_cntl(lml, lmco);
		}

		if (pnp->p_len == 0)
			continue;

		/*
		 * Process an individual filtee.
		 */
		if (pnp->p_info == 0) {
			const char	*filtee = pnp->p_name;
			int		audit = 0;

			DBG_CALL(Dbg_file_filtee(lml, NAME(ilmp), filtee, 0));

			ghp = 0;

			/*
			 * Determine if the reference link map is already
			 * loaded.  As an optimization compare the filtee with
			 * our interpreter.  The most common filter is
			 * libdl.so.1, which is a filter on ld.so.1.
			 */
#if defined(_ELF64)
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) {
#else
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) {
#endif
				/*
				 * Create an association between ld.so.1 and
				 * the filter.  As an optimization, a handle
				 * for ld.so.1 itself (required for the
				 * dlopen() family filtering mechanism)
				 * shouldn't search any dependencies of
				 * ld.so.1.  Omitting GPD_ADDEPS prevents the
				 * addition of any ld.so.1 dependencies to
				 * this handle.
				 */
				nlmp = lml_rtld.lm_head;
				if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp,
				    (GPH_LDSO | GPH_FIRST | GPH_FILTEE),
				    (GPD_DLSYM | GPD_RELOC), GPD_PARENT)) == 0)
					nlmp = 0;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp)
					pnp->p_info = (void *)ghp;

				/*
				 * Audit the filter/filtee established.  Ignore
				 * any return from the auditor, as we can't
				 * allow the auditor to disable filtering to
				 * ld.so.1, otherwise nothing is going to work.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER))
					(void) audit_objfilter(ilmp, filtees,
					    nlmp, 0);

			} else {
				Rej_desc	rej = { 0 };
				Lm_cntl		*lmc;
				Aliste		lmco;

				/*
				 * Establish a new link-map control list from
				 * which to analyze any newly added objects.
				 */
				if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
					if ((lmc =
					    alist_append(&lml->lm_lists, 0,
					    sizeof (Lm_cntl),
					    AL_CNT_LMLISTS)) == 0)
						return ((Sym *)0);
					lmco = (Aliste)((char *)lmc -
					    (char *)lml->lm_lists);
				} else {
					lmc = 0;
					lmco = ALIST_OFF_DATA;
				}

				/*
				 * Load the filtee.  Note, an auditor can
				 * provide an alternative name.
				 */
				if ((nlmp = load_path(lml, lmco, &(pnp->p_name),
				    ilmp, mode, FLG_RT_HANDLE, &ghp, 0,
				    &rej, in_nfavl)) == 0) {
					file_notfound(LIST(ilmp), filtee, ilmp,
					    FLG_RT_HANDLE, &rej);
					remove_rej(&rej);
				}
				filtee = pnp->p_name;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp) {
					ghp->gh_flags |= GPH_FILTEE;
					pnp->p_info = (void *)ghp;

					FLAGS1(nlmp) |= FL1_RT_USED;
				}

				/*
				 * Audit the filter/filtee established.  A
				 * return of 0 indicates the auditor wishes to
				 * ignore this filtee.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)) {
					if (audit_objfilter(ilmp, filtees,
					    nlmp, 0) == 0) {
						audit = 1;
						nlmp = 0;
					}
				}

				/*
				 * Finish processing the objects associated
				 * with this request.  Create an association
				 * between this object and the originating
				 * filter to provide sufficient information to
				 * tear down this filtee if necessary.
				 */
				if (nlmp && ghp && ((analyze_lmc(lml, lmco,
				    nlmp, in_nfavl) == 0) || (relocate_lmc(lml,
				    lmco, ilmp, nlmp, in_nfavl) == 0)))
					nlmp = 0;

				/*
				 * If the filtee has been successfully
				 * processed, then create an association
				 * between the filter and filtee.  This
				 * association provides sufficient information
				 * to tear down the filter and filtee if
				 * necessary.
				 */
				DBG_CALL(Dbg_file_hdl_title(DBG_HDL_ADD));
				if (nlmp && ghp &&
				    (hdl_add(ghp, ilmp, GPD_FILTER) == 0))
					nlmp = 0;

				/*
				 * If this filtee loading has failed, and we've
				 * created a new link-map control list to which
				 * this request has added objects, then remove
				 * all the objects that have been associated to
				 * this request.
				 */
				if ((nlmp == 0) && lmc && lmc->lc_head)
					remove_lmc(lml, clmp, lmc, lmco, name);

				/*
				 * Remove any link-map control list that was
				 * created.
				 */
				if (lmc)
					remove_cntl(lml, lmco);
			}

			/*
			 * Generate a diagnostic if the filtee couldn't be
			 * loaded, null out the pnode entry, and continue
			 * the search.  Otherwise, retain this group handle
			 * for future symbol searches.
			 */
			if (nlmp == 0) {
				DBG_CALL(Dbg_file_filtee(lml, 0, filtee,
				    audit));

				pnp->p_info = 0;
				pnp->p_len = 0;
				continue;
			}
		}

		ghp = (Grp_hdl *)pnp->p_info;

		/*
		 * If we're just here to trigger filtee loading skip the symbol
		 * lookup so we'll continue looking for additional filtees.
		 */
		if (name) {
			Grp_desc	*gdp;
			Sym		*sym = 0;
			Aliste		idx;
			Slookup		sl = *slp;

			sl.sl_flags |= LKUP_FIRST;
			any++;

			/*
			 * Look for the symbol in the handle's dependencies.
			 */
			for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
				if ((gdp->gd_flags & GPD_DLSYM) == 0)
					continue;

				/*
				 * If our parent is a dependency don't look at
				 * it (otherwise we are in a recursive loop).
				 * This situation can occur with auxiliary
				 * filters if the filtee has a dependency on
				 * the filter.  This dependency isn't necessary
				 * as auxiliary filters are opened RTLD_PARENT,
				 * but users may still unknowingly add an
				 * explicit dependency to the parent.
				 */
				if ((sl.sl_imap = gdp->gd_depend) == ilmp)
					continue;

				if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp,
				    binfo, in_nfavl)) != 0) ||
				    (ghp->gh_flags & GPH_FIRST))
					break;
			}

			/*
			 * If a symbol has been found, indicate the binding
			 * and return the symbol.
			 */
			if (sym) {
				*binfo |= DBG_BINFO_FILTEE;
				return (sym);
			}
		}

		/*
		 * If this object is tagged to terminate filtee processing
		 * we're done.
		 */
		if (FLAGS1(ghp->gh_ownlmp) & FL1_RT_ENDFILTE)
			break;
	}

	/*
	 * If we're just here to trigger filtee loading then we're done.
	 */
	if (name == 0)
		return ((Sym *)0);

	/*
	 * If no filtees have been found for a filter, clean up any Pnode
	 * structures and disable their search completely.  For auxiliary
	 * filters we can reselect the symbol search function so that we never
	 * enter this routine again for this object.  For standard filters we
	 * use the null symbol routine.
	 */
	if (any == 0) {
		remove_pnode((Pnode *)dip->di_info);
		elf_disable_filtee(ilmp, dip);
		return ((Sym *)0);
	}

	return ((Sym *)0);
}

/*
 * Focal point for disabling error messages for auxiliary filters.  As an
 * auxiliary filter allows for filtee use, but provides a fallback should a
 * filtee not exist (or fail to load), any errors generated as a consequence
 * of trying to load the filtees are typically suppressed.  Setting
 * RT_FL_SILENCERR suppresses errors generated by eprint(), but insures a
 * debug diagnostic is produced.  ldd(1) employs printf(), and here, the
 * selection of whether to print a diagnostic with regard to auxiliary filters
 * is a little more complex.
 *
 *  .	The determination of whether to produce an ldd message, or a fatal
 *	error message is driven by LML_FLG_TRC_ENABLE.
 *  .	More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN,
 *	(ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s),
 *	and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u).
 *
 *  .	If the calling object is lddstub, then several classes of message are
 *	suppressed.  The user isn't trying to diagnose lddstub, this is simply
 *	a stub executable employed to preload a user specified library against.
 *
 *  .	If RT_FL_SILENCERR is in effect then any generic ldd() messages should
 *	be suppressed.  All detailed ldd messages should still be produced.
 */
Sym *
elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx,
    int *in_nfavl)
{
	Sym	*sym;
	Dyninfo	*dip = &DYNINFO(slp->sl_imap)[ndx];
	int	silent = 0;

	/*
	 * Make sure this entry is still acting as a filter.  We may have tried
	 * to process this previously, and disabled it if the filtee couldn't
	 * be processed.  However, other entries may provide different filtees
	 * that are yet to be completed.
	 */
	if (dip->di_flags == 0)
		return ((Sym *)0);

	/*
	 * Indicate whether an error message is required should this filtee not
	 * be found, based on the type of filter.
	 */
	if ((dip->di_flags & FLG_DI_AUXFLTR) &&
	    ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) {
		rtld_flags |= RT_FL_SILENCERR;
		silent = 1;
	}

	sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx, in_nfavl);

	if (silent)
		rtld_flags &= ~RT_FL_SILENCERR;

	return (sym);
}

/*
 * Compute the elf hash value (as defined in the ELF access library).
 * The form of the hash table is:
 *
 *	|--------------|
 *	| # of buckets |
 *	|--------------|
 *	| # of chains  |
 *	|--------------|
 *	|   bucket[]   |
 *	|--------------|
 *	|   chain[]    |
 *	|--------------|
 */
ulong_t
elf_hash(const char *name)
{
	uint_t	hval = 0;

	while (*name) {
		uint_t	g;
		hval = (hval << 4) + *name++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return ((ulong_t)hval);
}
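
/*
 * For example, elf_hash("ab") accumulates (('a' << 4) + 'b'), i.e.
 * 0x672; no intermediate value for such a short name reaches the top
 * nibble, so the g folding above never fires.
 */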
1918 */ 1919 if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) { 1920 if ((ndx = chainptr[ndx]) != 0) 1921 continue; 1922 return ((Sym *)0); 1923 } 1924 1925 /* 1926 * The Solaris ld does not put DT_VERSYM in the dynamic 1927 * section, but the GNU ld does. The GNU runtime linker 1928 * interprets the top bit of the 16-bit Versym value 1929 * (0x8000) as the "hidden" bit. If this bit is set, 1930 * the linker is supposed to act as if that symbol does 1931 * not exist. The hidden bit supports their versioning 1932 * scheme, which allows multiple incompatible functions 1933 * with the same name to exist at different versions 1934 * within an object. The Solaris linker does not support this 1935 * mechanism, or the model of interface evolution that 1936 * it allows, but we honor the hidden bit in GNU ld 1937 * produced objects in order to interoperate with them. 1938 */ 1939 if ((VERSYM(ilmp) != NULL) && 1940 ((VERSYM(ilmp)[ndx] & 0x8000) != 0)) { 1941 DBG_CALL(Dbg_syms_ignore_gnuver(ilmp, name, 1942 ndx, VERSYM(ilmp)[ndx])); 1943 if ((ndx = chainptr[ndx]) != 0) 1944 continue; 1945 return ((Sym *)0); 1946 } 1947 1948 /* 1949 * If we're only here to establish a symbol's index, we're done. 1950 */ 1951 if (slp->sl_flags & LKUP_SYMNDX) 1952 return (sym); 1953 1954 /* 1955 * If we find a match and the symbol is defined, return the 1956 * symbol pointer and the link-map in which it was found. 1957 */ 1958 if (sym->st_shndx != SHN_UNDEF) { 1959 *dlmp = ilmp; 1960 *binfo |= DBG_BINFO_FOUND; 1961 if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) || 1962 ((FLAGS(ilmp) & FLG_RT_SYMINTPO) && 1963 is_sym_interposer(ilmp, sym))) 1964 *binfo |= DBG_BINFO_INTERPOSE; 1965 break; 1966 1967 /* 1968 * If we find a match and the symbol is undefined, the 1969 * symbol type is a function, and the value of the symbol 1970 * is non-zero, then this is a special case. This allows 1971 * the resolution of a function address to the plt[] entry. 1972 * See SPARC ABI, Dynamic Linking, Function Addresses for 1973 * more details. 1974 */ 1975 } else if ((slp->sl_flags & LKUP_SPEC) && 1976 (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) && 1977 (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) { 1978 *dlmp = ilmp; 1979 *binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR); 1980 if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) || 1981 ((FLAGS(ilmp) & FLG_RT_SYMINTPO) && 1982 is_sym_interposer(ilmp, sym))) 1983 *binfo |= DBG_BINFO_INTERPOSE; 1984 return (sym); 1985 } 1986 1987 /* 1988 * Undefined symbol. 1989 */ 1990 return ((Sym *)0); 1991 } 1992 1993 /* 1994 * We've found a match. Determine if the defining object contains 1995 * symbol binding information. 1996 */ 1997 if ((sip = SYMINFO(ilmp)) != 0) 1998 sip += ndx; 1999 2000 /* 2001 * If this definition is a singleton, and we haven't followed a default 2002 * symbol search knowing that we're looking for a singleton (presumably 2003 * because the symbol definition has been changed since the referring 2004 * object was built), then reject this binding so that the caller can 2005 * fall back to a standard symbol search.
2006 */ 2007 if ((ELF_ST_VISIBILITY(sym->st_other) == STV_SINGLETON) && 2008 (((slp->sl_flags & LKUP_STANDARD) == 0) || 2009 (((slp->sl_flags & LKUP_SINGLETON) == 0) && 2010 (LIST(ilmp)->lm_flags & LML_FLG_GROUPSEXIST)))) { 2011 DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name, 2012 DBG_BNDREJ_SINGLE)); 2013 *binfo |= BINFO_REJSINGLE; 2014 *binfo &= ~DBG_BINFO_MSK; 2015 return ((Sym *)0); 2016 } 2017 2018 /* 2019 * If this is a direct binding request, but the symbol definition has 2020 * disabled direct binding to it (presumably because the symbol 2021 * definition has been changed since the referring object was built), 2022 * indicate this failure so that the caller can fall back to a standard 2023 * symbol search. 2024 */ 2025 if (sip && (slp->sl_flags & LKUP_DIRECT) && 2026 (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) { 2027 DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name, 2028 DBG_BNDREJ_NODIR)); 2029 *binfo |= BINFO_REJDIRECT; 2030 *binfo &= ~DBG_BINFO_MSK; 2031 return ((Sym *)0); 2032 } 2033 2034 /* 2035 * Determine whether this object is acting as a filter. 2036 */ 2037 if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0) 2038 return (sym); 2039 2040 /* 2041 * Determine if this object offers per-symbol filtering, and if so, 2042 * whether this symbol references a filtee. 2043 */ 2044 if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) { 2045 /* 2046 * If this is a standard filter reference, and no standard 2047 * filtees remain to be inspected, we're done. If this is an 2048 * auxiliary filter reference, and no auxiliary filtees remain, 2049 * we'll fall through in case any object filtering is available. 2050 */ 2051 if ((sip->si_flags & SYMINFO_FLG_FILTER) && 2052 (SYMSFLTRCNT(ilmp) == 0)) 2053 return ((Sym *)0); 2054 2055 if ((sip->si_flags & SYMINFO_FLG_FILTER) || 2056 ((sip->si_flags & SYMINFO_FLG_AUXILIARY) && 2057 SYMAFLTRCNT(ilmp))) { 2058 Sym *fsym; 2059 2060 /* 2061 * This symbol has an associated filtee. Look up the 2062 * symbol in the filtee, and if it is found return it. 2063 * If the symbol doesn't exist, and this is a standard 2064 * filter, return an error, otherwise fall through to 2065 * catch any object filtering that may be available. 2066 */ 2067 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 2068 sip->si_boundto, in_nfavl)) != 0) 2069 return (fsym); 2070 if (sip->si_flags & SYMINFO_FLG_FILTER) 2071 return ((Sym *)0); 2072 } 2073 } 2074 2075 /* 2076 * Determine if this object provides global filtering. 2077 */ 2078 if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) { 2079 Sym *fsym; 2080 2081 if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) { 2082 /* 2083 * This object has an associated filtee. Look up the 2084 * symbol in the filtee, and if it is found return it. 2085 * If the symbol doesn't exist, and this is a standard 2086 * filter, return an error, otherwise return the symbol 2087 * within the filter itself. 2088 */ 2089 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 2090 OBJFLTRNDX(ilmp), in_nfavl)) != 0) 2091 return (fsym); 2092 } 2093 2094 if (flags1 & FL1_RT_OBJSFLTR) 2095 return ((Sym *)0); 2096 } 2097 return (sym); 2098 } 2099 2100 /* 2101 * Create a new Rt_map structure for an ELF object and initialize 2102 * all values.
2103 */ 2104 Rt_map * 2105 elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld, 2106 ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry, 2107 ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt, 2108 int *in_nfavl) 2109 { 2110 Rt_map *lmp; 2111 ulong_t base, fltr = 0, audit = 0, cfile = 0, crle = 0; 2112 Xword rpath = 0; 2113 Ehdr *ehdr = (Ehdr *)addr; 2114 2115 DBG_CALL(Dbg_file_elf(lml, pname, (ulong_t)ld, addr, msize, entry, 2116 lml->lm_lmidstr, lmco)); 2117 2118 /* 2119 * Allocate space for the link-map and private ELF information. Once 2120 * these are allocated and initialized, we can use remove_so(0, lmp) to 2121 * tear down the link-map should any failures occur. 2122 */ 2123 if ((lmp = calloc(sizeof (Rt_map), 1)) == 0) 2124 return (0); 2125 if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) { 2126 free(lmp); 2127 return (0); 2128 } 2129 2130 /* 2131 * All fields not filled in were set to 0 by calloc. 2132 */ 2133 ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname; 2134 DYN(lmp) = ld; 2135 ADDR(lmp) = addr; 2136 MSIZE(lmp) = msize; 2137 ENTRY(lmp) = (Addr)entry; 2138 SYMINTP(lmp) = elf_find_sym; 2139 ETEXT(lmp) = etext; 2140 FCT(lmp) = &elf_fct; 2141 LIST(lmp) = lml; 2142 PADSTART(lmp) = paddr; 2143 PADIMLEN(lmp) = padimsize; 2144 THREADID(lmp) = rt_thr_self(); 2145 OBJFLTRNDX(lmp) = FLTR_DISABLED; 2146 SORTVAL(lmp) = -1; 2147 2148 MMAPS(lmp) = mmaps; 2149 MMAPCNT(lmp) = mmapcnt; 2150 ASSERT(mmapcnt != 0); 2151 2152 /* 2153 * If this is a shared object, add the base address to each address. 2154 * If this is an executable, use the address as is. 2155 */ 2156 if (ehdr->e_type == ET_EXEC) { 2157 base = 0; 2158 FLAGS(lmp) |= FLG_RT_FIXED; 2159 } else 2160 base = addr; 2161 2162 /* 2163 * Fill in the rest of the link-map entries with information from the 2164 * file's dynamic structure. 2165 */ 2166 if (ld) { 2167 uint_t dynndx = 0; 2168 Xword pltpadsz = 0; 2169 Rti_desc *rti; 2170 2171 /* CSTYLED */ 2172 for ( ; ld->d_tag != DT_NULL; ++ld, dynndx++) { 2173 switch ((Xword)ld->d_tag) { 2174 case DT_SYMTAB: 2175 SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2176 break; 2177 case DT_SUNW_SYMTAB: 2178 SUNWSYMTAB(lmp) = 2179 (void *)(ld->d_un.d_ptr + base); 2180 break; 2181 case DT_SUNW_SYMSZ: 2182 SUNWSYMSZ(lmp) = ld->d_un.d_val; 2183 break; 2184 case DT_STRTAB: 2185 STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2186 break; 2187 case DT_SYMENT: 2188 SYMENT(lmp) = ld->d_un.d_val; 2189 break; 2190 case DT_FEATURE_1: 2191 ld->d_un.d_val |= DTF_1_PARINIT; 2192 if (ld->d_un.d_val & DTF_1_CONFEXP) 2193 crle = 1; 2194 break; 2195 case DT_MOVESZ: 2196 MOVESZ(lmp) = ld->d_un.d_val; 2197 FLAGS(lmp) |= FLG_RT_MOVE; 2198 break; 2199 case DT_MOVEENT: 2200 MOVEENT(lmp) = ld->d_un.d_val; 2201 break; 2202 case DT_MOVETAB: 2203 MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2204 break; 2205 case DT_REL: 2206 case DT_RELA: 2207 /* 2208 * At this time, ld.so.1 can only handle one 2209 * type of relocation per object.
2210 */ 2211 REL(lmp) = (void *)(ld->d_un.d_ptr + base); 2212 break; 2213 case DT_RELSZ: 2214 case DT_RELASZ: 2215 RELSZ(lmp) = ld->d_un.d_val; 2216 break; 2217 case DT_RELENT: 2218 case DT_RELAENT: 2219 RELENT(lmp) = ld->d_un.d_val; 2220 break; 2221 case DT_RELCOUNT: 2222 case DT_RELACOUNT: 2223 RELACOUNT(lmp) = (uint_t)ld->d_un.d_val; 2224 break; 2225 case DT_TEXTREL: 2226 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2227 break; 2228 case DT_HASH: 2229 HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2230 break; 2231 case DT_PLTGOT: 2232 PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2233 break; 2234 case DT_PLTRELSZ: 2235 PLTRELSZ(lmp) = ld->d_un.d_val; 2236 break; 2237 case DT_JMPREL: 2238 JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base); 2239 break; 2240 case DT_INIT: 2241 if (ld->d_un.d_ptr != NULL) 2242 INIT(lmp) = 2243 (void (*)())(ld->d_un.d_ptr + base); 2244 break; 2245 case DT_FINI: 2246 if (ld->d_un.d_ptr != NULL) 2247 FINI(lmp) = 2248 (void (*)())(ld->d_un.d_ptr + base); 2249 break; 2250 case DT_INIT_ARRAY: 2251 INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2252 base); 2253 break; 2254 case DT_INIT_ARRAYSZ: 2255 INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2256 break; 2257 case DT_FINI_ARRAY: 2258 FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2259 base); 2260 break; 2261 case DT_FINI_ARRAYSZ: 2262 FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2263 break; 2264 case DT_PREINIT_ARRAY: 2265 PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2266 base); 2267 break; 2268 case DT_PREINIT_ARRAYSZ: 2269 PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2270 break; 2271 case DT_RPATH: 2272 case DT_RUNPATH: 2273 rpath = ld->d_un.d_val; 2274 break; 2275 case DT_FILTER: 2276 fltr = ld->d_un.d_val; 2277 OBJFLTRNDX(lmp) = dynndx; 2278 FLAGS1(lmp) |= FL1_RT_OBJSFLTR; 2279 break; 2280 case DT_AUXILIARY: 2281 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2282 fltr = ld->d_un.d_val; 2283 OBJFLTRNDX(lmp) = dynndx; 2284 } 2285 FLAGS1(lmp) |= FL1_RT_OBJAFLTR; 2286 break; 2287 case DT_SUNW_FILTER: 2288 SYMSFLTRCNT(lmp)++; 2289 FLAGS1(lmp) |= FL1_RT_SYMSFLTR; 2290 break; 2291 case DT_SUNW_AUXILIARY: 2292 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2293 SYMAFLTRCNT(lmp)++; 2294 } 2295 FLAGS1(lmp) |= FL1_RT_SYMAFLTR; 2296 break; 2297 case DT_DEPAUDIT: 2298 if (!(rtld_flags & RT_FL_NOAUDIT)) 2299 audit = ld->d_un.d_val; 2300 break; 2301 case DT_CONFIG: 2302 cfile = ld->d_un.d_val; 2303 break; 2304 case DT_DEBUG: 2305 /* 2306 * DT_DEBUG entries are only created in 2307 * dynamic objects that require an interpreter 2308 * (ie. all dynamic executables and some shared 2309 * objects), and provide for a handshake with 2310 * debuggers. This entry is initialized to 2311 * zero by the link-editor. If a debugger has 2312 * attached to us and updated this entry, set 2313 * the debugger flag, and finish initializing 2314 * the debugging structure (see setup() also). 2315 * Switch off any configuration object use, as 2316 * most debuggers can't handle fixed dynamic 2317 * executables as dependencies, and we can't 2318 * handle requests like object padding for alternative objects.
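 *
 * For illustration only (debugger-side pseudo-code, not part of
 * this file): once &r_debug is stored below, a debugger that has
 * located DT_DEBUG in the executable's .dynamic can walk the
 * link-map chain roughly as follows:
 *
 *	for (dyn = dynamic; dyn->d_tag != DT_NULL; dyn++)
 *		if (dyn->d_tag == DT_DEBUG)
 *			rd = (struct r_debug *)dyn->d_un.d_ptr;
 *	for (lm = rd->r_map; lm != NULL; lm = lm->l_next)
 *		(inspect lm->l_name and lm->l_addr)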
2319 */ 2320 if (ld->d_un.d_ptr) 2321 rtld_flags |= 2322 (RT_FL_DEBUGGER | RT_FL_NOOBJALT); 2323 ld->d_un.d_ptr = (Addr)&r_debug; 2324 break; 2325 case DT_VERNEED: 2326 VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr + 2327 base); 2328 break; 2329 case DT_VERNEEDNUM: 2330 /* LINTED */ 2331 VERNEEDNUM(lmp) = (int)ld->d_un.d_val; 2332 break; 2333 case DT_VERDEF: 2334 VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base); 2335 break; 2336 case DT_VERDEFNUM: 2337 /* LINTED */ 2338 VERDEFNUM(lmp) = (int)ld->d_un.d_val; 2339 break; 2340 case DT_VERSYM: 2341 /* 2342 * The Solaris ld does not produce DT_VERSYM, 2343 * but the GNU ld does, in order to support 2344 * their style of versioning, which differs 2345 * from ours in some ways, while using the 2346 * same data structures. The presence of 2347 * DT_VERSYM therefore means that GNU 2348 * versioning rules apply to the given file. 2349 * If DT_VERSYM is not present, then Solaris 2350 * versioning rules apply. 2351 */ 2352 VERSYM(lmp) = (Versym *)(ld->d_un.d_ptr + base); 2353 break; 2354 case DT_BIND_NOW: 2355 if ((ld->d_un.d_val & DF_BIND_NOW) && 2356 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2357 MODE(lmp) |= RTLD_NOW; 2358 MODE(lmp) &= ~RTLD_LAZY; 2359 } 2360 break; 2361 case DT_FLAGS: 2362 FLAGS2(lmp) |= FL2_RT_DTFLAGS; 2363 if (ld->d_un.d_val & DF_SYMBOLIC) 2364 FLAGS1(lmp) |= FL1_RT_SYMBOLIC; 2365 if (ld->d_un.d_val & DF_TEXTREL) 2366 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2367 if ((ld->d_un.d_val & DF_BIND_NOW) && 2368 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2369 MODE(lmp) |= RTLD_NOW; 2370 MODE(lmp) &= ~RTLD_LAZY; 2371 } 2372 /* 2373 * Capture any static TLS use, and enforce that 2374 * this object be non-deletable. 2375 */ 2376 if (ld->d_un.d_val & DF_STATIC_TLS) { 2377 FLAGS1(lmp) |= FL1_RT_TLSSTAT; 2378 MODE(lmp) |= RTLD_NODELETE; 2379 } 2380 break; 2381 case DT_FLAGS_1: 2382 if (ld->d_un.d_val & DF_1_DISPRELPND) 2383 FLAGS1(lmp) |= FL1_RT_DISPREL; 2384 if (ld->d_un.d_val & DF_1_GROUP) 2385 FLAGS(lmp) |= 2386 (FLG_RT_SETGROUP | FLG_RT_HANDLE); 2387 if ((ld->d_un.d_val & DF_1_NOW) && 2388 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2389 MODE(lmp) |= RTLD_NOW; 2390 MODE(lmp) &= ~RTLD_LAZY; 2391 } 2392 if (ld->d_un.d_val & DF_1_NODELETE) 2393 MODE(lmp) |= RTLD_NODELETE; 2394 if (ld->d_un.d_val & DF_1_INITFIRST) 2395 FLAGS(lmp) |= FLG_RT_INITFRST; 2396 if (ld->d_un.d_val & DF_1_NOOPEN) 2397 FLAGS(lmp) |= FLG_RT_NOOPEN; 2398 if (ld->d_un.d_val & DF_1_LOADFLTR) 2399 FLAGS(lmp) |= FLG_RT_LOADFLTR; 2400 if (ld->d_un.d_val & DF_1_NODUMP) 2401 FLAGS(lmp) |= FLG_RT_NODUMP; 2402 if (ld->d_un.d_val & DF_1_CONFALT) 2403 crle = 1; 2404 if (ld->d_un.d_val & DF_1_DIRECT) 2405 FLAGS1(lmp) |= FL1_RT_DIRECT; 2406 if (ld->d_un.d_val & DF_1_NODEFLIB) 2407 FLAGS1(lmp) |= FL1_RT_NODEFLIB; 2408 if (ld->d_un.d_val & DF_1_ENDFILTEE) 2409 FLAGS1(lmp) |= FL1_RT_ENDFILTE; 2410 if (ld->d_un.d_val & DF_1_TRANS) 2411 FLAGS(lmp) |= FLG_RT_TRANS; 2412 #ifndef EXPAND_RELATIVE 2413 if (ld->d_un.d_val & DF_1_ORIGIN) 2414 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2415 #endif 2416 /* 2417 * Global auditing is only meaningful when 2418 * specified by the initiating object of the 2419 * process - typically the dynamic executable. 2420 * If this is the initiating object, its link- 2421 * map will not yet have been added to the 2422 * link-map list, and consequently the link-map 2423 * list is empty (see setup()).
2424 */ 2425 if (ld->d_un.d_val & DF_1_GLOBAUDIT) { 2426 if (lml_main.lm_head == 0) 2427 FLAGS1(lmp) |= FL1_RT_GLOBAUD; 2428 else 2429 DBG_CALL(Dbg_audit_ignore(lmp)); 2430 } 2431 2432 /* 2433 * If this object identifies itself as an 2434 * interposer, but relocation processing has 2435 * already started, then demote it. It's too 2436 * late to guarantee complete interposition. 2437 */ 2438 /* BEGIN CSTYLED */ 2439 if (ld->d_un.d_val & 2440 (DF_1_INTERPOSE | DF_1_SYMINTPOSE)) { 2441 if (lml->lm_flags & LML_FLG_STARTREL) { 2442 DBG_CALL(Dbg_util_intoolate(lmp)); 2443 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 2444 (void) printf( 2445 MSG_INTL(MSG_LDD_REL_ERR2), 2446 NAME(lmp)); 2447 } else if (ld->d_un.d_val & DF_1_INTERPOSE) 2448 FLAGS(lmp) |= FLG_RT_OBJINTPO; 2449 else 2450 FLAGS(lmp) |= FLG_RT_SYMINTPO; 2451 } 2452 /* END CSTYLED */ 2453 break; 2454 case DT_SYMINFO: 2455 SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr + 2456 base); 2457 break; 2458 case DT_SYMINENT: 2459 SYMINENT(lmp) = ld->d_un.d_val; 2460 break; 2461 case DT_PLTPAD: 2462 PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base); 2463 break; 2464 case DT_PLTPADSZ: 2465 pltpadsz = ld->d_un.d_val; 2466 break; 2467 case DT_SUNW_RTLDINF: 2468 /* 2469 * Maintain a list of RTLDINFO structures. 2470 * Typically, libc is the only supplier, and 2471 * only one structure is provided. However, 2472 * multiple suppliers and multiple structures 2473 * are supported. For example, one structure 2474 * may provide thread_init, and another 2475 * structure may provide atexit reservations. 2476 */ 2477 if ((rti = alist_append(&lml->lm_rti, 0, 2478 sizeof (Rti_desc), AL_CNT_RTLDINFO)) == 0) { 2479 remove_so(0, lmp); 2480 return (0); 2481 } 2482 rti->rti_lmp = lmp; 2483 rti->rti_info = (void *)(ld->d_un.d_ptr + base); 2484 break; 2485 case DT_SUNW_SORTENT: 2486 SUNWSORTENT(lmp) = ld->d_un.d_val; 2487 break; 2488 case DT_SUNW_SYMSORT: 2489 SUNWSYMSORT(lmp) = 2490 (void *)(ld->d_un.d_ptr + base); 2491 break; 2492 case DT_SUNW_SYMSORTSZ: 2493 SUNWSYMSORTSZ(lmp) = ld->d_un.d_val; 2494 break; 2495 case DT_DEPRECATED_SPARC_REGISTER: 2496 case M_DT_REGISTER: 2497 FLAGS(lmp) |= FLG_RT_REGSYMS; 2498 break; 2499 case M_DT_PLTRESERVE: 2500 PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr + 2501 base); 2502 break; 2503 } 2504 } 2505 2506 if (PLTPAD(lmp)) { 2507 if (pltpadsz == (Xword)0) 2508 PLTPAD(lmp) = 0; 2509 else 2510 PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) + 2511 pltpadsz); 2512 } 2513 2514 /* 2515 * Allocate a Dynamic Info structure. 2516 */ 2517 if ((DYNINFO(lmp) = calloc((size_t)dynndx, 2518 sizeof (Dyninfo))) == 0) { 2519 remove_so(0, lmp); 2520 return (0); 2521 } 2522 DYNINFOCNT(lmp) = dynndx; 2523 } 2524 2525 /* 2526 * A dynsym contains only global functions. We want to have 2527 * a version of it that also includes local functions, so that 2528 * dladdr() will be able to report names for local functions 2529 * when used to generate a stack trace for a stripped file. 2530 * This version of the dynsym is provided via DT_SUNW_SYMTAB. 2531 * 2532 * In producing DT_SUNW_SYMTAB, ld uses a non-obvious trick 2533 * to avoid having two copies of the global symbols held in 2534 * DT_SYMTAB: The local symbols are placed in a separate 2535 * section from the globals in the dynsym, but the linker 2536 * conspires to put the data for these two sections adjacent 2537 * to each other. DT_SUNW_SYMTAB points at the top of the local 2538 * symbols, and DT_SUNW_SYMSZ is the combined length of both tables.
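 *
 * Schematically (an illustration, not from the original source):
 *
 *	DT_SUNW_SYMTAB -> [ .ldynsym: local function symbols ]
 *	DT_SYMTAB      -> [ .dynsym:  global symbols         ]
 *	                  |<-------- DT_SUNW_SYMSZ --------->|
 *
 * so a well-formed pair satisfies
 *	SUNWSYMTAB < SYMTAB < SUNWSYMTAB + SUNWSYMSZ.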
2539 * 2540 * If the two sections are not adjacent, then something went wrong 2541 * at link time. We use ASSERT to kill the process if this is 2542 * a debug build. In a production build, we will silently ignore 2543 * the presence of the .ldynsym and proceed. We can detect this 2544 * situation by checking to see that DT_SYMTAB lies in 2545 * the range given by DT_SUNW_SYMTAB/DT_SUNW_SYMSZ. 2546 */ 2547 if ((SUNWSYMTAB(lmp) != NULL) && 2548 (((char *)SYMTAB(lmp) <= (char *)SUNWSYMTAB(lmp)) || 2549 (((char *)SYMTAB(lmp) >= 2550 (SUNWSYMSZ(lmp) + (char *)SUNWSYMTAB(lmp)))))) { 2551 ASSERT(0); 2552 SUNWSYMTAB(lmp) = NULL; 2553 SUNWSYMSZ(lmp) = 0; 2554 } 2555 2556 /* 2557 * If configuration file use hasn't been disabled, and a configuration 2558 * file hasn't already been set via an environment variable, see if any 2559 * application-specific configuration file is specified. An LD_CONFIG 2560 * setting is used first, but if this image was generated via crle(1) 2561 * then a default configuration file is a fall-back. 2562 */ 2563 if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) { 2564 if (cfile) 2565 config->c_name = (const char *)(cfile + 2566 (char *)STRTAB(lmp)); 2567 else if (crle) { 2568 rtld_flags |= RT_FL_CONFAPP; 2569 #ifndef EXPAND_RELATIVE 2570 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2571 #endif 2572 } 2573 } 2574 2575 if (rpath) 2576 RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp)); 2577 if (fltr) { 2578 /* 2579 * If this object is a global filter, duplicate the filtee 2580 * string name(s) so that REFNAME() is available in core files. 2581 * This kludge was useful for debuggers at one point, but only 2582 * when the filtee name was an individual full path. 2583 */ 2584 if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) { 2585 remove_so(0, lmp); 2586 return (0); 2587 } 2588 } 2589 2590 if (rtld_flags & RT_FL_RELATIVE) 2591 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2592 2593 /* 2594 * For Intel ABI compatibility. It's possible that a JMPREL can be 2595 * specified without any other relocations (e.g. a dynamic executable 2596 * normally only contains .plt relocations). If this is the case then 2597 * no REL, RELSZ or RELENT will have been created. For us to be able 2598 * to traverse the .plt relocations under LD_BIND_NOW we need to know 2599 * the RELENT for these relocations. Refer to elf_reloc() for more 2600 * details. 2601 */ 2602 if (!RELENT(lmp) && JMPREL(lmp)) 2603 RELENT(lmp) = sizeof (Rel); 2604 2605 /* 2606 * Establish any per-object auditing. If we're establishing `main's 2607 * link-map it's too early to go searching for audit objects so just 2608 * hold the object name for later (see setup()). 2609 */ 2610 if (audit) { 2611 char *cp = audit + (char *)STRTAB(lmp); 2612 2613 if (*cp) { 2614 if (((AUDITORS(lmp) = 2615 calloc(1, sizeof (Audit_desc))) == 0) || 2616 ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) { 2617 remove_so(0, lmp); 2618 return (0); 2619 } 2620 if (lml_main.lm_head) { 2621 if (audit_setup(lmp, AUDITORS(lmp), 0, 2622 in_nfavl) == 0) { 2623 remove_so(0, lmp); 2624 return (0); 2625 } 2626 FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags; 2627 lml->lm_flags |= LML_FLG_LOCAUDIT; 2628 } 2629 } 2630 } 2631 2632 if ((CONDVAR(lmp) = rt_cond_create()) == 0) { 2633 remove_so(0, lmp); 2634 return (0); 2635 } 2636 if (oname && ((append_alias(lmp, oname, 0)) == 0)) { 2637 remove_so(0, lmp); 2638 return (0); 2639 } 2640 2641 /* 2642 * Add the mapped object to the end of the link-map list.
2643 */ 2644 lm_append(lml, lmco, lmp); 2645 return (lmp); 2646 } 2647 2648 /* 2649 * Assign hardware/software capabilities. 2650 */ 2651 void 2652 cap_assign(Cap *cap, Rt_map *lmp) 2653 { 2654 while (cap->c_tag != CA_SUNW_NULL) { 2655 switch (cap->c_tag) { 2656 case CA_SUNW_HW_1: 2657 HWCAP(lmp) = cap->c_un.c_val; 2658 break; 2659 case CA_SUNW_SF_1: 2660 SFCAP(lmp) = cap->c_un.c_val; 2661 } 2662 cap++; 2663 } 2664 } 2665 2666 /* 2667 * Map in an ELF object. 2668 * Takes an open file descriptor for the object to map and its pathname; returns 2669 * a pointer to a Rt_map structure for this object, or 0 on error. 2670 */ 2671 static Rt_map * 2672 elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname, 2673 int fd, int *in_nfavl) 2674 { 2675 int i; /* general temporary */ 2676 Off memsize = 0; /* total memory size of pathname */ 2677 Off mentry; /* entry point */ 2678 Ehdr *ehdr; /* ELF header of pathname */ 2679 Phdr *phdr; /* first Phdr in file */ 2680 Phdr *phdr0; /* Saved first Phdr in file */ 2681 Phdr *pptr; /* working Phdr */ 2682 Phdr *fph = 0; /* first loadable Phdr */ 2683 Phdr *lph; /* last loadable Phdr */ 2684 Phdr *lfph = 0; /* last loadable (filesz != 0) Phdr */ 2685 Phdr *lmph = 0; /* last loadable (memsz != 0) Phdr */ 2686 Phdr *swph = 0; /* program header for SUNWBSS */ 2687 Phdr *tlph = 0; /* program header for PT_TLS */ 2688 Phdr *unwindph = 0; /* program header for PT_SUNW_UNWIND */ 2689 Cap *cap = 0; /* program header for SUNWCAP */ 2690 Dyn *mld = 0; /* DYNAMIC structure for pathname */ 2691 size_t size; /* size of ELF and program headers */ 2692 caddr_t faddr = 0; /* mapping address of pathname */ 2693 Rt_map *lmp; /* link map created */ 2694 caddr_t paddr; /* start of padded image */ 2695 Off plen; /* size of image including padding */ 2696 Half etype; 2697 int fixed; 2698 Mmap *mmaps; 2699 uint_t mmapcnt = 0; 2700 Xword align = 0; 2701 2702 /* LINTED */ 2703 ehdr = (Ehdr *)fmap->fm_maddr; 2704 2705 /* 2706 * If this is a relocatable object, then special processing is required. 2707 */ 2708 if ((etype = ehdr->e_type) == ET_REL) 2709 return (elf_obj_file(lml, lmco, pname, fd)); 2710 2711 /* 2712 * If this isn't a dynamic executable or shared object, we can't process 2713 * it. If this is a dynamic executable, then all addresses are fixed. 2714 */ 2715 if (etype == ET_EXEC) { 2716 fixed = 1; 2717 } else if (etype == ET_DYN) { 2718 fixed = 0; 2719 } else { 2720 Conv_inv_buf_t inv_buf; 2721 2722 eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname, 2723 conv_ehdr_type(etype, 0, &inv_buf)); 2724 return (0); 2725 } 2726 2727 /* 2728 * If our original mapped page was not large enough to hold all the 2729 * program headers, remap them. 2730 */ 2731 size = (size_t)((char *)ehdr->e_phoff + 2732 (ehdr->e_phnum * ehdr->e_phentsize)); 2733 if (size > fmap->fm_fsize) { 2734 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2735 return (0); 2736 } 2737 if (size > fmap->fm_msize) { 2738 fmap_setup(); 2739 if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ, 2740 fmap->fm_mflags, fd, 0)) == MAP_FAILED) { 2741 int err = errno; 2742 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname, 2743 strerror(err)); 2744 return (0); 2745 } 2746 fmap->fm_msize = size; 2747 /* LINTED */ 2748 ehdr = (Ehdr *)fmap->fm_maddr; 2749 } 2750 /* LINTED */ 2751 phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize); 2752 2753 /* 2754 * Get entry point. 2755 */ 2756 mentry = ehdr->e_entry; 2757 2758 /* 2759 * Point at program headers and perform some basic validation.
2760 */ 2761 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2762 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2763 if ((pptr->p_type == PT_LOAD) || 2764 (pptr->p_type == PT_SUNWBSS)) { 2765 2766 if (fph == 0) { 2767 fph = pptr; 2768 /* LINTED argument lph is initialized in first pass */ 2769 } else if (pptr->p_vaddr <= lph->p_vaddr) { 2770 eprintf(lml, ERR_ELF, 2771 MSG_INTL(MSG_GEN_INVPRGHDR), pname); 2772 return (0); 2773 } 2774 2775 lph = pptr; 2776 2777 if (pptr->p_memsz) 2778 lmph = pptr; 2779 if (pptr->p_filesz) 2780 lfph = pptr; 2781 if (pptr->p_type == PT_SUNWBSS) 2782 swph = pptr; 2783 if (pptr->p_align > align) 2784 align = pptr->p_align; 2785 2786 } else if (pptr->p_type == PT_DYNAMIC) { 2787 mld = (Dyn *)(pptr->p_vaddr); 2788 } else if ((pptr->p_type == PT_TLS) && pptr->p_memsz) { 2789 tlph = pptr; 2790 } else if (pptr->p_type == PT_SUNWCAP) { 2791 cap = (Cap *)(pptr->p_vaddr); 2792 } else if (pptr->p_type == PT_SUNW_UNWIND) { 2793 unwindph = pptr; 2794 } 2795 } 2796 2797 #if defined(MAP_ALIGN) 2798 /* 2799 * Make sure the maximum page alignment is a power of 2 >= the default 2800 * segment alignment, for use with MAP_ALIGN. 2801 */ 2802 align = S_ROUND(align, M_SEGM_ALIGN); 2803 #endif 2804 2805 /* 2806 * We'd better have at least one loadable segment, together with some 2807 * specified file and memory size. 2808 */ 2809 if ((fph == 0) || (lmph == 0) || (lfph == 0)) { 2810 eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname); 2811 return (0); 2812 } 2813 2814 /* 2815 * Check that the file's size accounts for the loadable sections 2816 * we're going to map in (failure to do this may cause spurious 2817 * bus errors if we're given a truncated file). 2818 */ 2819 if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) { 2820 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2821 return (0); 2822 } 2823 2824 /* 2825 * Memsize must be page-rounded so that if we add object padding 2826 * at the end it will start at the beginning of a page. 2827 */ 2828 plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) - 2829 M_PTRUNC((ulong_t)fph->p_vaddr)); 2830 2831 /* 2832 * Determine if an existing mapping is acceptable. 2833 */ 2834 if (interp && (lml->lm_flags & LML_FLG_BASELM) && 2835 (strcmp(pname, interp->i_name) == 0)) { 2836 /* 2837 * If this is the interpreter then it has already been mapped 2838 * and we have the address so don't map it again. Note that 2839 * the common occurrence of a reference to the interpreter 2840 * (libdl -> ld.so.1) will have been caught during filter 2841 * initialization (see elf_lookup_filtee()). However, some 2842 * ELF implementations are known to record libc.so.1 as the 2843 * interpreter, and thus this test catches this behavior. 2844 */ 2845 paddr = faddr = interp->i_faddr; 2846 2847 } else if ((fixed == 0) && (r_debug.rtd_objpad == 0) && 2848 (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) && 2849 (fph == lph) && (fph->p_filesz == fph->p_memsz) && 2850 (((Xword)fmap->fm_maddr % align) == 0)) { 2851 size_t rsize; 2852 2853 /* 2854 * If the file contains a single segment, and the mapping 2855 * required has already been established from the initial fmap 2856 * mapping, then we don't need to do anything more. Reset the 2857 * fmap address so that any later files start a new fmap. This 2858 * is really an optimization for filters, such as libdl.so, 2859 * libthread, etc. that are constructed to be a single text 2860 * segment.
2861 */ 2862 paddr = faddr = fmap->fm_maddr; 2863 2864 /* 2865 * Free any unused mapping by assigning the fmap buffer to the 2866 * unused region. fmap_setup() will unmap this area and 2867 * establish defaults for future mappings. 2868 */ 2869 rsize = M_PROUND(fph->p_filesz); 2870 fmap->fm_maddr += rsize; 2871 fmap->fm_msize -= rsize; 2872 fmap_setup(); 2873 } 2874 2875 /* 2876 * Allocate a mapping array to retain mapped segment information. 2877 */ 2878 if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0) 2879 return (0); 2880 2881 /* 2882 * If we're reusing an existing mapping, determine the object's etext 2883 * address. Otherwise map the file (which will calculate the etext 2884 * address as part of the mapping process). 2885 */ 2886 if (faddr) { 2887 caddr_t base; 2888 2889 if (fixed) 2890 base = 0; 2891 else 2892 base = faddr; 2893 2894 /* LINTED */ 2895 phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize); 2896 2897 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2898 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2899 if (pptr->p_type != PT_LOAD) 2900 continue; 2901 2902 mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base); 2903 mmaps[mmapcnt].m_msize = pptr->p_memsz; 2904 mmaps[mmapcnt].m_fsize = pptr->p_filesz; 2905 mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC); 2906 mmapcnt++; 2907 2908 if (!(pptr->p_flags & PF_W)) { 2909 fmap->fm_etext = (ulong_t)pptr->p_vaddr + 2910 (ulong_t)pptr->p_memsz + 2911 (ulong_t)(fixed ? 0 : faddr); 2912 } 2913 } 2914 } else { 2915 /* 2916 * Map the file. 2917 */ 2918 if (!(faddr = elf_map_it(lml, pname, memsize, ehdr, fph, lph, 2919 &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt))) 2920 return (0); 2921 } 2922 2923 /* 2924 * Calculate absolute base addresses and entry points. 2925 */ 2926 if (!fixed) { 2927 if (mld) 2928 /* LINTED */ 2929 mld = (Dyn *)((Off)mld + faddr); 2930 if (cap) 2931 /* LINTED */ 2932 cap = (Cap *)((Off)cap + faddr); 2933 mentry += (Off)faddr; 2934 } 2935 2936 /* 2937 * Create a new link-map structure for the newly mapped shared object. 2938 */ 2939 if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr, 2940 fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps, 2941 mmapcnt, in_nfavl))) { 2942 (void) munmap((caddr_t)faddr, memsize); 2943 return (0); 2944 } 2945 2946 /* 2947 * Start the system loading in the ELF information we'll be processing. 2948 */ 2949 if (REL(lmp)) { 2950 (void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) + 2951 (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp), 2952 MADV_WILLNEED); 2953 } 2954 2955 /* 2956 * If this shared object contains any special segments, record them. 2957 */ 2958 if (swph) { 2959 FLAGS(lmp) |= FLG_RT_SUNWBSS; 2960 SUNWBSS(lmp) = phdr + (swph - phdr0); 2961 } 2962 if (tlph && (tls_assign(lml, lmp, (phdr + (tlph - phdr0))) == 0)) { 2963 remove_so(lml, lmp); 2964 return (0); 2965 } 2966 2967 if (unwindph) 2968 PTUNWIND(lmp) = phdr + (unwindph - phdr0); 2969 2970 if (cap) 2971 cap_assign(cap, lmp); 2972 2973 return (lmp); 2974 } 2975 2976 /* 2977 * Function to correct protection settings. Segments are all mapped initially 2978 * with permissions as given in the segment header. We need to turn on write 2979 * permissions on a text segment if there are any relocations against that 2980 * segment, and then turn write permission back off again before returning 2981 * control to the user. This function turns the permission on or off depending 2982 * on the value of the argument.
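 *
 * Illustrative usage (a sketch of the calling convention described
 * above, not a quotation from the relocation code):
 *
 *	if (elf_set_prot(lmp, PROT_WRITE) == 0)
 *		return (0);
 *	(apply the text relocations)
 *	(void) elf_set_prot(lmp, 0);	(restore original permissions)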
2983 */ 2984 int 2985 elf_set_prot(Rt_map *lmp, int permission) 2986 { 2987 Mmap *mmaps; 2988 2989 /* 2990 * If this is an allocated image (ie. a relocatable object) we can't 2991 * mprotect() anything. 2992 */ 2993 if (FLAGS(lmp) & FLG_RT_IMGALLOC) 2994 return (1); 2995 2996 DBG_CALL(Dbg_file_prot(lmp, permission)); 2997 2998 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) { 2999 if (mmaps->m_perm & PROT_WRITE) 3000 continue; 3001 3002 if (mprotect(mmaps->m_vaddr, mmaps->m_msize, 3003 (mmaps->m_perm | permission)) == -1) { 3004 int err = errno; 3005 eprintf(LIST(lmp), ERR_FATAL, MSG_INTL(MSG_SYS_MPROT), 3006 NAME(lmp), strerror(err)); 3007 return (0); 3008 } 3009 } 3010 return (1); 3011 } 3012 3013 /* 3014 * Build the full pathname of a shared object from the given directory name and filename. 3015 */ 3016 static char * 3017 elf_get_so(const char *dir, const char *file) 3018 { 3019 static char pname[PATH_MAX]; 3020 3021 (void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file); 3022 return (pname); 3023 } 3024 3025 /* 3026 * The copy relocation is recorded in a copy structure which will be applied 3027 * after all other relocations are carried out. This provides for copying data 3028 * that must be relocated itself (ie. pointers in shared objects). This 3029 * structure also provides a means of binding RTLD_GROUP dependencies to any 3030 * copy relocations that have been taken from any group members. 3031 * 3032 * If the size of the .bss area available for the copy information is not the 3033 * same as the source of the data, inform the user if we're under ldd(1) control 3034 * (this checking was only established in 5.3, so by only issuing an error via 3035 * ldd(1) we maintain the standard set by previous releases). 3036 */ 3037 int 3038 elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym, 3039 Rt_map *dlmp, const void *dadd) 3040 { 3041 Rel_copy rc; 3042 Lm_list *lml = LIST(rlmp); 3043 3044 rc.r_name = name; 3045 rc.r_rsym = rsym; /* the new reference symbol and its */ 3046 rc.r_rlmp = rlmp; /* associated link-map */ 3047 rc.r_dlmp = dlmp; /* the defining link-map */ 3048 rc.r_dsym = dsym; /* the original definition */ 3049 rc.r_radd = radd; 3050 rc.r_dadd = dadd; 3051 3052 if (rsym->st_size > dsym->st_size) 3053 rc.r_size = (size_t)dsym->st_size; 3054 else 3055 rc.r_size = (size_t)rsym->st_size; 3056 3057 if (alist_append(&COPY_R(dlmp), &rc, sizeof (Rel_copy), 3058 AL_CNT_COPYREL) == 0) { 3059 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 3060 return (0); 3061 else 3062 return (1); 3063 } 3064 if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) { 3065 if (aplist_append(&COPY_S(rlmp), dlmp, 3066 AL_CNT_COPYREL) == NULL) { 3067 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 3068 return (0); 3069 else 3070 return (1); 3071 } 3072 FLAGS1(dlmp) |= FL1_RT_COPYTOOK; 3073 } 3074 3075 /* 3076 * If we are tracing (ldd), warn the user if 3077 * 1) the size from the reference symbol differs from the 3078 * copy definition. We can only copy as much data as the 3079 * reference (dynamic executable's) entry allows. 3080 * 2) the copy definition has STV_PROTECTED visibility.
3081 */ 3082 if (lml->lm_flags & LML_FLG_TRC_WARN) { 3083 if (rsym->st_size != dsym->st_size) { 3084 (void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF), 3085 _conv_reloc_type(M_R_COPY), demangle(name), 3086 NAME(rlmp), EC_XWORD(rsym->st_size), 3087 NAME(dlmp), EC_XWORD(dsym->st_size)); 3088 if (rsym->st_size > dsym->st_size) 3089 (void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA), 3090 NAME(dlmp)); 3091 else 3092 (void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC), 3093 NAME(rlmp)); 3094 } 3095 3096 if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) { 3097 (void) printf(MSG_INTL(MSG_LDD_CPY_PROT), 3098 _conv_reloc_type(M_R_COPY), demangle(name), 3099 NAME(dlmp)); 3100 } 3101 } 3102 3103 DBG_CALL(Dbg_reloc_apply_val(lml, ELF_DBG_RTLD, (Xword)radd, 3104 (Xword)rc.r_size)); 3105 return (1); 3106 } 3107 3108 /* 3109 * Determine the symbol location of an address within a link-map. Look for 3110 * the nearest symbol (whose value is less than or equal to the required 3111 * address). This is the object-specific part of dladdr(). 3112 */ 3113 static void 3114 elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags) 3115 { 3116 ulong_t ndx, cnt, base, _value; 3117 Sym *sym, *_sym = NULL; 3118 const char *str; 3119 int _flags; 3120 uint_t *dynaddr_ndx; 3121 uint_t dynaddr_n = 0; 3122 ulong_t value; 3123 3124 /* 3125 * If SUNWSYMTAB() is non-NULL, then it references a special version of 3126 * the dynsym that starts with any local function symbols that exist in 3127 * the library and then moves to the data held in SYMTAB(). In this 3128 * case, SUNWSYMSZ tells us how long the symbol table is. The 3129 * availability of local function symbols will enhance the results 3130 * we can provide. 3131 * 3132 * If SUNWSYMTAB() is non-NULL, then there might also be a 3133 * SUNWSYMSORT() vector associated with it. SUNWSYMSORT() contains 3134 * an array of indices into SUNWSYMTAB, sorted by increasing 3135 * address. We can use this to do an O(log N) search instead of a 3136 * brute force search. 3137 * 3138 * If SUNWSYMTAB() is NULL, then SYMTAB() references a dynsym that 3139 * contains only global symbols. In that case, the length of 3140 * the symbol table comes from the nchain field of the related 3141 * symbol lookup hash table. 3142 */ 3143 str = STRTAB(lmp); 3144 if (SUNWSYMSZ(lmp) == NULL) { 3145 sym = SYMTAB(lmp); 3146 /* 3147 * If we don't have a .hash table, there are no symbols 3148 * to look at. 3149 */ 3150 if (HASH(lmp) == 0) 3151 return; 3152 cnt = HASH(lmp)[1]; 3153 } else { 3154 sym = SUNWSYMTAB(lmp); 3155 cnt = SUNWSYMSZ(lmp) / SYMENT(lmp); 3156 dynaddr_ndx = SUNWSYMSORT(lmp); 3157 if (dynaddr_ndx != NULL) 3158 dynaddr_n = SUNWSYMSORTSZ(lmp) / SUNWSORTENT(lmp); 3159 } 3160 3161 if (FLAGS(lmp) & FLG_RT_FIXED) 3162 base = 0; 3163 else 3164 base = ADDR(lmp); 3165 3166 if (dynaddr_n > 0) { /* Binary search */ 3167 long low = 0, low_bnd; 3168 long high = dynaddr_n - 1, high_bnd; 3169 long mid; 3170 Sym *mid_sym; 3171 3172 /* 3173 * Note that SUNWSYMSORT only contains symbol types that 3174 * supply memory addresses, so there's no need to check and 3175 * filter out any other types.
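 *
 * As a sketch of the invariant relied upon below (not a quotation):
 * dynaddr_ndx[0 .. dynaddr_n - 1] orders symbols by increasing
 * st_value, so the binary search hunts for the largest i with
 *
 *	sym[dynaddr_ndx[i]].st_value + base <= addr
 *
 * narrowing [low_bnd, high_bnd] as a fallback range for the short
 * linear scan used when no exact match is found.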
3176 */ 3177 low_bnd = low; 3178 high_bnd = high; 3179 while (low <= high) { 3180 mid = (low + high) / 2; 3181 mid_sym = &sym[dynaddr_ndx[mid]]; 3182 value = mid_sym->st_value + base; 3183 if (addr < value) { 3184 if ((sym[dynaddr_ndx[high]].st_value + base) >= 3185 addr) 3186 high_bnd = high; 3187 high = mid - 1; 3188 } else if (addr > value) { 3189 if ((sym[dynaddr_ndx[low]].st_value + base) <= 3190 addr) 3191 low_bnd = low; 3192 low = mid + 1; 3193 } else { 3194 _sym = mid_sym; 3195 _value = value; 3196 break; 3197 } 3198 } 3199 /* 3200 * If the above didn't find it exactly, then we must 3201 * return the closest symbol with a value that doesn't 3202 * exceed the one we are looking for. If that symbol exists, 3203 * it will lie in the range bounded by low_bnd and 3204 * high_bnd. This is a linear search, but a short one. 3205 */ 3206 if (_sym == NULL) { 3207 for (mid = low_bnd; mid <= high_bnd; mid++) { 3208 mid_sym = &sym[dynaddr_ndx[mid]]; 3209 value = mid_sym->st_value + base; 3210 if (addr >= value) { 3211 _sym = mid_sym; 3212 _value = value; 3213 } else { 3214 break; 3215 } 3216 } 3217 } 3218 } else { /* Linear search */ 3219 for (_value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) { 3220 /* 3221 * Skip expected symbol types that are not functions 3222 * or data: 3223 * - A symbol table starts with an undefined symbol 3224 * in slot 0. If we are using SUNWSYMTAB(), 3225 * there will be a second undefined symbol 3226 * right before the globals. 3227 * - The local part of SUNWSYMTAB() contains a 3228 * series of function symbols. Each section 3229 * starts with an initial STT_FILE symbol. 3230 */ 3231 if ((sym->st_shndx == SHN_UNDEF) || 3232 (ELF_ST_TYPE(sym->st_info) == STT_FILE)) 3233 continue; 3234 3235 value = sym->st_value + base; 3236 if (value > addr) 3237 continue; 3238 if (value < _value) 3239 continue; 3240 3241 _sym = sym; 3242 _value = value; 3243 3244 /* 3245 * Note, because we accept local and global symbols, 3246 * we could find a section symbol that matches the 3247 * associated address, which means that the symbol 3248 * name will be null. In this case continue the 3249 * search in case we can find a global symbol of 3250 * the same value. 3251 */ 3252 if ((value == addr) && 3253 (ELF_ST_TYPE(sym->st_info) != STT_SECTION)) 3254 break; 3255 } 3256 } 3257 3258 _flags = flags & RTLD_DL_MASK; 3259 if (_sym) { 3260 if (_flags == RTLD_DL_SYMENT) 3261 *info = (void *)_sym; 3262 else if (_flags == RTLD_DL_LINKMAP) 3263 *info = (void *)lmp; 3264 3265 dlip->dli_sname = str + _sym->st_name; 3266 dlip->dli_saddr = (void *)_value; 3267 } else { 3268 /* 3269 * addr lies between the beginning of the mapped segment and 3270 * the first global symbol. We have no symbol to return 3271 * and the caller requires one. We use _START_, the base 3272 * address of the mapping. 3273 */ 3274 3275 if (_flags == RTLD_DL_SYMENT) { 3276 /* 3277 * An actual symbol struct is needed, so we 3278 * construct one for _START_. To do this in a 3279 * fully accurate way requires a different symbol 3280 * for each mapped segment. This requires the 3281 * use of dynamic memory and a mutex. That's too much 3282 * plumbing for a fringe case of limited importance. 3283 * 3284 * Fortunately, we can simplify: 3285 * - Only the st_size and st_info fields are useful 3286 * outside of the linker internals. The others 3287 * reference things that outside code cannot see, 3288 * and can be set to 0. 3289 * - It's just a label and there is no size 3290 * to report. So, the size should be 0.
3291 * This means that only st_info needs a non-zero 3292 * (constant) value. A static struct will suffice. 3293 * It must be const (readonly) so the caller can't 3294 * change its meaning for subsequent callers. 3295 */ 3296 static const Sym fsym = { 0, 0, 0, 3297 ELF_ST_INFO(STB_LOCAL, STT_OBJECT) }; 3298 *info = (void *) &fsym; 3299 } 3300 3301 dlip->dli_sname = MSG_ORIG(MSG_SYM_START); 3302 dlip->dli_saddr = (void *) ADDR(lmp); 3303 } 3304 } 3305 3306 static void 3307 elf_lazy_cleanup(APlist *alp) 3308 { 3309 Rt_map *lmp; 3310 Aliste idx; 3311 3312 /* 3313 * Clean up any link-maps added to this dynamic list, and free the list. 3314 */ 3315 for (APLIST_TRAVERSE(alp, idx, lmp)) 3316 FLAGS(lmp) &= ~FLG_RT_TMPLIST; 3317 free(alp); 3318 } 3319 3320 /* 3321 * This routine is called as a last fall-back to search for a symbol from a 3322 * standard relocation. To maintain lazy loading's goal of reducing the number 3323 * of objects mapped, any symbol search is first carried out using the objects 3324 * that already exist in the process (either on a link-map list or handle). 3325 * If a symbol can't be found, and lazy dependencies are still pending, this 3326 * routine loads the dependencies in an attempt to locate the symbol. 3327 * 3328 * Only new objects are inspected as we will have already inspected presently 3329 * loaded objects before calling this routine. However, a new object may not 3330 * be new - although the di_lmp might be zero, the object may have been mapped 3331 * as someone else's dependency. Thus there's a possibility of some symbol 3332 * search duplication. 3333 */ 3334 Sym * 3335 elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo, int *in_nfavl) 3336 { 3337 Sym *sym = 0; 3338 APlist *alist = NULL; 3339 Aliste idx; 3340 Rt_map *lmp1, *lmp = slp->sl_imap; 3341 const char *name = slp->sl_name; 3342 3343 /* 3344 * Generate a local list of new objects to process. This list can grow 3345 * as each object supplies its own lazy dependencies. 3346 */ 3347 if (aplist_append(&alist, lmp, AL_CNT_LAZYFIND) == NULL) 3348 return (NULL); 3349 FLAGS(lmp) |= FLG_RT_TMPLIST; 3350 3351 for (APLIST_TRAVERSE(alist, idx, lmp1)) { 3352 uint_t cnt = 0; 3353 Slookup sl = *slp; 3354 Dyninfo *dip, *pdip; 3355 3356 /* 3357 * Discard any relocation index from further symbol searches. 3358 * This index will have already been used to trigger any 3359 * necessary lazy-loads, and it might be because one of these 3360 * lazy loads has failed that we're here performing this 3361 * fallback. By removing the relocation index we don't try 3362 * to perform the same failed lazy loading activity again. 3363 */ 3364 sl.sl_rsymndx = 0; 3365 3366 /* 3367 * Loop through the lazy DT_NEEDED entries examining each object 3368 * for the required symbol. If the symbol is not found, the 3369 * object is in turn added to the local alist, so that the 3370 * object's lazy DT_NEEDED entries can be examined. 3371 */ 3372 lmp = lmp1; 3373 for (dip = DYNINFO(lmp), pdip = NULL; cnt < DYNINFOCNT(lmp); 3374 cnt++, pdip = dip++) { 3375 Rt_map *nlmp; 3376 3377 if (((dip->di_flags & FLG_DI_LAZY) == 0) || 3378 dip->di_info) 3379 continue; 3380 3381 /* 3382 * If this object has already failed to lazy load, and 3383 * we're still processing the same runtime linker 3384 * operation that produced the failure, don't bother 3385 * to try to load the object again.
3386 */ 3387 if ((dip->di_flags & FLG_DI_LAZYFAIL) && pdip && 3388 (pdip->di_flags & FLG_DI_POSFLAG1)) { 3389 if (pdip->di_info == (void *)ld_entry_cnt) 3390 continue; 3391 3392 dip->di_flags &= ~FLG_DI_LAZYFAIL; 3393 pdip->di_info = NULL; 3394 } 3395 3396 /* 3397 * Try loading this lazy dependency. If the object 3398 * can't be loaded, consider this non-fatal and continue 3399 * the search. Lazy-loaded dependencies need not exist 3400 * and their loading should only turn out to be fatal 3401 * if they are required to satisfy a relocation. 3402 * 3403 * If the file is already loaded and relocated we must 3404 * still inspect it for symbols, even though it might 3405 * have already been searched. This lazy load operation 3406 * might have promoted the permissions of the object, 3407 * and thus made the object applicable for this symbol 3408 * search, whereas before the object might have been 3409 * skipped. 3410 */ 3411 if ((nlmp = elf_lazy_load(lmp, &sl, cnt, 3412 name, in_nfavl)) == 0) 3413 continue; 3414 3415 /* 3416 * If this object isn't yet a part of the dynamic list, 3417 * then inspect it for the symbol. If the symbol isn't 3418 * found, add the object to the dynamic list so that we 3419 * can inspect its dependencies. 3420 */ 3421 if (FLAGS(nlmp) & FLG_RT_TMPLIST) 3422 continue; 3423 3424 sl.sl_imap = nlmp; 3425 if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, 3426 binfo, in_nfavl)) 3427 break; 3428 3429 /* 3430 * Some dlsym() operations are already traversing a 3431 * link-map (dlopen(0)), and thus there's no need to 3432 * build our own dynamic dependency list. 3433 */ 3434 if ((sl.sl_flags & LKUP_NODESCENT) == 0) { 3435 if (aplist_append(&alist, nlmp, 3436 AL_CNT_LAZYFIND) == 0) { 3437 elf_lazy_cleanup(alist); 3438 return (0); 3439 } 3440 FLAGS(nlmp) |= FLG_RT_TMPLIST; 3441 } 3442 } 3443 if (sym) 3444 break; 3445 } 3446 3447 elf_lazy_cleanup(alist); 3448 return (sym); 3449 } 3450 3451 /* 3452 * Warning message for bad r_offset. 3453 */ 3454 void 3455 elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset, 3456 ulong_t rsymndx) 3457 { 3458 const char *name = (char *)0; 3459 Lm_list *lml = LIST(lmp); 3460 int trace; 3461 3462 if ((lml->lm_flags & LML_FLG_TRC_ENABLE) && 3463 (((rtld_flags & RT_FL_SILENCERR) == 0) || 3464 (lml->lm_flags & LML_FLG_TRC_VERBOSE))) 3465 trace = 1; 3466 else 3467 trace = 0; 3468 3469 if ((trace == 0) && (DBG_ENABLED == 0)) 3470 return; 3471 3472 if (rsymndx) { 3473 Sym *symref = (Sym *)((ulong_t)SYMTAB(lmp) + 3474 (rsymndx * SYMENT(lmp))); 3475 3476 if (ELF_ST_BIND(symref->st_info) != STB_LOCAL) 3477 name = (char *)(STRTAB(lmp) + symref->st_name); 3478 } 3479 3480 if (name == 0) 3481 name = MSG_ORIG(MSG_STR_EMPTY); 3482 3483 if (trace) { 3484 const char *rstr; 3485 3486 rstr = _conv_reloc_type((uint_t)rtype); 3487 (void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name, 3488 EC_ADDR(roffset)); 3489 return; 3490 } 3491 3492 Dbg_reloc_error(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, name); 3493 } 3494 3495 /* 3496 * Resolve a static TLS relocation. 3497 */ 3498 long 3499 elf_static_tls(Rt_map *lmp, Sym *sym, void *rel, uchar_t rtype, char *name, 3500 ulong_t roffset, long value) 3501 { 3502 Lm_list *lml = LIST(lmp); 3503 3504 /* 3505 * Relocations against a static TLS block have limited support once 3506 * process initialization has completed.
Any error condition should be 3507 discovered by testing for DF_STATIC_TLS as part of loading an object; 3508 however, individual relocations are tested in case the dynamic flag 3509 had not been set when this object was built. 3510 */ 3511 if (PTTLS(lmp) == 0) { 3512 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, 3513 M_REL_SHT_TYPE, rel, NULL, name)); 3514 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS), 3515 _conv_reloc_type((uint_t)rtype), NAME(lmp), 3516 name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN)); 3517 return (0); 3518 } 3519 3520 /* 3521 * If no static TLS has been set aside for this object, determine if 3522 * any can be obtained. Enforce that any object using static TLS is 3523 * non-deletable. 3524 */ 3525 if (TLSSTATOFF(lmp) == 0) { 3526 FLAGS1(lmp) |= FL1_RT_TLSSTAT; 3527 MODE(lmp) |= RTLD_NODELETE; 3528 3529 if (tls_assign(lml, lmp, PTTLS(lmp)) == 0) { 3530 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, 3531 M_REL_SHT_TYPE, rel, NULL, name)); 3532 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS), 3533 _conv_reloc_type((uint_t)rtype), NAME(lmp), 3534 name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN)); 3535 return (0); 3536 } 3537 } 3538 3539 /* 3540 * Typically, a static TLS offset is maintained as a symbol's value. 3541 * For local symbols that are not a part of the dynamic symbol table, 3542 * the TLS relocation points to a section symbol, and the static TLS 3543 * offset was deposited in the associated GOT table. Make sure the GOT 3544 * is cleared, so that the value isn't reused in do_reloc(). 3545 */ 3546 if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) { 3547 if ((ELF_ST_TYPE(sym->st_info) == STT_SECTION)) { 3548 value = *(long *)roffset; 3549 *(long *)roffset = 0; 3550 } else { 3551 value = sym->st_value; 3552 } 3553 } 3554 return (-(TLSSTATOFF(lmp) - value)); 3555 } 3556 3557 /* 3558 * If the symbol is not found and the reference was not to a weak symbol, report 3559 * an error. Weak references may be unresolved. 3560 */ 3561 int 3562 elf_reloc_error(Rt_map *lmp, const char *name, void *rel, uint_t binfo) 3563 { 3564 Lm_list *lml = LIST(lmp); 3565 3566 /* 3567 * Under crle(1), relocation failures are ignored. 3568 */ 3569 if (lml->lm_flags & LML_FLG_IGNRELERR) 3570 return (1); 3571 3572 /* 3573 * Under ldd(1), unresolved references are reported. However, if the 3574 * original reference is EXTERN or PARENT, these references are ignored 3575 * unless ldd's -p option is in effect. 3576 */ 3577 if (lml->lm_flags & LML_FLG_TRC_WARN) { 3578 if (((binfo & DBG_BINFO_REF_MSK) == 0) || 3579 ((lml->lm_flags & LML_FLG_TRC_NOPAREXT) != 0)) { 3580 (void) printf(MSG_INTL(MSG_LDD_SYM_NFOUND), 3581 demangle(name), NAME(lmp)); 3582 } 3583 return (1); 3584 } 3585 3586 /* 3587 * Otherwise, the unresolved reference is fatal. 3588 */ 3589 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, 3590 NULL, name)); 3591 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp), 3592 demangle(name)); 3593 3594 return (0); 3595 } 3596
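/*
 * Illustrative example (not part of this file): a minimal user-level
 * program that exercises the elf_dladdr() support above through the
 * public dladdr1(3C) interface, requesting the underlying symbol
 * table entry with RTLD_DL_SYMENT. The Elf64_Sym cast assumes a
 * 64-bit process.
 *
 *	#include <stdio.h>
 *	#include <dlfcn.h>
 *	#include <elf.h>
 *
 *	int
 *	main(void)
 *	{
 *		Dl_info		info;
 *		void		*extra;
 *
 *		if (dladdr1((void *)&printf, &info, &extra,
 *		    RTLD_DL_SYMENT) != 0) {
 *			Elf64_Sym	*sym = (Elf64_Sym *)extra;
 *
 *			(void) printf("%s`%s at %p, size %llu\n",
 *			    info.dli_fname, info.dli_sname,
 *			    info.dli_saddr,
 *			    (unsigned long long)sym->st_size);
 *		}
 *		return (0);
 *	}
 */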