1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Copyright (c) 1988 AT&T 29 * All Rights Reserved 30 */ 31 32 #pragma ident "%Z%%M% %I% %E% SMI" 33 34 /* 35 * Object file dependent support for ELF objects. 36 */ 37 38 #include <stdio.h> 39 #include <sys/procfs.h> 40 #include <sys/mman.h> 41 #include <sys/debug.h> 42 #include <string.h> 43 #include <limits.h> 44 #include <dlfcn.h> 45 #include <debug.h> 46 #include <conv.h> 47 #include "_rtld.h" 48 #include "_audit.h" 49 #include "_elf.h" 50 #include "msg.h" 51 52 /* 53 * Default and secure dependency search paths. 54 */ 55 static Pnode elf_dflt_dirs[] = { 56 #if defined(_ELF64) 57 #ifndef SGS_PRE_UNIFIED_PROCESS 58 { MSG_ORIG(MSG_PTH_LIB_64), 0, MSG_PTH_LIB_64_SIZE, 59 LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] }, 60 #endif 61 { MSG_ORIG(MSG_PTH_USRLIB_64), 0, MSG_PTH_USRLIB_64_SIZE, 62 LA_SER_DEFAULT, 0, 0 } 63 #else 64 #ifndef SGS_PRE_UNIFIED_PROCESS 65 { MSG_ORIG(MSG_PTH_LIB), 0, MSG_PTH_LIB_SIZE, 66 LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] }, 67 #endif 68 { MSG_ORIG(MSG_PTH_USRLIB), 0, MSG_PTH_USRLIB_SIZE, 69 LA_SER_DEFAULT, 0, 0 } 70 #endif 71 }; 72 73 static Pnode elf_secure_dirs[] = { 74 #if defined(_ELF64) 75 #ifndef SGS_PRE_UNIFIED_PROCESS 76 { MSG_ORIG(MSG_PTH_LIBSE_64), 0, MSG_PTH_LIBSE_64_SIZE, 77 LA_SER_SECURE, 0, &elf_secure_dirs[1] }, 78 #endif 79 { MSG_ORIG(MSG_PTH_USRLIBSE_64), 0, 80 MSG_PTH_USRLIBSE_64_SIZE, 81 LA_SER_SECURE, 0, 0 } 82 #else 83 #ifndef SGS_PRE_UNIFIED_PROCESS 84 { MSG_ORIG(MSG_PTH_LIBSE), 0, MSG_PTH_LIBSE_SIZE, 85 LA_SER_SECURE, 0, &elf_secure_dirs[1] }, 86 #endif 87 { MSG_ORIG(MSG_PTH_USRLIBSE), 0, MSG_PTH_USRLIBSE_SIZE, 88 LA_SER_SECURE, 0, 0 } 89 #endif 90 }; 91 92 /* 93 * Defines for local functions. 94 */ 95 static Pnode *elf_fix_name(const char *, Rt_map *, uint_t); 96 static int elf_are_u(Rej_desc *); 97 static void elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int); 98 static ulong_t elf_entry_pt(void); 99 static char *elf_get_so(const char *, const char *); 100 static Rt_map *elf_map_so(Lm_list *, Aliste, const char *, const char *, 101 int, int *); 102 static int elf_needed(Lm_list *, Aliste, Rt_map *, int *); 103 static void elf_unmap_so(Rt_map *); 104 static int elf_verify_vers(const char *, Rt_map *, Rt_map *); 105 106 /* 107 * Functions and data accessed through indirect pointers. 108 */ 109 Fct elf_fct = { 110 elf_are_u, 111 elf_entry_pt, 112 elf_map_so, 113 elf_unmap_so, 114 elf_needed, 115 lookup_sym, 116 elf_reloc, 117 elf_dflt_dirs, 118 elf_secure_dirs, 119 elf_fix_name, 120 elf_get_so, 121 elf_dladdr, 122 dlsym_handle, 123 elf_verify_vers, 124 elf_set_prot 125 }; 126 127 128 /* 129 * Redefine NEEDED name if necessary. 130 */ 131 static Pnode * 132 elf_fix_name(const char *name, Rt_map *clmp, uint_t orig) 133 { 134 /* 135 * For ABI compliance, if we are asked for ld.so.1, then really give 136 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1). 137 */ 138 if (((*name == '/') && 139 /* BEGIN CSTYLED */ 140 #if defined(_ELF64) 141 (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) || 142 #else 143 (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) || 144 #endif 145 (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) { 146 /* END CSTYLED */ 147 Pnode *pnp; 148 149 DBG_CALL(Dbg_file_fixname(LIST(clmp), name, 150 MSG_ORIG(MSG_PTH_LIBSYS))); 151 if (((pnp = calloc(sizeof (Pnode), 1)) == 0) || 152 ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) { 153 if (pnp) 154 free(pnp); 155 return (0); 156 } 157 pnp->p_len = MSG_PTH_LIBSYS_SIZE; 158 return (pnp); 159 } 160 161 return (expand_paths(clmp, name, orig, 0)); 162 } 163 164 /* 165 * Determine if we have been given an ELF file and if so determine if the file 166 * is compatible. Returns 1 if true, else 0 and sets the reject descriptor 167 * with associated error information. 168 */ 169 static int 170 elf_are_u(Rej_desc *rej) 171 { 172 Ehdr *ehdr; 173 174 /* 175 * Determine if we're an elf file. If not simply return, we don't set 176 * any rejection information as this test allows use to scroll through 177 * the objects we support (ELF, AOUT). 178 */ 179 if (fmap->fm_fsize < sizeof (Ehdr) || 180 fmap->fm_maddr[EI_MAG0] != ELFMAG0 || 181 fmap->fm_maddr[EI_MAG1] != ELFMAG1 || 182 fmap->fm_maddr[EI_MAG2] != ELFMAG2 || 183 fmap->fm_maddr[EI_MAG3] != ELFMAG3) { 184 return (0); 185 } 186 187 /* 188 * Check class and encoding. 189 */ 190 /* LINTED */ 191 ehdr = (Ehdr *)fmap->fm_maddr; 192 if (ehdr->e_ident[EI_CLASS] != M_CLASS) { 193 rej->rej_type = SGS_REJ_CLASS; 194 rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS]; 195 return (0); 196 } 197 if (ehdr->e_ident[EI_DATA] != M_DATA) { 198 rej->rej_type = SGS_REJ_DATA; 199 rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA]; 200 return (0); 201 } 202 if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) && 203 (ehdr->e_type != ET_DYN)) { 204 rej->rej_type = SGS_REJ_TYPE; 205 rej->rej_info = (uint_t)ehdr->e_type; 206 return (0); 207 } 208 209 /* 210 * Verify machine specific flags, and hardware capability requirements. 211 */ 212 if ((elf_mach_flags_check(rej, ehdr) == 0) || 213 ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0))) 214 return (0); 215 216 /* 217 * Verify ELF version. ??? is this too restrictive ??? 218 */ 219 if (ehdr->e_version > EV_CURRENT) { 220 rej->rej_type = SGS_REJ_VERSION; 221 rej->rej_info = (uint_t)ehdr->e_version; 222 return (0); 223 } 224 return (1); 225 } 226 227 /* 228 * The runtime linker employs lazy loading to provide the libraries needed for 229 * debugging, preloading .o's and dldump(). As these are seldom used, the 230 * standard startup of ld.so.1 doesn't initialize all the information necessary 231 * to perform plt relocation on ld.so.1's link-map. The first time lazy loading 232 * is called we get here to perform these initializations: 233 * 234 * o elf_needed() is called to set up the DYNINFO() indexes for each lazy 235 * dependency. Typically, for all other objects, this is called during 236 * analyze_so(), but as ld.so.1 is set-contained we skip this processing. 237 * 238 * o For intel, ld.so.1's JMPSLOT relocations need relative updates. These 239 * are by default skipped thus delaying all relative relocation processing 240 * on every invocation of ld.so.1. 241 */ 242 int 243 elf_rtld_load() 244 { 245 Lm_list *lml = &lml_rtld; 246 Rt_map *lmp = lml->lm_head; 247 248 if (lml->lm_flags & LML_FLG_PLTREL) 249 return (1); 250 251 /* 252 * As we need to refer to the DYNINFO() information, insure that it has 253 * been initialized. 254 */ 255 if (elf_needed(lml, ALIST_OFF_DATA, lmp, NULL) == 0) 256 return (0); 257 258 #if defined(__i386) 259 /* 260 * This is a kludge to give ld.so.1 a performance benefit on i386. 261 * It's based around two factors. 262 * 263 * o JMPSLOT relocations (PLT's) actually need a relative relocation 264 * applied to the GOT entry so that they can find PLT0. 265 * 266 * o ld.so.1 does not exercise *any* PLT's before it has made a call 267 * to elf_lazy_load(). This is because all dynamic dependencies 268 * are recorded as lazy dependencies. 269 */ 270 (void) elf_reloc_relacount((ulong_t)JMPREL(lmp), 271 (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp), 272 (ulong_t)ADDR(lmp)); 273 #endif 274 275 lml->lm_flags |= LML_FLG_PLTREL; 276 return (1); 277 } 278 279 /* 280 * Lazy load an object. 281 */ 282 Rt_map * 283 elf_lazy_load(Rt_map *clmp, Slookup *slp, uint_t ndx, const char *sym, 284 int *in_nfavl) 285 { 286 Rt_map *nlmp, *hlmp; 287 Dyninfo *dip = &DYNINFO(clmp)[ndx], *pdip; 288 uint_t flags = 0; 289 Pnode *pnp; 290 const char *name; 291 Lm_list *lml = LIST(clmp); 292 Lm_cntl *lmc; 293 Aliste lmco; 294 295 /* 296 * If this dependency has already been processed, we're done. 297 */ 298 if (((nlmp = (Rt_map *)dip->di_info) != 0) || 299 (dip->di_flags & FLG_DI_LDD_DONE)) 300 return (nlmp); 301 302 /* 303 * If we're running under ldd(1), indicate that this dependency has been 304 * processed (see test above). It doesn't matter whether the object is 305 * successfully loaded or not, this flag simply ensures that we don't 306 * repeatedly attempt to load an object that has already failed to load. 307 * To do so would create multiple failure diagnostics for the same 308 * object under ldd(1). 309 */ 310 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 311 dip->di_flags |= FLG_DI_LDD_DONE; 312 313 /* 314 * Determine the initial dependency name. 315 */ 316 name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val; 317 DBG_CALL(Dbg_file_lazyload(clmp, name, sym)); 318 319 /* 320 * If this object needs to establish its own group, make sure a handle 321 * is created. 322 */ 323 if (dip->di_flags & FLG_DI_GROUP) 324 flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE); 325 326 /* 327 * Lazy dependencies are identified as DT_NEEDED entries with a 328 * DF_P1_LAZYLOAD flag in the previous DT_POSFLAG_1 element. The 329 * dynamic information element that corresponds to the DT_POSFLAG_1 330 * entry is free, and thus used to store the present entrance 331 * identifier. This identifier is used to prevent multiple attempts to 332 * load a failed lazy loadable dependency within the same runtime linker 333 * operation. However, future attempts to reload this dependency are 334 * still possible. 335 */ 336 if (ndx && (pdip = dip - 1) && (pdip->di_flags & FLG_DI_POSFLAG1)) 337 pdip->di_info = (void *)slp->sl_id; 338 339 /* 340 * Expand the requested name if necessary. 341 */ 342 if ((pnp = elf_fix_name(name, clmp, 0)) == 0) 343 return (0); 344 345 /* 346 * Provided the object on the head of the link-map has completed its 347 * relocation, create a new link-map control list for this request. 348 */ 349 hlmp = lml->lm_head; 350 if (FLAGS(hlmp) & FLG_RT_RELOCED) { 351 if ((lmc = alist_append(&lml->lm_lists, 0, sizeof (Lm_cntl), 352 AL_CNT_LMLISTS)) == 0) { 353 remove_pnode(pnp); 354 return (0); 355 } 356 lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists); 357 } else { 358 lmc = 0; 359 lmco = ALIST_OFF_DATA; 360 } 361 362 /* 363 * Load the associated object. 364 */ 365 dip->di_info = nlmp = 366 load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0, in_nfavl); 367 368 /* 369 * Remove any expanded pathname infrastructure. Reduce the pending lazy 370 * dependency count of the caller, together with the link-map lists 371 * count of objects that still have lazy dependencies pending. 372 */ 373 remove_pnode(pnp); 374 if (--LAZY(clmp) == 0) 375 LIST(clmp)->lm_lazy--; 376 377 /* 378 * Finish processing the objects associated with this request, and 379 * create an association between the caller and this dependency. 380 */ 381 if (nlmp && ((bind_one(clmp, nlmp, BND_NEEDED) == 0) || 382 (analyze_lmc(lml, lmco, nlmp, in_nfavl) == 0) || 383 (relocate_lmc(lml, lmco, clmp, nlmp, in_nfavl) == 0))) 384 dip->di_info = nlmp = 0; 385 386 /* 387 * If this lazyload has failed, and we've created a new link-map 388 * control list to which this request has added objects, then remove 389 * all the objects that have been associated to this request. 390 */ 391 if ((nlmp == 0) && lmc && lmc->lc_head) 392 remove_lmc(lml, clmp, lmc, lmco, name); 393 394 /* 395 * Finally, remove any link-map control list that was created. 396 */ 397 if (lmc) 398 remove_cntl(lml, lmco); 399 400 /* 401 * If this lazy loading failed, record the fact, and bump the lazy 402 * counts. 403 */ 404 if (nlmp == 0) { 405 dip->di_flags |= FLG_DI_LAZYFAIL; 406 if (LAZY(clmp)++ == 0) 407 LIST(clmp)->lm_lazy++; 408 } 409 410 return (nlmp); 411 } 412 413 /* 414 * Return the entry point of the ELF executable. 415 */ 416 static ulong_t 417 elf_entry_pt(void) 418 { 419 return (ENTRY(lml_main.lm_head)); 420 } 421 422 /* 423 * Unmap a given ELF shared object from the address space. 424 */ 425 static void 426 elf_unmap_so(Rt_map *lmp) 427 { 428 caddr_t addr; 429 size_t size; 430 Mmap *mmaps; 431 432 /* 433 * If this link map represents a relocatable object concatenation, then 434 * the image was simply generated in allocated memory. Free the memory. 435 * 436 * Note: the memory was originally allocated in the libelf:_elf_outmap 437 * routine and would normally have been free'd in elf_outsync(), but 438 * because we 'interpose' on that routine the memory wasn't free'd at 439 * that time. 440 */ 441 if (FLAGS(lmp) & FLG_RT_IMGALLOC) { 442 free((void *)ADDR(lmp)); 443 return; 444 } 445 446 /* 447 * If padding was enabled via rtld_db, then we have at least one page 448 * in front of the image - and possibly a trailing page. 449 * Unmap the front page first: 450 */ 451 if (PADSTART(lmp) != ADDR(lmp)) { 452 addr = (caddr_t)M_PTRUNC(PADSTART(lmp)); 453 size = ADDR(lmp) - (ulong_t)addr; 454 (void) munmap(addr, size); 455 } 456 457 /* 458 * Unmap any trailing padding. 459 */ 460 if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) > 461 M_PROUND(ADDR(lmp) + MSIZE(lmp))) { 462 addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp)); 463 size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr; 464 (void) munmap(addr, size); 465 } 466 467 /* 468 * Unmmap all mapped segments. 469 */ 470 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) 471 (void) munmap(mmaps->m_vaddr, mmaps->m_msize); 472 } 473 474 /* 475 * Determine if a dependency requires a particular version and if so verify 476 * that the version exists in the dependency. 477 */ 478 static int 479 elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp) 480 { 481 Verneed *vnd = VERNEED(clmp); 482 int _num, num = VERNEEDNUM(clmp); 483 char *cstrs = (char *)STRTAB(clmp); 484 Lm_list *lml = LIST(clmp); 485 486 /* 487 * Traverse the callers version needed information and determine if any 488 * specific versions are required from the dependency. 489 */ 490 DBG_CALL(Dbg_ver_need_title(LIST(clmp), NAME(clmp))); 491 for (_num = 1; _num <= num; _num++, 492 vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) { 493 Half cnt = vnd->vn_cnt; 494 Vernaux *vnap; 495 char *nstrs, *need; 496 497 /* 498 * Determine if a needed entry matches this dependency. 499 */ 500 need = (char *)(cstrs + vnd->vn_file); 501 if (strcmp(name, need) != 0) 502 continue; 503 504 if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) && 505 ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0)) 506 (void) printf(MSG_INTL(MSG_LDD_VER_FIND), name); 507 508 /* 509 * Validate that each version required actually exists in the 510 * dependency. 511 */ 512 nstrs = (char *)STRTAB(nlmp); 513 514 for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt; 515 cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) { 516 char *version, *define; 517 Verdef *vdf = VERDEF(nlmp); 518 ulong_t _num, num = VERDEFNUM(nlmp); 519 int found = 0; 520 521 version = (char *)(cstrs + vnap->vna_name); 522 DBG_CALL(Dbg_ver_need_entry(lml, 0, need, version)); 523 524 for (_num = 1; _num <= num; _num++, 525 vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) { 526 Verdaux *vdap; 527 528 if (vnap->vna_hash != vdf->vd_hash) 529 continue; 530 531 vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux); 532 define = (char *)(nstrs + vdap->vda_name); 533 if (strcmp(version, define) != 0) 534 continue; 535 536 found++; 537 break; 538 } 539 540 /* 541 * If we're being traced print out any matched version 542 * when the verbose (-v) option is in effect. Always 543 * print any unmatched versions. 544 */ 545 if (lml->lm_flags & LML_FLG_TRC_ENABLE) { 546 /* BEGIN CSTYLED */ 547 if (found) { 548 if (!(lml->lm_flags & LML_FLG_TRC_VERBOSE)) 549 continue; 550 551 (void) printf(MSG_ORIG(MSG_LDD_VER_FOUND), 552 need, version, NAME(nlmp)); 553 } else { 554 if (rtld_flags & RT_FL_SILENCERR) 555 continue; 556 557 (void) printf(MSG_INTL(MSG_LDD_VER_NFOUND), 558 need, version); 559 } 560 /* END CSTYLED */ 561 continue; 562 } 563 564 /* 565 * If the version hasn't been found then this is a 566 * candidate for a fatal error condition. Weak 567 * version definition requirements are silently 568 * ignored. Also, if the image inspected for a version 569 * definition has no versioning recorded at all then 570 * silently ignore this (this provides better backward 571 * compatibility to old images created prior to 572 * versioning being available). Both of these skipped 573 * diagnostics are available under tracing (see above). 574 */ 575 if ((found == 0) && (num != 0) && 576 (!(vnap->vna_flags & VER_FLG_WEAK))) { 577 eprintf(lml, ERR_FATAL, 578 MSG_INTL(MSG_VER_NFOUND), need, version, 579 NAME(clmp)); 580 return (0); 581 } 582 } 583 } 584 DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD)); 585 return (1); 586 } 587 588 /* 589 * Search through the dynamic section for DT_NEEDED entries and perform one 590 * of two functions. If only the first argument is specified then load the 591 * defined shared object, otherwise add the link map representing the defined 592 * link map the the dlopen list. 593 */ 594 static int 595 elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp, int *in_nfavl) 596 { 597 Dyn *dyn, *pdyn; 598 ulong_t ndx = 0; 599 uint_t lazy, flags; 600 Word lmflags = lml->lm_flags; 601 Word lmtflags = lml->lm_tflags; 602 603 /* 604 * Process each shared object on needed list. 605 */ 606 if (DYN(clmp) == 0) 607 return (1); 608 609 for (dyn = (Dyn *)DYN(clmp), pdyn = NULL; dyn->d_tag != DT_NULL; 610 pdyn = dyn++, ndx++) { 611 Dyninfo *dip = &DYNINFO(clmp)[ndx]; 612 Rt_map *nlmp = 0; 613 char *name; 614 int silent = 0; 615 Pnode *pnp; 616 617 switch (dyn->d_tag) { 618 case DT_POSFLAG_1: 619 dip->di_flags |= FLG_DI_POSFLAG1; 620 continue; 621 case DT_NEEDED: 622 case DT_USED: 623 lazy = flags = 0; 624 dip->di_flags |= FLG_DI_NEEDED; 625 626 if (pdyn && (pdyn->d_tag == DT_POSFLAG_1)) { 627 if ((pdyn->d_un.d_val & DF_P1_LAZYLOAD) && 628 ((lmtflags & LML_TFLG_NOLAZYLD) == 0)) { 629 dip->di_flags |= FLG_DI_LAZY; 630 lazy = 1; 631 } 632 if (pdyn->d_un.d_val & DF_P1_GROUPPERM) { 633 dip->di_flags |= FLG_DI_GROUP; 634 flags = 635 (FLG_RT_SETGROUP | FLG_RT_HANDLE); 636 } 637 } 638 639 name = (char *)STRTAB(clmp) + dyn->d_un.d_val; 640 641 /* 642 * NOTE, libc.so.1 can't be lazy loaded. Although a 643 * lazy position flag won't be produced when a RTLDINFO 644 * .dynamic entry is found (introduced with the UPM in 645 * Solaris 10), it was possible to mark libc for lazy 646 * loading on previous releases. To reduce the overhead 647 * of testing for this occurrence, only carry out this 648 * check for the first object on the link-map list 649 * (there aren't many applications built without libc). 650 */ 651 if (lazy && (lml->lm_head == clmp) && 652 (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0)) 653 lazy = 0; 654 655 /* 656 * Don't bring in lazy loaded objects yet unless we've 657 * been asked to attempt to load all available objects 658 * (crle(1) sets LD_FLAGS=loadavail). Even under 659 * RTLD_NOW we don't process this - RTLD_NOW will cause 660 * relocation processing which in turn might trigger 661 * lazy loading, but its possible that the object has a 662 * lazy loaded file with no bindings (i.e., it should 663 * never have been a dependency in the first place). 664 */ 665 if (lazy) { 666 if ((lmflags & LML_FLG_LOADAVAIL) == 0) { 667 LAZY(clmp)++; 668 lazy = flags = 0; 669 continue; 670 } 671 672 /* 673 * Silence any error messages - see description 674 * under elf_lookup_filtee(). 675 */ 676 if ((rtld_flags & RT_FL_SILENCERR) == 0) { 677 rtld_flags |= RT_FL_SILENCERR; 678 silent = 1; 679 } 680 } 681 break; 682 case DT_AUXILIARY: 683 dip->di_flags |= FLG_DI_AUXFLTR; 684 continue; 685 case DT_SUNW_AUXILIARY: 686 dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR); 687 continue; 688 case DT_FILTER: 689 dip->di_flags |= FLG_DI_STDFLTR; 690 continue; 691 case DT_SUNW_FILTER: 692 dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR); 693 continue; 694 default: 695 continue; 696 } 697 698 DBG_CALL(Dbg_file_needed(clmp, name)); 699 700 /* 701 * If we're running under ldd(1), indicate that this dependency 702 * has been processed. It doesn't matter whether the object is 703 * successfully loaded or not, this flag simply ensures that we 704 * don't repeatedly attempt to load an object that has already 705 * failed to load. To do so would create multiple failure 706 * diagnostics for the same object under ldd(1). 707 */ 708 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 709 dip->di_flags |= FLG_DI_LDD_DONE; 710 711 /* 712 * Establish the objects name, load it and establish a binding 713 * with the caller. 714 */ 715 if (((pnp = elf_fix_name(name, clmp, 0)) == 0) || ((nlmp = 716 load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0, 717 in_nfavl)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0)) 718 nlmp = 0; 719 720 /* 721 * Clean up any infrastructure, including the removal of the 722 * error suppression state, if it had been previously set in 723 * this routine. 724 */ 725 if (pnp) 726 remove_pnode(pnp); 727 if (silent) 728 rtld_flags &= ~RT_FL_SILENCERR; 729 730 if ((dip->di_info = (void *)nlmp) == 0) { 731 /* 732 * If the object could not be mapped, continue if error 733 * suppression is established or we're here with ldd(1). 734 */ 735 if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags & 736 (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE))) 737 continue; 738 else 739 return (0); 740 } 741 } 742 743 if (LAZY(clmp)) 744 lml->lm_lazy++; 745 746 return (1); 747 } 748 749 static int 750 elf_map_check(Lm_list *lml, const char *name, caddr_t vaddr, Off size) 751 { 752 prmap_t *maps, *_maps; 753 int pfd, num, _num; 754 caddr_t eaddr = vaddr + size; 755 int err; 756 757 /* 758 * If memory reservations have been established for alternative objects 759 * determine if this object falls within the reservation, if it does no 760 * further checking is required. 761 */ 762 if (rtld_flags & RT_FL_MEMRESV) { 763 Rtc_head *head = (Rtc_head *)config->c_bgn; 764 765 if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) && 766 (eaddr <= (caddr_t)(uintptr_t)head->ch_resend)) 767 return (0); 768 } 769 770 /* 771 * Determine the mappings presently in use by this process. 772 */ 773 if ((pfd = pr_open(lml)) == FD_UNAVAIL) 774 return (1); 775 776 if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) { 777 err = errno; 778 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name, 779 strerror(err)); 780 return (1); 781 } 782 783 if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0) 784 return (1); 785 786 if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) { 787 err = errno; 788 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name, 789 strerror(err)); 790 free(maps); 791 return (1); 792 } 793 794 /* 795 * Determine if the supplied address clashes with any of the present 796 * process mappings. 797 */ 798 for (_num = 0, _maps = maps; _num < num; _num++, _maps++) { 799 caddr_t _eaddr = _maps->pr_vaddr + _maps->pr_size; 800 Rt_map *lmp; 801 const char *str; 802 803 if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr)) 804 continue; 805 806 /* 807 * We have a memory clash. See if one of the known dynamic 808 * dependency mappings represents this space so as to provide 809 * the user a more meaningful message. 810 */ 811 if ((lmp = _caller(vaddr, 0)) != 0) 812 str = NAME(lmp); 813 else 814 str = MSG_INTL(MSG_STR_UNKNOWN); 815 816 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name, 817 EC_NATPTR(vaddr), EC_OFF(size), str); 818 return (1); 819 } 820 free(maps); 821 return (0); 822 } 823 824 /* 825 * Obtain a memory reservation. On newer systems, both MAP_ANON and MAP_ALIGN 826 * are used to obtained an aligned reservation from anonymous memory. If 827 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard 828 * reservation using the file as backing. 829 */ 830 static Am_ret 831 elf_map_reserve(Lm_list *lml, const char *name, caddr_t *maddr, Off msize, 832 int mperm, int fd, Xword align) 833 { 834 Am_ret amret; 835 int mflag = MAP_PRIVATE | MAP_NORESERVE; 836 837 #if defined(MAP_ALIGN) 838 if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) { 839 mflag |= MAP_ALIGN; 840 *maddr = (caddr_t)align; 841 } 842 #endif 843 if ((amret = anon_map(lml, maddr, msize, PROT_NONE, mflag)) == AM_ERROR) 844 return (amret); 845 846 if (amret == AM_OK) 847 return (AM_OK); 848 849 /* 850 * If an anonymous memory request failed (which should only be the 851 * case if it is unsupported on the system we're running on), establish 852 * the initial mapping directly from the file. 853 */ 854 *maddr = 0; 855 if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE, 856 fd, 0)) == MAP_FAILED) { 857 int err = errno; 858 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name, 859 strerror(err)); 860 return (AM_ERROR); 861 } 862 return (AM_NOSUP); 863 } 864 865 static void * 866 elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag, 867 int fd, Off foff) 868 { 869 #if defined(MAP_TEXT) && defined(MAP_INITDATA) 870 static int notd = 0; 871 872 /* 873 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate 874 * flag. 875 */ 876 if (notd == 0) { 877 if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) 878 mflag |= MAP_TEXT; 879 else 880 mflag |= MAP_INITDATA; 881 } 882 #endif 883 if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED) 884 return (0); 885 886 #if defined(MAP_TEXT) && defined(MAP_INITDATA) 887 if ((notd == 0) && (errno == EINVAL)) { 888 /* 889 * MAP_TEXT and MAP_INITDATA may not be supported on this 890 * platform, try again without. 891 */ 892 notd = 1; 893 mflag &= ~(MAP_TEXT | MAP_INITDATA); 894 895 return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff)); 896 } 897 #endif 898 return (MAP_FAILED); 899 } 900 901 /* 902 * Map in a file. 903 */ 904 static caddr_t 905 elf_map_it( 906 Lm_list *lml, /* link-map list */ 907 const char *name, /* actual name stored for pathname */ 908 Off fsize, /* total mapping claim of the file */ 909 Ehdr *ehdr, /* ELF header of file */ 910 Phdr *fphdr, /* first loadable Phdr */ 911 Phdr *lphdr, /* last loadable Phdr */ 912 Phdr **rrphdr, /* return first Phdr in reservation */ 913 caddr_t *rraddr, /* return start of reservation */ 914 Off *rrsize, /* return total size of reservation */ 915 int fixed, /* image is resolved to a fixed addr */ 916 int fd, /* images file descriptor */ 917 Xword align, /* image segments maximum alignment */ 918 Mmap *mmaps, /* mmap information array and */ 919 uint_t *mmapcnt) /* mapping count */ 920 { 921 caddr_t raddr; /* reservation address */ 922 Off rsize; /* reservation size */ 923 Phdr *phdr; /* working program header poiner */ 924 caddr_t maddr; /* working mmap address */ 925 caddr_t faddr; /* working file address */ 926 size_t padsize; /* object padding requirement */ 927 size_t padpsize = 0; /* padding size rounded to next page */ 928 size_t padmsize = 0; /* padding size rounded for alignment */ 929 int skipfseg; /* skip mapping first segment */ 930 int mperm; /* segment permissions */ 931 Am_ret amret = AM_NOSUP; 932 933 /* 934 * If padding is required extend both the front and rear of the image. 935 * To insure the image itself is mapped at the correct alignment the 936 * initial padding is rounded up to the nearest page. Once the image is 937 * mapped the excess can be pruned to the nearest page required for the 938 * actual padding itself. 939 */ 940 if ((padsize = r_debug.rtd_objpad) != 0) { 941 padpsize = M_PROUND(padsize); 942 if (fixed) 943 padmsize = padpsize; 944 else 945 padmsize = S_ROUND(padsize, align); 946 } 947 948 /* 949 * Determine the initial permissions used to map in the first segment. 950 * If this segments memsz is greater that its filesz then the difference 951 * must be zeroed. Make sure this segment is writable. 952 */ 953 mperm = 0; 954 if (fphdr->p_flags & PF_R) 955 mperm |= PROT_READ; 956 if (fphdr->p_flags & PF_X) 957 mperm |= PROT_EXEC; 958 if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz)) 959 mperm |= PROT_WRITE; 960 961 /* 962 * Determine whether or not to let system reserve address space based on 963 * whether this is a dynamic executable (addresses in object are fixed) 964 * or a shared object (addresses in object are relative to the objects' 965 * base). 966 */ 967 if (fixed) { 968 /* 969 * Determine the reservation address and size, and insure that 970 * this reservation isn't already in use. 971 */ 972 faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr); 973 raddr = maddr - padpsize; 974 rsize = fsize + padpsize + padsize; 975 976 if (lml_main.lm_head) { 977 if (elf_map_check(lml, name, raddr, rsize) != 0) 978 return (0); 979 } 980 981 /* 982 * As this is a fixed image, all segments must be individually 983 * mapped. 984 */ 985 skipfseg = 0; 986 987 } else { 988 size_t esize; 989 990 /* 991 * If this isn't a fixed image, reserve enough address space for 992 * the entire image to be mapped. The amount of reservation is 993 * the range between the beginning of the first, and end of the 994 * last loadable segment, together with any padding, plus the 995 * alignment of the first segment. 996 * 997 * The optimal reservation is made as a no-reserve mapping from 998 * anonymous memory. Each segment is then mapped into this 999 * reservation. If the anonymous mapping capability isn't 1000 * available, the reservation is obtained from the file itself. 1001 * In this case the first segment of the image is mapped as part 1002 * of the reservation, thus only the following segments need to 1003 * be remapped. 1004 */ 1005 rsize = fsize + padmsize + padsize; 1006 if ((amret = elf_map_reserve(lml, name, &raddr, rsize, mperm, 1007 fd, align)) == AM_ERROR) 1008 return (0); 1009 maddr = raddr + padmsize; 1010 faddr = (caddr_t)S_ROUND((Off)maddr, align); 1011 1012 /* 1013 * If this reservation has been obtained from anonymous memory, 1014 * then all segments must be individually mapped. Otherwise, 1015 * the first segment heads the reservation. 1016 */ 1017 if (amret == AM_OK) 1018 skipfseg = 0; 1019 else 1020 skipfseg = 1; 1021 1022 /* 1023 * For backward compatibility (where MAP_ALIGN isn't available), 1024 * insure the alignment of the reservation is adequate for this 1025 * object, and if not remap the object to obtain the correct 1026 * alignment. 1027 */ 1028 if (faddr != maddr) { 1029 (void) munmap(raddr, rsize); 1030 1031 rsize += align; 1032 if ((amret = elf_map_reserve(lml, name, &raddr, rsize, 1033 mperm, fd, align)) == AM_ERROR) 1034 return (0); 1035 1036 maddr = faddr = (caddr_t)S_ROUND((Off)(raddr + 1037 padpsize), align); 1038 1039 esize = maddr - raddr + padpsize; 1040 1041 /* 1042 * As ths image has been realigned, the first segment 1043 * of the file needs to be remapped to its correct 1044 * location. 1045 */ 1046 skipfseg = 0; 1047 } else 1048 esize = padmsize - padpsize; 1049 1050 /* 1051 * If this reservation included padding, remove any excess for 1052 * the start of the image (the padding was adjusted to insure 1053 * the image was aligned appropriately). 1054 */ 1055 if (esize) { 1056 (void) munmap(raddr, esize); 1057 raddr += esize; 1058 rsize -= esize; 1059 } 1060 } 1061 1062 /* 1063 * At this point we know the initial location of the image, and its 1064 * size. Pass these back to the caller for inclusion in the link-map 1065 * that will eventually be created. 1066 */ 1067 *rraddr = raddr; 1068 *rrsize = rsize; 1069 1070 /* 1071 * The first loadable segment is now pointed to by maddr. This segment 1072 * will eventually contain the elf header and program headers, so reset 1073 * the program header. Pass this back to the caller for inclusion in 1074 * the link-map so it can be used for later unmapping operations. 1075 */ 1076 /* LINTED */ 1077 *rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff); 1078 1079 /* 1080 * If padding is required at the front of the image, obtain that now. 1081 * Note, if we've already obtained a reservation from anonymous memory 1082 * then this reservation will already include suitable padding. 1083 * Otherwise this reservation is backed by the file, or in the case of 1084 * a fixed image, doesn't yet exist. Map the padding so that it is 1085 * suitably protected (PROT_NONE), and insure the first segment of the 1086 * file is mapped to its correct location. 1087 */ 1088 if (padsize) { 1089 if (amret == AM_NOSUP) { 1090 if (dz_map(lml, raddr, padpsize, PROT_NONE, 1091 (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) == 1092 MAP_FAILED) 1093 return (0); 1094 1095 skipfseg = 0; 1096 } 1097 rsize -= padpsize; 1098 } 1099 1100 /* 1101 * Map individual segments. For a fixed image, these will each be 1102 * unique mappings. For a reservation these will fill in the 1103 * reservation. 1104 */ 1105 for (phdr = fphdr; phdr <= lphdr; 1106 phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) { 1107 caddr_t addr; 1108 Off mlen, flen; 1109 size_t size; 1110 1111 /* 1112 * Skip non-loadable segments or segments that don't occupy 1113 * any memory. 1114 */ 1115 if (((phdr->p_type != PT_LOAD) && 1116 (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0)) 1117 continue; 1118 1119 /* 1120 * Establish this segments address relative to our base. 1121 */ 1122 addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr + 1123 (fixed ? 0 : faddr))); 1124 1125 /* 1126 * Determine the mapping protection from the segment attributes. 1127 * Also determine the etext address from the last loadable 1128 * segment which has permissions but no write access. 1129 */ 1130 mperm = 0; 1131 if (phdr->p_flags) { 1132 if (phdr->p_flags & PF_R) 1133 mperm |= PROT_READ; 1134 if (phdr->p_flags & PF_X) 1135 mperm |= PROT_EXEC; 1136 if (phdr->p_flags & PF_W) 1137 mperm |= PROT_WRITE; 1138 else 1139 fmap->fm_etext = phdr->p_vaddr + phdr->p_memsz + 1140 (ulong_t)(fixed ? 0 : faddr); 1141 } 1142 1143 /* 1144 * Determine the type of mapping required. 1145 */ 1146 if (phdr->p_type == PT_SUNWBSS) { 1147 /* 1148 * Potentially, we can defer the loading of any SUNWBSS 1149 * segment, depending on whether the symbols it provides 1150 * have been bound to. In this manner, large segments 1151 * that are interposed upon between shared libraries 1152 * may not require mapping. Note, that the mapping 1153 * information is recorded in our mapping descriptor at 1154 * this time. 1155 */ 1156 mlen = phdr->p_memsz; 1157 flen = 0; 1158 1159 } else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) { 1160 /* 1161 * If this segment has no backing file and no flags 1162 * specified, then it defines a reservation. At this 1163 * point all standard loadable segments will have been 1164 * processed. The segment reservation is mapped 1165 * directly from /dev/null. 1166 */ 1167 if (nu_map(lml, (caddr_t)addr, phdr->p_memsz, PROT_NONE, 1168 MAP_FIXED | MAP_PRIVATE) == MAP_FAILED) 1169 return (0); 1170 1171 mlen = phdr->p_memsz; 1172 flen = 0; 1173 1174 } else if (phdr->p_filesz == 0) { 1175 /* 1176 * If this segment has no backing file then it defines a 1177 * nobits segment and is mapped directly from /dev/zero. 1178 */ 1179 if (dz_map(lml, (caddr_t)addr, phdr->p_memsz, mperm, 1180 MAP_FIXED | MAP_PRIVATE) == MAP_FAILED) 1181 return (0); 1182 1183 mlen = phdr->p_memsz; 1184 flen = 0; 1185 1186 } else { 1187 Off foff; 1188 1189 /* 1190 * This mapping originates from the file. Determine the 1191 * file offset to which the mapping will be directed 1192 * (must be aligned) and how much to map (might be more 1193 * than the file in the case of .bss). 1194 */ 1195 foff = M_PTRUNC((ulong_t)phdr->p_offset); 1196 mlen = phdr->p_memsz + (phdr->p_offset - foff); 1197 flen = phdr->p_filesz + (phdr->p_offset - foff); 1198 1199 /* 1200 * If this is a non-fixed, non-anonymous mapping, and no 1201 * padding is involved, then the first loadable segment 1202 * is already part of the initial reservation. In this 1203 * case there is no need to remap this segment. 1204 */ 1205 if ((skipfseg == 0) || (phdr != fphdr)) { 1206 int phdr_mperm = mperm; 1207 /* 1208 * If this segments memsz is greater that its 1209 * filesz then the difference must be zeroed. 1210 * Make sure this segment is writable. 1211 */ 1212 if (phdr->p_memsz > phdr->p_filesz) 1213 mperm |= PROT_WRITE; 1214 1215 if (elf_map_textdata((caddr_t)addr, flen, 1216 mperm, phdr_mperm, 1217 (MAP_FIXED | MAP_PRIVATE), fd, foff) == 1218 MAP_FAILED) { 1219 int err = errno; 1220 eprintf(lml, ERR_FATAL, 1221 MSG_INTL(MSG_SYS_MMAP), name, 1222 strerror(err)); 1223 return (0); 1224 } 1225 } 1226 1227 /* 1228 * If the memory occupancy of the segment overflows the 1229 * definition in the file, we need to "zero out" the end 1230 * of the mapping we've established, and if necessary, 1231 * map some more space from /dev/zero. Note, zero'ed 1232 * memory must end on a double word boundary to satisfy 1233 * zero(). 1234 */ 1235 if (phdr->p_memsz > phdr->p_filesz) { 1236 caddr_t zaddr; 1237 size_t zlen, zplen; 1238 Off fend; 1239 1240 foff = (Off)(phdr->p_vaddr + phdr->p_filesz + 1241 (fixed ? 0 : faddr)); 1242 zaddr = (caddr_t)M_PROUND(foff); 1243 zplen = (size_t)(zaddr - foff); 1244 1245 fend = (Off)S_DROUND((size_t)(phdr->p_vaddr + 1246 phdr->p_memsz + (fixed ? 0 : faddr))); 1247 zlen = (size_t)(fend - foff); 1248 1249 /* 1250 * Determine whether the number of bytes that 1251 * must be zero'ed overflow to the next page. 1252 * If not, simply clear the exact bytes 1253 * (filesz to memsz) from this page. Otherwise, 1254 * clear the remaining bytes of this page, and 1255 * map an following pages from /dev/zero. 1256 */ 1257 if (zlen < zplen) 1258 zero((caddr_t)foff, (long)zlen); 1259 else { 1260 zero((caddr_t)foff, (long)zplen); 1261 1262 if ((zlen = (fend - (Off)zaddr)) > 0) { 1263 if (dz_map(lml, zaddr, zlen, 1264 mperm, 1265 MAP_FIXED | MAP_PRIVATE) == 1266 MAP_FAILED) 1267 return (0); 1268 } 1269 } 1270 } 1271 } 1272 1273 /* 1274 * Unmap anything from the last mapping address to this one and 1275 * update the mapping claim pointer. 1276 */ 1277 if ((fixed == 0) && ((size = addr - maddr) != 0)) { 1278 (void) munmap(maddr, size); 1279 rsize -= size; 1280 } 1281 1282 /* 1283 * Retain this segments mapping information. 1284 */ 1285 mmaps[*mmapcnt].m_vaddr = addr; 1286 mmaps[*mmapcnt].m_msize = mlen; 1287 mmaps[*mmapcnt].m_fsize = flen; 1288 mmaps[*mmapcnt].m_perm = mperm; 1289 (*mmapcnt)++; 1290 1291 maddr = addr + M_PROUND(mlen); 1292 rsize -= M_PROUND(mlen); 1293 } 1294 1295 /* 1296 * If padding is required at the end of the image, obtain that now. 1297 * Note, if we've already obtained a reservation from anonymous memory 1298 * then this reservation will already include suitable padding. 1299 */ 1300 if (padsize) { 1301 if (amret == AM_NOSUP) { 1302 /* 1303 * maddr is currently page aligned from the last segment 1304 * mapping. 1305 */ 1306 if (dz_map(lml, maddr, padsize, PROT_NONE, 1307 (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) == 1308 MAP_FAILED) 1309 return (0); 1310 } 1311 maddr += padsize; 1312 rsize -= padsize; 1313 } 1314 1315 /* 1316 * Unmap any final reservation. 1317 */ 1318 if ((fixed == 0) && (rsize != 0)) 1319 (void) munmap(maddr, rsize); 1320 1321 return (faddr); 1322 } 1323 1324 /* 1325 * A null symbol interpretor. Used if a filter has no associated filtees. 1326 */ 1327 /* ARGSUSED0 */ 1328 static Sym * 1329 elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo, int *in_nfavl) 1330 { 1331 return ((Sym *)0); 1332 } 1333 1334 /* 1335 * Disable filtee use. 1336 */ 1337 static void 1338 elf_disable_filtee(Rt_map *lmp, Dyninfo *dip) 1339 { 1340 dip->di_info = 0; 1341 1342 if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) { 1343 /* 1344 * If this is an object filter, free the filtee's duplication. 1345 */ 1346 if (OBJFLTRNDX(lmp) != FLTR_DISABLED) { 1347 free(REFNAME(lmp)); 1348 REFNAME(lmp) = (char *)0; 1349 OBJFLTRNDX(lmp) = FLTR_DISABLED; 1350 1351 /* 1352 * Indicate that this filtee is no longer available. 1353 */ 1354 if (dip->di_flags & FLG_DI_STDFLTR) 1355 SYMINTP(lmp) = elf_null_find_sym; 1356 1357 } 1358 } else if (dip->di_flags & FLG_DI_STDFLTR) { 1359 /* 1360 * Indicate that this standard filtee is no longer available. 1361 */ 1362 if (SYMSFLTRCNT(lmp)) 1363 SYMSFLTRCNT(lmp)--; 1364 } else { 1365 /* 1366 * Indicate that this auxiliary filtee is no longer available. 1367 */ 1368 if (SYMAFLTRCNT(lmp)) 1369 SYMAFLTRCNT(lmp)--; 1370 } 1371 dip->di_flags &= ~MSK_DI_FILTER; 1372 } 1373 1374 /* 1375 * Find symbol interpreter - filters. 1376 * This function is called when the symbols from a shared object should 1377 * be resolved from the shared objects filtees instead of from within itself. 1378 * 1379 * A symbol name of 0 is used to trigger filtee loading. 1380 */ 1381 static Sym * 1382 _elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx, 1383 int *in_nfavl) 1384 { 1385 const char *name = slp->sl_name, *filtees; 1386 Rt_map *clmp = slp->sl_cmap; 1387 Rt_map *ilmp = slp->sl_imap; 1388 Pnode *pnp, **pnpp; 1389 int any; 1390 Dyninfo *dip = &DYNINFO(ilmp)[ndx]; 1391 Lm_list *lml = LIST(ilmp); 1392 1393 /* 1394 * Indicate that the filter has been used. If a binding already exists 1395 * to the caller, indicate that this object is referenced. This insures 1396 * we don't generate false unreferenced diagnostics from ldd -u/U or 1397 * debugging. Don't create a binding regardless, as this filter may 1398 * have been dlopen()'ed. 1399 */ 1400 if (name && (ilmp != clmp)) { 1401 Word tracing = (LIST(clmp)->lm_flags & 1402 (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED)); 1403 1404 if (tracing || DBG_ENABLED) { 1405 Bnd_desc *bdp; 1406 Aliste idx; 1407 1408 FLAGS1(ilmp) |= FL1_RT_USED; 1409 1410 if ((tracing & LML_FLG_TRC_UNREF) || DBG_ENABLED) { 1411 for (APLIST_TRAVERSE(CALLERS(ilmp), idx, bdp)) { 1412 if (bdp->b_caller == clmp) { 1413 bdp->b_flags |= BND_REFER; 1414 break; 1415 } 1416 } 1417 } 1418 } 1419 } 1420 1421 /* 1422 * If this is the first call to process this filter, establish the 1423 * filtee list. If a configuration file exists, determine if any 1424 * filtee associations for this filter, and its filtee reference, are 1425 * defined. Otherwise, process the filtee reference. Any token 1426 * expansion is also completed at this point (i.e., $PLATFORM). 1427 */ 1428 filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val; 1429 if (dip->di_info == 0) { 1430 if (rtld_flags2 & RT_FL2_FLTCFG) 1431 dip->di_info = elf_config_flt(lml, PATHNAME(ilmp), 1432 filtees); 1433 1434 if (dip->di_info == 0) { 1435 DBG_CALL(Dbg_file_filter(lml, NAME(ilmp), filtees, 0)); 1436 if ((lml->lm_flags & 1437 (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) && 1438 ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0)) 1439 (void) printf(MSG_INTL(MSG_LDD_FIL_FILTER), 1440 NAME(ilmp), filtees); 1441 1442 if ((dip->di_info = (void *)expand_paths(ilmp, 1443 filtees, 0, 0)) == 0) { 1444 elf_disable_filtee(ilmp, dip); 1445 return ((Sym *)0); 1446 } 1447 } 1448 } 1449 1450 /* 1451 * Traverse the filtee list, dlopen()'ing any objects specified and 1452 * using their group handle to lookup the symbol. 1453 */ 1454 for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp; 1455 pnpp = &pnp->p_next, pnp = *pnpp) { 1456 int mode; 1457 Grp_hdl *ghp; 1458 Rt_map *nlmp = 0; 1459 1460 if (pnp->p_len == 0) 1461 continue; 1462 1463 /* 1464 * Establish the mode of the filtee from the filter. As filtees 1465 * are loaded via a dlopen(), make sure that RTLD_GROUP is set 1466 * and the filtees aren't global. It would be nice to have 1467 * RTLD_FIRST used here also, but as filters got out long before 1468 * RTLD_FIRST was introduced it's a little too late now. 1469 */ 1470 mode = MODE(ilmp) | RTLD_GROUP; 1471 mode &= ~RTLD_GLOBAL; 1472 1473 /* 1474 * Insure that any auxiliary filter can locate symbols from its 1475 * caller. 1476 */ 1477 if (dip->di_flags & FLG_DI_AUXFLTR) 1478 mode |= RTLD_PARENT; 1479 1480 /* 1481 * Process any hardware capability directory. Establish a new 1482 * link-map control list from which to analyze any newly added 1483 * objects. 1484 */ 1485 if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) { 1486 Lm_cntl *lmc; 1487 Aliste lmco; 1488 1489 if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) { 1490 if ((lmc = alist_append(&lml->lm_lists, 0, 1491 sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0) 1492 return ((Sym *)0); 1493 lmco = (Aliste)((char *)lmc - 1494 (char *)lml->lm_lists); 1495 } else { 1496 lmc = 0; 1497 lmco = ALIST_OFF_DATA; 1498 } 1499 1500 pnp = hwcap_filtees(pnpp, lmco, lmc, dip, ilmp, filtees, 1501 mode, (FLG_RT_HANDLE | FLG_RT_HWCAP), in_nfavl); 1502 1503 /* 1504 * Now that any hardware capability objects have been 1505 * processed, remove any link-map control list. 1506 */ 1507 if (lmc) 1508 remove_cntl(lml, lmco); 1509 } 1510 1511 if (pnp->p_len == 0) 1512 continue; 1513 1514 /* 1515 * Process an individual filtee. 1516 */ 1517 if (pnp->p_info == 0) { 1518 const char *filtee = pnp->p_name; 1519 int audit = 0; 1520 1521 DBG_CALL(Dbg_file_filtee(lml, NAME(ilmp), filtee, 0)); 1522 1523 ghp = 0; 1524 1525 /* 1526 * Determine if the reference link map is already 1527 * loaded. As an optimization compare the filtee with 1528 * our interpretor. The most common filter is 1529 * libdl.so.1, which is a filter on ld.so.1. 1530 */ 1531 #if defined(_ELF64) 1532 if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) { 1533 #else 1534 if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) { 1535 #endif 1536 /* 1537 * Create an association between ld.so.1 and the 1538 * filter. As an optimization, a handle for 1539 * ld.so.1 itself (required for the dlopen() 1540 * family filtering mechanism) shouldn't search 1541 * any dependencies of ld.so.1. Omitting 1542 * GPD_ADDEPS prevents the addition of any 1543 * ld.so.1 dependencies to this handle. 1544 */ 1545 nlmp = lml_rtld.lm_head; 1546 if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp, 1547 (GPH_LDSO | GPH_FIRST | GPH_FILTEE), 1548 (GPD_DLSYM | GPD_RELOC), GPD_PARENT)) == 0) 1549 nlmp = 0; 1550 1551 /* 1552 * Establish the filter handle to prevent any 1553 * recursion. 1554 */ 1555 if (nlmp && ghp) 1556 pnp->p_info = (void *)ghp; 1557 1558 /* 1559 * Audit the filter/filtee established. Ignore 1560 * any return from the auditor, as we can't 1561 * allow ignore filtering to ld.so.1, otherwise 1562 * nothing is going to work. 1563 */ 1564 if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) & 1565 LML_TFLG_AUD_OBJFILTER)) 1566 (void) audit_objfilter(ilmp, filtees, 1567 nlmp, 0); 1568 1569 } else { 1570 Rej_desc rej = { 0 }; 1571 Lm_cntl *lmc; 1572 Aliste lmco; 1573 1574 /* 1575 * Establish a new link-map control list from 1576 * which to analyze any newly added objects. 1577 */ 1578 if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) { 1579 if ((lmc = 1580 alist_append(&lml->lm_lists, 0, 1581 sizeof (Lm_cntl), 1582 AL_CNT_LMLISTS)) == 0) 1583 return ((Sym *)0); 1584 lmco = (Aliste)((char *)lmc - 1585 (char *)lml->lm_lists); 1586 } else { 1587 lmc = 0; 1588 lmco = ALIST_OFF_DATA; 1589 } 1590 1591 /* 1592 * Load the filtee. Note, an auditor can 1593 * provide an alternative name. 1594 */ 1595 if ((nlmp = load_path(lml, lmco, &(pnp->p_name), 1596 ilmp, mode, FLG_RT_HANDLE, &ghp, 0, 1597 &rej, in_nfavl)) == 0) { 1598 file_notfound(LIST(ilmp), filtee, ilmp, 1599 FLG_RT_HANDLE, &rej); 1600 remove_rej(&rej); 1601 } 1602 filtee = pnp->p_name; 1603 1604 /* 1605 * Establish the filter handle to prevent any 1606 * recursion. 1607 */ 1608 if (nlmp && ghp) { 1609 ghp->gh_flags |= GPH_FILTEE; 1610 pnp->p_info = (void *)ghp; 1611 1612 FLAGS1(nlmp) |= FL1_RT_USED; 1613 } 1614 1615 /* 1616 * Audit the filter/filtee established. A 1617 * return of 0 indicates the auditor wishes to 1618 * ignore this filtee. 1619 */ 1620 if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) & 1621 LML_TFLG_AUD_OBJFILTER)) { 1622 if (audit_objfilter(ilmp, filtees, 1623 nlmp, 0) == 0) { 1624 audit = 1; 1625 nlmp = 0; 1626 } 1627 } 1628 1629 /* 1630 * Finish processing the objects associated with 1631 * this request. Create an association between 1632 * this object and the originating filter to 1633 * provide sufficient information to tear down 1634 * this filtee if necessary. 1635 */ 1636 if (nlmp && ghp && ((analyze_lmc(lml, lmco, 1637 nlmp, in_nfavl) == 0) || (relocate_lmc(lml, 1638 lmco, ilmp, nlmp, in_nfavl) == 0))) 1639 nlmp = 0; 1640 1641 /* 1642 * If the filtee has been successfully 1643 * processed, then create an association 1644 * between the filter and filtee. This 1645 * association provides sufficient information 1646 * to tear down the filter and filtee if 1647 * necessary. 1648 */ 1649 DBG_CALL(Dbg_file_hdl_title(DBG_HDL_ADD)); 1650 if (nlmp && ghp && 1651 (hdl_add(ghp, ilmp, GPD_FILTER) == 0)) 1652 nlmp = 0; 1653 1654 /* 1655 * If this filtee loading has failed, and we've 1656 * created a new link-map control list to which 1657 * this request has added objects, then remove 1658 * all the objects that have been associated to 1659 * this request. 1660 */ 1661 if ((nlmp == 0) && lmc && lmc->lc_head) 1662 remove_lmc(lml, clmp, lmc, lmco, name); 1663 1664 /* 1665 * Remove any link-map control list that was 1666 * created. 1667 */ 1668 if (lmc) 1669 remove_cntl(lml, lmco); 1670 } 1671 1672 /* 1673 * Generate a diagnostic if the filtee couldn't be 1674 * loaded, null out the pnode entry, and continue 1675 * the search. Otherwise, retain this group handle 1676 * for future symbol searches. 1677 */ 1678 if (nlmp == 0) { 1679 DBG_CALL(Dbg_file_filtee(lml, 0, filtee, 1680 audit)); 1681 1682 pnp->p_info = 0; 1683 pnp->p_len = 0; 1684 continue; 1685 } 1686 } 1687 1688 ghp = (Grp_hdl *)pnp->p_info; 1689 1690 /* 1691 * If we're just here to trigger filtee loading skip the symbol 1692 * lookup so we'll continue looking for additional filtees. 1693 */ 1694 if (name) { 1695 Grp_desc *gdp; 1696 Sym *sym = 0; 1697 Aliste idx; 1698 Slookup sl = *slp; 1699 1700 sl.sl_flags |= LKUP_FIRST; 1701 any++; 1702 1703 /* 1704 * Look for the symbol in the handles dependencies. 1705 */ 1706 for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) { 1707 if ((gdp->gd_flags & GPD_DLSYM) == 0) 1708 continue; 1709 1710 /* 1711 * If our parent is a dependency don't look at 1712 * it (otherwise we are in a recursive loop). 1713 * This situation can occur with auxiliary 1714 * filters if the filtee has a dependency on the 1715 * filter. This dependency isn't necessary as 1716 * auxiliary filters are opened RTLD_PARENT, but 1717 * users may still unknowingly add an explicit 1718 * dependency to the parent. 1719 */ 1720 if ((sl.sl_imap = gdp->gd_depend) == ilmp) 1721 continue; 1722 1723 if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp, 1724 binfo, in_nfavl)) != 0) || 1725 (ghp->gh_flags & GPH_FIRST)) 1726 break; 1727 } 1728 1729 /* 1730 * If a symbol has been found, indicate the binding 1731 * and return the symbol. 1732 */ 1733 if (sym) { 1734 *binfo |= DBG_BINFO_FILTEE; 1735 return (sym); 1736 } 1737 } 1738 1739 /* 1740 * If this object is tagged to terminate filtee processing we're 1741 * done. 1742 */ 1743 if (FLAGS1(ghp->gh_ownlmp) & FL1_RT_ENDFILTE) 1744 break; 1745 } 1746 1747 /* 1748 * If we're just here to trigger filtee loading then we're done. 1749 */ 1750 if (name == 0) 1751 return ((Sym *)0); 1752 1753 /* 1754 * If no filtees have been found for a filter, clean up any Pnode 1755 * structures and disable their search completely. For auxiliary 1756 * filters we can reselect the symbol search function so that we never 1757 * enter this routine again for this object. For standard filters we 1758 * use the null symbol routine. 1759 */ 1760 if (any == 0) { 1761 remove_pnode((Pnode *)dip->di_info); 1762 elf_disable_filtee(ilmp, dip); 1763 return ((Sym *)0); 1764 } 1765 1766 return ((Sym *)0); 1767 } 1768 1769 /* 1770 * Focal point for disabling error messages for auxiliary filters. As an 1771 * auxiliary filter allows for filtee use, but provides a fallback should a 1772 * filtee not exist (or fail to load), any errors generated as a consequence of 1773 * trying to load the filtees are typically suppressed. Setting RT_FL_SILENCERR 1774 * suppresses errors generated by eprint(), but insures a debug diagnostic is 1775 * produced. ldd(1) employs printf(), and here, the selection of whether to 1776 * print a diagnostic in regards to auxiliary filters is a little more complex. 1777 * 1778 * . The determination of whether to produce an ldd message, or a fatal 1779 * error message is driven by LML_FLG_TRC_ENABLE. 1780 * . More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN, 1781 * (ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s), 1782 * and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u). 1783 * 1784 * . If the calling object is lddstub, then several classes of message are 1785 * suppressed. The user isn't trying to diagnose lddstub, this is simply 1786 * a stub executable employed to preload a user specified library against. 1787 * 1788 * . If RT_FL_SILENCERR is in effect then any generic ldd() messages should 1789 * be suppressed. All detailed ldd messages should still be produced. 1790 */ 1791 Sym * 1792 elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx, 1793 int *in_nfavl) 1794 { 1795 Sym *sym; 1796 Dyninfo *dip = &DYNINFO(slp->sl_imap)[ndx]; 1797 int silent = 0; 1798 1799 /* 1800 * Make sure this entry is still acting as a filter. We may have tried 1801 * to process this previously, and disabled it if the filtee couldn't 1802 * be processed. However, other entries may provide different filtees 1803 * that are yet to be completed. 1804 */ 1805 if (dip->di_flags == 0) 1806 return ((Sym *)0); 1807 1808 /* 1809 * Indicate whether an error message is required should this filtee not 1810 * be found, based on the type of filter. 1811 */ 1812 if ((dip->di_flags & FLG_DI_AUXFLTR) && 1813 ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) { 1814 rtld_flags |= RT_FL_SILENCERR; 1815 silent = 1; 1816 } 1817 1818 sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx, in_nfavl); 1819 1820 if (silent) 1821 rtld_flags &= ~RT_FL_SILENCERR; 1822 1823 return (sym); 1824 } 1825 1826 /* 1827 * Compute the elf hash value (as defined in the ELF access library). 1828 * The form of the hash table is: 1829 * 1830 * |--------------| 1831 * | # of buckets | 1832 * |--------------| 1833 * | # of chains | 1834 * |--------------| 1835 * | bucket[] | 1836 * |--------------| 1837 * | chain[] | 1838 * |--------------| 1839 */ 1840 ulong_t 1841 elf_hash(const char *name) 1842 { 1843 uint_t hval = 0; 1844 1845 while (*name) { 1846 uint_t g; 1847 hval = (hval << 4) + *name++; 1848 if ((g = (hval & 0xf0000000)) != 0) 1849 hval ^= g >> 24; 1850 hval &= ~g; 1851 } 1852 return ((ulong_t)hval); 1853 } 1854 1855 /* 1856 * If flag argument has LKUP_SPEC set, we treat undefined symbols of type 1857 * function specially in the executable - if they have a value, even though 1858 * undefined, we use that value. This allows us to associate all references 1859 * to a function's address to a single place in the process: the plt entry 1860 * for that function in the executable. Calls to lookup from plt binding 1861 * routines do NOT set LKUP_SPEC in the flag. 1862 */ 1863 Sym * 1864 elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo, int *in_nfavl) 1865 { 1866 const char *name = slp->sl_name; 1867 Rt_map *ilmp = slp->sl_imap; 1868 ulong_t hash = slp->sl_hash; 1869 uint_t ndx, htmp, buckets, *chainptr; 1870 Sym *sym, *symtabptr; 1871 char *strtabptr, *strtabname; 1872 uint_t flags1; 1873 Syminfo *sip; 1874 1875 /* 1876 * If we're only here to establish a symbols index, skip the diagnostic 1877 * used to trace a symbol search. 1878 */ 1879 if ((slp->sl_flags & LKUP_SYMNDX) == 0) 1880 DBG_CALL(Dbg_syms_lookup(ilmp, name, MSG_ORIG(MSG_STR_ELF))); 1881 1882 if (HASH(ilmp) == 0) 1883 return ((Sym *)0); 1884 1885 buckets = HASH(ilmp)[0]; 1886 /* LINTED */ 1887 htmp = (uint_t)hash % buckets; 1888 1889 /* 1890 * Get the first symbol on hash chain and initialize the string 1891 * and symbol table pointers. 1892 */ 1893 if ((ndx = HASH(ilmp)[htmp + 2]) == 0) 1894 return ((Sym *)0); 1895 1896 chainptr = HASH(ilmp) + 2 + buckets; 1897 strtabptr = STRTAB(ilmp); 1898 symtabptr = SYMTAB(ilmp); 1899 1900 while (ndx) { 1901 sym = symtabptr + ndx; 1902 strtabname = strtabptr + sym->st_name; 1903 1904 /* 1905 * Compare the symbol found with the name required. If the 1906 * names don't match continue with the next hash entry. 1907 */ 1908 if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) { 1909 if ((ndx = chainptr[ndx]) != 0) 1910 continue; 1911 return ((Sym *)0); 1912 } 1913 1914 /* 1915 * The Solaris ld does not put DT_VERSYM in the dynamic 1916 * section, but the GNU ld does. The GNU runtime linker 1917 * interprets the top bit of the 16-bit Versym value 1918 * (0x8000) as the "hidden" bit. If this bit is set, 1919 * the linker is supposed to act as if that symbol does 1920 * not exist. The hidden bit supports their versioning 1921 * scheme, which allows multiple incompatible functions 1922 * with the same name to exist at different versions 1923 * within an object. The Solaris linker does not support this 1924 * mechanism, or the model of interface evolution that 1925 * it allows, but we honor the hidden bit in GNU ld 1926 * produced objects in order to interoperate with them. 1927 */ 1928 if ((VERSYM(ilmp) != NULL) && 1929 ((VERSYM(ilmp)[ndx] & 0x8000) != 0)) { 1930 DBG_CALL(Dbg_syms_ignore_gnuver(ilmp, name, 1931 ndx, VERSYM(ilmp)[ndx])); 1932 if ((ndx = chainptr[ndx]) != 0) 1933 continue; 1934 return ((Sym *)0); 1935 } 1936 1937 /* 1938 * If we're only here to establish a symbols index, we're done. 1939 */ 1940 if (slp->sl_flags & LKUP_SYMNDX) 1941 return (sym); 1942 1943 /* 1944 * If we find a match and the symbol is defined, return the 1945 * symbol pointer and the link map in which it was found. 1946 */ 1947 if (sym->st_shndx != SHN_UNDEF) { 1948 *dlmp = ilmp; 1949 *binfo |= DBG_BINFO_FOUND; 1950 if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) || 1951 ((FLAGS(ilmp) & FLG_RT_SYMINTPO) && 1952 is_sym_interposer(ilmp, sym))) 1953 *binfo |= DBG_BINFO_INTERPOSE; 1954 break; 1955 1956 /* 1957 * If we find a match and the symbol is undefined, the 1958 * symbol type is a function, and the value of the symbol 1959 * is non zero, then this is a special case. This allows 1960 * the resolution of a function address to the plt[] entry. 1961 * See SPARC ABI, Dynamic Linking, Function Addresses for 1962 * more details. 1963 */ 1964 } else if ((slp->sl_flags & LKUP_SPEC) && 1965 (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) && 1966 (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) { 1967 *dlmp = ilmp; 1968 *binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR); 1969 if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) || 1970 ((FLAGS(ilmp) & FLG_RT_SYMINTPO) && 1971 is_sym_interposer(ilmp, sym))) 1972 *binfo |= DBG_BINFO_INTERPOSE; 1973 return (sym); 1974 } 1975 1976 /* 1977 * Undefined symbol. 1978 */ 1979 return ((Sym *)0); 1980 } 1981 1982 /* 1983 * We've found a match. Determine if the defining object contains 1984 * symbol binding information. 1985 */ 1986 if ((sip = SYMINFO(ilmp)) != 0) 1987 sip += ndx; 1988 1989 /* 1990 * If this definition is a singleton, and we haven't followed a default 1991 * symbol search knowing that we're looking for a singleton (presumably 1992 * because the symbol definition has been changed since the referring 1993 * object was built), then reject this binding so that the caller can 1994 * fall back to a standard symbol search. 1995 */ 1996 if ((ELF_ST_VISIBILITY(sym->st_other) == STV_SINGLETON) && 1997 (((slp->sl_flags & LKUP_STANDARD) == 0) || 1998 (((slp->sl_flags & LKUP_SINGLETON) == 0) && 1999 (LIST(ilmp)->lm_flags & LML_FLG_GROUPSEXIST)))) { 2000 DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name, 2001 DBG_BNDREJ_SINGLE)); 2002 *binfo |= BINFO_REJSINGLE; 2003 *binfo &= ~DBG_BINFO_MSK; 2004 return ((Sym *)0); 2005 } 2006 2007 /* 2008 * If this is a direct binding request, but the symbol definition has 2009 * disabled directly binding to it (presumably because the symbol 2010 * definition has been changed since the referring object was built), 2011 * indicate this failure so that the caller can fall back to a standard 2012 * symbol search. 2013 */ 2014 if (sip && (slp->sl_flags & LKUP_DIRECT) && 2015 (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) { 2016 DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name, 2017 DBG_BNDREJ_NODIR)); 2018 *binfo |= BINFO_REJDIRECT; 2019 *binfo &= ~DBG_BINFO_MSK; 2020 return ((Sym *)0); 2021 } 2022 2023 /* 2024 * Determine whether this object is acting as a filter. 2025 */ 2026 if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0) 2027 return (sym); 2028 2029 /* 2030 * Determine if this object offers per-symbol filtering, and if so, 2031 * whether this symbol references a filtee. 2032 */ 2033 if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) { 2034 /* 2035 * If this is a standard filter reference, and no standard 2036 * filtees remain to be inspected, we're done. If this is an 2037 * auxiliary filter reference, and no auxiliary filtees remain, 2038 * we'll fall through in case any object filtering is available. 2039 */ 2040 if ((sip->si_flags & SYMINFO_FLG_FILTER) && 2041 (SYMSFLTRCNT(ilmp) == 0)) 2042 return ((Sym *)0); 2043 2044 if ((sip->si_flags & SYMINFO_FLG_FILTER) || 2045 ((sip->si_flags & SYMINFO_FLG_AUXILIARY) && 2046 SYMAFLTRCNT(ilmp))) { 2047 Sym *fsym; 2048 2049 /* 2050 * This symbol has an associated filtee. Lookup the 2051 * symbol in the filtee, and if it is found return it. 2052 * If the symbol doesn't exist, and this is a standard 2053 * filter, return an error, otherwise fall through to 2054 * catch any object filtering that may be available. 2055 */ 2056 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 2057 sip->si_boundto, in_nfavl)) != 0) 2058 return (fsym); 2059 if (sip->si_flags & SYMINFO_FLG_FILTER) 2060 return ((Sym *)0); 2061 } 2062 } 2063 2064 /* 2065 * Determine if this object provides global filtering. 2066 */ 2067 if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) { 2068 Sym *fsym; 2069 2070 if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) { 2071 /* 2072 * This object has an associated filtee. Lookup the 2073 * symbol in the filtee, and if it is found return it. 2074 * If the symbol doesn't exist, and this is a standard 2075 * filter, return and error, otherwise return the symbol 2076 * within the filter itself. 2077 */ 2078 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 2079 OBJFLTRNDX(ilmp), in_nfavl)) != 0) 2080 return (fsym); 2081 } 2082 2083 if (flags1 & FL1_RT_OBJSFLTR) 2084 return ((Sym *)0); 2085 } 2086 return (sym); 2087 } 2088 2089 /* 2090 * Create a new Rt_map structure for an ELF object and initialize 2091 * all values. 2092 */ 2093 Rt_map * 2094 elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld, 2095 ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry, 2096 ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt, 2097 int *in_nfavl) 2098 { 2099 Rt_map *lmp; 2100 ulong_t base, fltr = 0, audit = 0, cfile = 0, crle = 0; 2101 Xword rpath = 0; 2102 Ehdr *ehdr = (Ehdr *)addr; 2103 2104 DBG_CALL(Dbg_file_elf(lml, pname, (ulong_t)ld, addr, msize, entry, 2105 lml->lm_lmidstr, lmco)); 2106 2107 /* 2108 * Allocate space for the link-map and private elf information. Once 2109 * these are allocated and initialized, we can use remove_so(0, lmp) to 2110 * tear down the link-map should any failures occur. 2111 */ 2112 if ((lmp = calloc(sizeof (Rt_map), 1)) == 0) 2113 return (0); 2114 if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) { 2115 free(lmp); 2116 return (0); 2117 } 2118 2119 /* 2120 * All fields not filled in were set to 0 by calloc. 2121 */ 2122 ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname; 2123 DYN(lmp) = ld; 2124 ADDR(lmp) = addr; 2125 MSIZE(lmp) = msize; 2126 ENTRY(lmp) = (Addr)entry; 2127 SYMINTP(lmp) = elf_find_sym; 2128 ETEXT(lmp) = etext; 2129 FCT(lmp) = &elf_fct; 2130 LIST(lmp) = lml; 2131 PADSTART(lmp) = paddr; 2132 PADIMLEN(lmp) = padimsize; 2133 THREADID(lmp) = rt_thr_self(); 2134 OBJFLTRNDX(lmp) = FLTR_DISABLED; 2135 SORTVAL(lmp) = -1; 2136 2137 MMAPS(lmp) = mmaps; 2138 MMAPCNT(lmp) = mmapcnt; 2139 ASSERT(mmapcnt != 0); 2140 2141 /* 2142 * If this is a shared object, add the base address to each address. 2143 * if this is an executable, use address as is. 2144 */ 2145 if (ehdr->e_type == ET_EXEC) { 2146 base = 0; 2147 FLAGS(lmp) |= FLG_RT_FIXED; 2148 } else 2149 base = addr; 2150 2151 /* 2152 * Fill in rest of the link map entries with information from the file's 2153 * dynamic structure. 2154 */ 2155 if (ld) { 2156 uint_t dynndx = 0; 2157 Xword pltpadsz = 0; 2158 Rti_desc *rti; 2159 2160 /* CSTYLED */ 2161 for ( ; ld->d_tag != DT_NULL; ++ld, dynndx++) { 2162 switch ((Xword)ld->d_tag) { 2163 case DT_SYMTAB: 2164 SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2165 break; 2166 case DT_SUNW_SYMTAB: 2167 SUNWSYMTAB(lmp) = 2168 (void *)(ld->d_un.d_ptr + base); 2169 break; 2170 case DT_SUNW_SYMSZ: 2171 SUNWSYMSZ(lmp) = ld->d_un.d_val; 2172 break; 2173 case DT_STRTAB: 2174 STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2175 break; 2176 case DT_SYMENT: 2177 SYMENT(lmp) = ld->d_un.d_val; 2178 break; 2179 case DT_FEATURE_1: 2180 ld->d_un.d_val |= DTF_1_PARINIT; 2181 if (ld->d_un.d_val & DTF_1_CONFEXP) 2182 crle = 1; 2183 break; 2184 case DT_MOVESZ: 2185 MOVESZ(lmp) = ld->d_un.d_val; 2186 FLAGS(lmp) |= FLG_RT_MOVE; 2187 break; 2188 case DT_MOVEENT: 2189 MOVEENT(lmp) = ld->d_un.d_val; 2190 break; 2191 case DT_MOVETAB: 2192 MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2193 break; 2194 case DT_REL: 2195 case DT_RELA: 2196 /* 2197 * At this time, ld.so. can only handle one 2198 * type of relocation per object. 2199 */ 2200 REL(lmp) = (void *)(ld->d_un.d_ptr + base); 2201 break; 2202 case DT_RELSZ: 2203 case DT_RELASZ: 2204 RELSZ(lmp) = ld->d_un.d_val; 2205 break; 2206 case DT_RELENT: 2207 case DT_RELAENT: 2208 RELENT(lmp) = ld->d_un.d_val; 2209 break; 2210 case DT_RELCOUNT: 2211 case DT_RELACOUNT: 2212 RELACOUNT(lmp) = (uint_t)ld->d_un.d_val; 2213 break; 2214 case DT_TEXTREL: 2215 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2216 break; 2217 case DT_HASH: 2218 HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2219 break; 2220 case DT_PLTGOT: 2221 PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2222 break; 2223 case DT_PLTRELSZ: 2224 PLTRELSZ(lmp) = ld->d_un.d_val; 2225 break; 2226 case DT_JMPREL: 2227 JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base); 2228 break; 2229 case DT_INIT: 2230 if (ld->d_un.d_ptr != NULL) 2231 INIT(lmp) = 2232 (void (*)())(ld->d_un.d_ptr + base); 2233 break; 2234 case DT_FINI: 2235 if (ld->d_un.d_ptr != NULL) 2236 FINI(lmp) = 2237 (void (*)())(ld->d_un.d_ptr + base); 2238 break; 2239 case DT_INIT_ARRAY: 2240 INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2241 base); 2242 break; 2243 case DT_INIT_ARRAYSZ: 2244 INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2245 break; 2246 case DT_FINI_ARRAY: 2247 FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2248 base); 2249 break; 2250 case DT_FINI_ARRAYSZ: 2251 FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2252 break; 2253 case DT_PREINIT_ARRAY: 2254 PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2255 base); 2256 break; 2257 case DT_PREINIT_ARRAYSZ: 2258 PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2259 break; 2260 case DT_RPATH: 2261 case DT_RUNPATH: 2262 rpath = ld->d_un.d_val; 2263 break; 2264 case DT_FILTER: 2265 fltr = ld->d_un.d_val; 2266 OBJFLTRNDX(lmp) = dynndx; 2267 FLAGS1(lmp) |= FL1_RT_OBJSFLTR; 2268 break; 2269 case DT_AUXILIARY: 2270 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2271 fltr = ld->d_un.d_val; 2272 OBJFLTRNDX(lmp) = dynndx; 2273 } 2274 FLAGS1(lmp) |= FL1_RT_OBJAFLTR; 2275 break; 2276 case DT_SUNW_FILTER: 2277 SYMSFLTRCNT(lmp)++; 2278 FLAGS1(lmp) |= FL1_RT_SYMSFLTR; 2279 break; 2280 case DT_SUNW_AUXILIARY: 2281 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2282 SYMAFLTRCNT(lmp)++; 2283 } 2284 FLAGS1(lmp) |= FL1_RT_SYMAFLTR; 2285 break; 2286 case DT_DEPAUDIT: 2287 if (!(rtld_flags & RT_FL_NOAUDIT)) 2288 audit = ld->d_un.d_val; 2289 break; 2290 case DT_CONFIG: 2291 cfile = ld->d_un.d_val; 2292 break; 2293 case DT_DEBUG: 2294 /* 2295 * DT_DEBUG entries are only created in 2296 * dynamic objects that require an interpretor 2297 * (ie. all dynamic executables and some shared 2298 * objects), and provide for a hand-shake with 2299 * debuggers. This entry is initialized to 2300 * zero by the link-editor. If a debugger has 2301 * us and updated this entry set the debugger 2302 * flag, and finish initializing the debugging 2303 * structure (see setup() also). Switch off any 2304 * configuration object use as most debuggers 2305 * can't handle fixed dynamic executables as 2306 * dependencies, and we can't handle requests 2307 * like object padding for alternative objects. 2308 */ 2309 if (ld->d_un.d_ptr) 2310 rtld_flags |= 2311 (RT_FL_DEBUGGER | RT_FL_NOOBJALT); 2312 ld->d_un.d_ptr = (Addr)&r_debug; 2313 break; 2314 case DT_VERNEED: 2315 VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr + 2316 base); 2317 break; 2318 case DT_VERNEEDNUM: 2319 /* LINTED */ 2320 VERNEEDNUM(lmp) = (int)ld->d_un.d_val; 2321 break; 2322 case DT_VERDEF: 2323 VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base); 2324 break; 2325 case DT_VERDEFNUM: 2326 /* LINTED */ 2327 VERDEFNUM(lmp) = (int)ld->d_un.d_val; 2328 break; 2329 case DT_VERSYM: 2330 /* 2331 * The Solaris ld does not produce DT_VERSYM, 2332 * but the GNU ld does, in order to support 2333 * their style of versioning, which differs 2334 * from ours in some ways, while using the 2335 * same data structures. The presence of 2336 * DT_VERSYM therefore means that GNU 2337 * versioning rules apply to the given file. 2338 * If DT_VERSYM is not present, then Solaris 2339 * versioning rules apply. 2340 */ 2341 VERSYM(lmp) = (Versym *)(ld->d_un.d_ptr + base); 2342 break; 2343 case DT_BIND_NOW: 2344 if ((ld->d_un.d_val & DF_BIND_NOW) && 2345 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2346 MODE(lmp) |= RTLD_NOW; 2347 MODE(lmp) &= ~RTLD_LAZY; 2348 } 2349 break; 2350 case DT_FLAGS: 2351 FLAGS2(lmp) |= FL2_RT_DTFLAGS; 2352 if (ld->d_un.d_val & DF_SYMBOLIC) 2353 FLAGS1(lmp) |= FL1_RT_SYMBOLIC; 2354 if (ld->d_un.d_val & DF_TEXTREL) 2355 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2356 if ((ld->d_un.d_val & DF_BIND_NOW) && 2357 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2358 MODE(lmp) |= RTLD_NOW; 2359 MODE(lmp) &= ~RTLD_LAZY; 2360 } 2361 /* 2362 * Capture any static TLS use, and enforce that 2363 * this object be non-deletable. 2364 */ 2365 if (ld->d_un.d_val & DF_STATIC_TLS) { 2366 FLAGS1(lmp) |= FL1_RT_TLSSTAT; 2367 MODE(lmp) |= RTLD_NODELETE; 2368 } 2369 break; 2370 case DT_FLAGS_1: 2371 if (ld->d_un.d_val & DF_1_DISPRELPND) 2372 FLAGS1(lmp) |= FL1_RT_DISPREL; 2373 if (ld->d_un.d_val & DF_1_GROUP) 2374 FLAGS(lmp) |= 2375 (FLG_RT_SETGROUP | FLG_RT_HANDLE); 2376 if ((ld->d_un.d_val & DF_1_NOW) && 2377 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2378 MODE(lmp) |= RTLD_NOW; 2379 MODE(lmp) &= ~RTLD_LAZY; 2380 } 2381 if (ld->d_un.d_val & DF_1_NODELETE) 2382 MODE(lmp) |= RTLD_NODELETE; 2383 if (ld->d_un.d_val & DF_1_INITFIRST) 2384 FLAGS(lmp) |= FLG_RT_INITFRST; 2385 if (ld->d_un.d_val & DF_1_NOOPEN) 2386 FLAGS(lmp) |= FLG_RT_NOOPEN; 2387 if (ld->d_un.d_val & DF_1_LOADFLTR) 2388 FLAGS(lmp) |= FLG_RT_LOADFLTR; 2389 if (ld->d_un.d_val & DF_1_NODUMP) 2390 FLAGS(lmp) |= FLG_RT_NODUMP; 2391 if (ld->d_un.d_val & DF_1_CONFALT) 2392 crle = 1; 2393 if (ld->d_un.d_val & DF_1_DIRECT) 2394 FLAGS1(lmp) |= FL1_RT_DIRECT; 2395 if (ld->d_un.d_val & DF_1_NODEFLIB) 2396 FLAGS1(lmp) |= FL1_RT_NODEFLIB; 2397 if (ld->d_un.d_val & DF_1_ENDFILTEE) 2398 FLAGS1(lmp) |= FL1_RT_ENDFILTE; 2399 if (ld->d_un.d_val & DF_1_TRANS) 2400 FLAGS(lmp) |= FLG_RT_TRANS; 2401 #ifndef EXPAND_RELATIVE 2402 if (ld->d_un.d_val & DF_1_ORIGIN) 2403 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2404 #endif 2405 /* 2406 * Global auditing is only meaningful when 2407 * specified by the initiating object of the 2408 * process - typically the dynamic executable. 2409 * If this is the initiaiting object, its link- 2410 * map will not yet have been added to the 2411 * link-map list, and consequently the link-map 2412 * list is empty. (see setup()). 2413 */ 2414 if (ld->d_un.d_val & DF_1_GLOBAUDIT) { 2415 if (lml_main.lm_head == 0) 2416 FLAGS1(lmp) |= FL1_RT_GLOBAUD; 2417 else 2418 DBG_CALL(Dbg_audit_ignore(lmp)); 2419 } 2420 2421 /* 2422 * If this object identifies itself as an 2423 * interposer, but relocation processing has 2424 * already started, then demote it. It's too 2425 * late to guarantee complete interposition. 2426 */ 2427 /* BEGIN CSTYLED */ 2428 if (ld->d_un.d_val & 2429 (DF_1_INTERPOSE | DF_1_SYMINTPOSE)) { 2430 if (lml->lm_flags & LML_FLG_STARTREL) { 2431 DBG_CALL(Dbg_util_intoolate(lmp)); 2432 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 2433 (void) printf( 2434 MSG_INTL(MSG_LDD_REL_ERR2), 2435 NAME(lmp)); 2436 } else if (ld->d_un.d_val & DF_1_INTERPOSE) 2437 FLAGS(lmp) |= FLG_RT_OBJINTPO; 2438 else 2439 FLAGS(lmp) |= FLG_RT_SYMINTPO; 2440 } 2441 /* END CSTYLED */ 2442 break; 2443 case DT_SYMINFO: 2444 SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr + 2445 base); 2446 break; 2447 case DT_SYMINENT: 2448 SYMINENT(lmp) = ld->d_un.d_val; 2449 break; 2450 case DT_PLTPAD: 2451 PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base); 2452 break; 2453 case DT_PLTPADSZ: 2454 pltpadsz = ld->d_un.d_val; 2455 break; 2456 case DT_SUNW_RTLDINF: 2457 /* 2458 * Maintain a list of RTLDINFO structures. 2459 * Typically, libc is the only supplier, and 2460 * only one structure is provided. However, 2461 * multiple suppliers and multiple structures 2462 * are supported. For example, one structure 2463 * may provide thread_init, and another 2464 * structure may provide atexit reservations. 2465 */ 2466 if ((rti = alist_append(&lml->lm_rti, 0, 2467 sizeof (Rti_desc), AL_CNT_RTLDINFO)) == 0) { 2468 remove_so(0, lmp); 2469 return (0); 2470 } 2471 rti->rti_lmp = lmp; 2472 rti->rti_info = (void *)(ld->d_un.d_ptr + base); 2473 break; 2474 case DT_SUNW_SORTENT: 2475 SUNWSORTENT(lmp) = ld->d_un.d_val; 2476 break; 2477 case DT_SUNW_SYMSORT: 2478 SUNWSYMSORT(lmp) = 2479 (void *)(ld->d_un.d_ptr + base); 2480 break; 2481 case DT_SUNW_SYMSORTSZ: 2482 SUNWSYMSORTSZ(lmp) = ld->d_un.d_val; 2483 break; 2484 case DT_DEPRECATED_SPARC_REGISTER: 2485 case M_DT_REGISTER: 2486 FLAGS(lmp) |= FLG_RT_REGSYMS; 2487 break; 2488 case M_DT_PLTRESERVE: 2489 PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr + 2490 base); 2491 break; 2492 } 2493 } 2494 2495 if (PLTPAD(lmp)) { 2496 if (pltpadsz == (Xword)0) 2497 PLTPAD(lmp) = 0; 2498 else 2499 PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) + 2500 pltpadsz); 2501 } 2502 2503 /* 2504 * Allocate a Dynamic Info structure. 2505 */ 2506 if ((DYNINFO(lmp) = calloc((size_t)dynndx, 2507 sizeof (Dyninfo))) == 0) { 2508 remove_so(0, lmp); 2509 return (0); 2510 } 2511 DYNINFOCNT(lmp) = dynndx; 2512 } 2513 2514 /* 2515 * A dynsym contains only global functions. We want to have 2516 * a version of it that also includes local functions, so that 2517 * dladdr() will be able to report names for local functions 2518 * when used to generate a stack trace for a stripped file. 2519 * This version of the dynsym is provided via DT_SUNW_SYMTAB. 2520 * 2521 * In producing DT_SUNW_SYMTAB, ld uses a non-obvious trick 2522 * in order to avoid having to have two copies of the global 2523 * symbols held in DT_SYMTAB: The local symbols are placed in 2524 * a separate section than the globals in the dynsym, but the 2525 * linker conspires to put the data for these two sections adjacent 2526 * to each other. DT_SUNW_SYMTAB points at the top of the local 2527 * symbols, and DT_SUNW_SYMSZ is the combined length of both tables. 2528 * 2529 * If the two sections are not adjacent, then something went wrong 2530 * at link time. We use ASSERT to kill the process if this is 2531 * a debug build. In a production build, we will silently ignore 2532 * the presence of the .ldynsym and proceed. We can detect this 2533 * situation by checking to see that DT_SYMTAB lies in 2534 * the range given by DT_SUNW_SYMTAB/DT_SUNW_SYMSZ. 2535 */ 2536 if ((SUNWSYMTAB(lmp) != NULL) && 2537 (((char *)SYMTAB(lmp) <= (char *)SUNWSYMTAB(lmp)) || 2538 (((char *)SYMTAB(lmp) >= 2539 (SUNWSYMSZ(lmp) + (char *)SUNWSYMTAB(lmp)))))) { 2540 ASSERT(0); 2541 SUNWSYMTAB(lmp) = NULL; 2542 SUNWSYMSZ(lmp) = 0; 2543 } 2544 2545 /* 2546 * If configuration file use hasn't been disabled, and a configuration 2547 * file hasn't already been set via an environment variable, see if any 2548 * application specific configuration file is specified. An LD_CONFIG 2549 * setting is used first, but if this image was generated via crle(1) 2550 * then a default configuration file is a fall-back. 2551 */ 2552 if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) { 2553 if (cfile) 2554 config->c_name = (const char *)(cfile + 2555 (char *)STRTAB(lmp)); 2556 else if (crle) { 2557 rtld_flags |= RT_FL_CONFAPP; 2558 #ifndef EXPAND_RELATIVE 2559 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2560 #endif 2561 } 2562 } 2563 2564 if (rpath) 2565 RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp)); 2566 if (fltr) { 2567 /* 2568 * If this object is a global filter, duplicate the filtee 2569 * string name(s) so that REFNAME() is available in core files. 2570 * This cludge was useful for debuggers at one point, but only 2571 * when the filtee name was an individual full path. 2572 */ 2573 if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) { 2574 remove_so(0, lmp); 2575 return (0); 2576 } 2577 } 2578 2579 if (rtld_flags & RT_FL_RELATIVE) 2580 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2581 2582 /* 2583 * For Intel ABI compatibility. It's possible that a JMPREL can be 2584 * specified without any other relocations (e.g. a dynamic executable 2585 * normally only contains .plt relocations). If this is the case then 2586 * no REL, RELSZ or RELENT will have been created. For us to be able 2587 * to traverse the .plt relocations under LD_BIND_NOW we need to know 2588 * the RELENT for these relocations. Refer to elf_reloc() for more 2589 * details. 2590 */ 2591 if (!RELENT(lmp) && JMPREL(lmp)) 2592 RELENT(lmp) = sizeof (Rel); 2593 2594 /* 2595 * Establish any per-object auditing. If we're establishing `main's 2596 * link-map its too early to go searching for audit objects so just 2597 * hold the object name for later (see setup()). 2598 */ 2599 if (audit) { 2600 char *cp = audit + (char *)STRTAB(lmp); 2601 2602 if (*cp) { 2603 if (((AUDITORS(lmp) = 2604 calloc(1, sizeof (Audit_desc))) == 0) || 2605 ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) { 2606 remove_so(0, lmp); 2607 return (0); 2608 } 2609 if (lml_main.lm_head) { 2610 if (audit_setup(lmp, AUDITORS(lmp), 0, 2611 in_nfavl) == 0) { 2612 remove_so(0, lmp); 2613 return (0); 2614 } 2615 FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags; 2616 lml->lm_flags |= LML_FLG_LOCAUDIT; 2617 } 2618 } 2619 } 2620 2621 if ((CONDVAR(lmp) = rt_cond_create()) == 0) { 2622 remove_so(0, lmp); 2623 return (0); 2624 } 2625 if (oname && ((append_alias(lmp, oname, 0)) == 0)) { 2626 remove_so(0, lmp); 2627 return (0); 2628 } 2629 2630 /* 2631 * Add the mapped object to the end of the link map list. 2632 */ 2633 lm_append(lml, lmco, lmp); 2634 return (lmp); 2635 } 2636 2637 /* 2638 * Assign hardware/software capabilities. 2639 */ 2640 void 2641 cap_assign(Cap *cap, Rt_map *lmp) 2642 { 2643 while (cap->c_tag != CA_SUNW_NULL) { 2644 switch (cap->c_tag) { 2645 case CA_SUNW_HW_1: 2646 HWCAP(lmp) = cap->c_un.c_val; 2647 break; 2648 case CA_SUNW_SF_1: 2649 SFCAP(lmp) = cap->c_un.c_val; 2650 } 2651 cap++; 2652 } 2653 } 2654 2655 /* 2656 * Map in an ELF object. 2657 * Takes an open file descriptor for the object to map and its pathname; returns 2658 * a pointer to a Rt_map structure for this object, or 0 on error. 2659 */ 2660 static Rt_map * 2661 elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname, 2662 int fd, int *in_nfavl) 2663 { 2664 int i; /* general temporary */ 2665 Off memsize = 0; /* total memory size of pathname */ 2666 Off mentry; /* entry point */ 2667 Ehdr *ehdr; /* ELF header of ld.so */ 2668 Phdr *phdr; /* first Phdr in file */ 2669 Phdr *phdr0; /* Saved first Phdr in file */ 2670 Phdr *pptr; /* working Phdr */ 2671 Phdr *fph = 0; /* first loadable Phdr */ 2672 Phdr *lph; /* last loadable Phdr */ 2673 Phdr *lfph = 0; /* last loadable (filesz != 0) Phdr */ 2674 Phdr *lmph = 0; /* last loadable (memsz != 0) Phdr */ 2675 Phdr *swph = 0; /* program header for SUNWBSS */ 2676 Phdr *tlph = 0; /* program header for PT_TLS */ 2677 Phdr *unwindph = 0; /* program header for PT_SUNW_UNWIND */ 2678 Cap *cap = 0; /* program header for SUNWCAP */ 2679 Dyn *mld = 0; /* DYNAMIC structure for pathname */ 2680 size_t size; /* size of elf and program headers */ 2681 caddr_t faddr = 0; /* mapping address of pathname */ 2682 Rt_map *lmp; /* link map created */ 2683 caddr_t paddr; /* start of padded image */ 2684 Off plen; /* size of image including padding */ 2685 Half etype; 2686 int fixed; 2687 Mmap *mmaps; 2688 uint_t mmapcnt = 0; 2689 Xword align = 0; 2690 2691 /* LINTED */ 2692 ehdr = (Ehdr *)fmap->fm_maddr; 2693 2694 /* 2695 * If this a relocatable object then special processing is required. 2696 */ 2697 if ((etype = ehdr->e_type) == ET_REL) 2698 return (elf_obj_file(lml, lmco, pname, fd)); 2699 2700 /* 2701 * If this isn't a dynamic executable or shared object we can't process 2702 * it. If this is a dynamic executable then all addresses are fixed. 2703 */ 2704 if (etype == ET_EXEC) { 2705 fixed = 1; 2706 } else if (etype == ET_DYN) { 2707 fixed = 0; 2708 } else { 2709 Conv_inv_buf_t inv_buf; 2710 2711 eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname, 2712 conv_ehdr_type(etype, 0, &inv_buf)); 2713 return (0); 2714 } 2715 2716 /* 2717 * If our original mapped page was not large enough to hold all the 2718 * program headers remap them. 2719 */ 2720 size = (size_t)((char *)ehdr->e_phoff + 2721 (ehdr->e_phnum * ehdr->e_phentsize)); 2722 if (size > fmap->fm_fsize) { 2723 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2724 return (0); 2725 } 2726 if (size > fmap->fm_msize) { 2727 fmap_setup(); 2728 if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ, 2729 fmap->fm_mflags, fd, 0)) == MAP_FAILED) { 2730 int err = errno; 2731 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname, 2732 strerror(err)); 2733 return (0); 2734 } 2735 fmap->fm_msize = size; 2736 /* LINTED */ 2737 ehdr = (Ehdr *)fmap->fm_maddr; 2738 } 2739 /* LINTED */ 2740 phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize); 2741 2742 /* 2743 * Get entry point. 2744 */ 2745 mentry = ehdr->e_entry; 2746 2747 /* 2748 * Point at program headers and perform some basic validation. 2749 */ 2750 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2751 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2752 if ((pptr->p_type == PT_LOAD) || 2753 (pptr->p_type == PT_SUNWBSS)) { 2754 2755 if (fph == 0) { 2756 fph = pptr; 2757 /* LINTED argument lph is initialized in first pass */ 2758 } else if (pptr->p_vaddr <= lph->p_vaddr) { 2759 eprintf(lml, ERR_ELF, 2760 MSG_INTL(MSG_GEN_INVPRGHDR), pname); 2761 return (0); 2762 } 2763 2764 lph = pptr; 2765 2766 if (pptr->p_memsz) 2767 lmph = pptr; 2768 if (pptr->p_filesz) 2769 lfph = pptr; 2770 if (pptr->p_type == PT_SUNWBSS) 2771 swph = pptr; 2772 if (pptr->p_align > align) 2773 align = pptr->p_align; 2774 2775 } else if (pptr->p_type == PT_DYNAMIC) { 2776 mld = (Dyn *)(pptr->p_vaddr); 2777 } else if ((pptr->p_type == PT_TLS) && pptr->p_memsz) { 2778 tlph = pptr; 2779 } else if (pptr->p_type == PT_SUNWCAP) { 2780 cap = (Cap *)(pptr->p_vaddr); 2781 } else if (pptr->p_type == PT_SUNW_UNWIND) { 2782 unwindph = pptr; 2783 } 2784 } 2785 2786 #if defined(MAP_ALIGN) 2787 /* 2788 * Make sure the maximum page alignment is a power of 2 >= the default 2789 * segment alignment, for use with MAP_ALIGN. 2790 */ 2791 align = S_ROUND(align, M_SEGM_ALIGN); 2792 #endif 2793 2794 /* 2795 * We'd better have at least one loadable segment, together with some 2796 * specified file and memory size. 2797 */ 2798 if ((fph == 0) || (lmph == 0) || (lfph == 0)) { 2799 eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname); 2800 return (0); 2801 } 2802 2803 /* 2804 * Check that the files size accounts for the loadable sections 2805 * we're going to map in (failure to do this may cause spurious 2806 * bus errors if we're given a truncated file). 2807 */ 2808 if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) { 2809 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2810 return (0); 2811 } 2812 2813 /* 2814 * Memsize must be page rounded so that if we add object padding 2815 * at the end it will start at the beginning of a page. 2816 */ 2817 plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) - 2818 M_PTRUNC((ulong_t)fph->p_vaddr)); 2819 2820 /* 2821 * Determine if an existing mapping is acceptable. 2822 */ 2823 if (interp && (lml->lm_flags & LML_FLG_BASELM) && 2824 (strcmp(pname, interp->i_name) == 0)) { 2825 /* 2826 * If this is the interpreter then it has already been mapped 2827 * and we have the address so don't map it again. Note that 2828 * the common occurrence of a reference to the interpretor 2829 * (libdl -> ld.so.1) will have been caught during filter 2830 * initialization (see elf_lookup_filtee()). However, some 2831 * ELF implementations are known to record libc.so.1 as the 2832 * interpretor, and thus this test catches this behavior. 2833 */ 2834 paddr = faddr = interp->i_faddr; 2835 2836 } else if ((fixed == 0) && (r_debug.rtd_objpad == 0) && 2837 (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) && 2838 (fph == lph) && (fph->p_filesz == fph->p_memsz) && 2839 (((Xword)fmap->fm_maddr % align) == 0)) { 2840 size_t rsize; 2841 2842 /* 2843 * If the file contains a single segment, and the mapping 2844 * required has already been established from the initial fmap 2845 * mapping, then we don't need to do anything more. Reset the 2846 * fmap address so that any later files start a new fmap. This 2847 * is really an optimization for filters, such as libdl.so, 2848 * libthread, etc. that are constructed to be a single text 2849 * segment. 2850 */ 2851 paddr = faddr = fmap->fm_maddr; 2852 2853 /* 2854 * Free any unused mapping by assigning the fmap buffer to the 2855 * unused region. fmap_setup() will unmap this area and 2856 * establish defaults for future mappings. 2857 */ 2858 rsize = M_PROUND(fph->p_filesz); 2859 fmap->fm_maddr += rsize; 2860 fmap->fm_msize -= rsize; 2861 fmap_setup(); 2862 } 2863 2864 /* 2865 * Allocate a mapping array to retain mapped segment information. 2866 */ 2867 if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0) 2868 return (0); 2869 2870 /* 2871 * If we're reusing an existing mapping determine the objects etext 2872 * address. Otherwise map the file (which will calculate the etext 2873 * address as part of the mapping process). 2874 */ 2875 if (faddr) { 2876 caddr_t base; 2877 2878 if (fixed) 2879 base = 0; 2880 else 2881 base = faddr; 2882 2883 /* LINTED */ 2884 phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize); 2885 2886 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2887 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2888 if (pptr->p_type != PT_LOAD) 2889 continue; 2890 2891 mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base); 2892 mmaps[mmapcnt].m_msize = pptr->p_memsz; 2893 mmaps[mmapcnt].m_fsize = pptr->p_filesz; 2894 mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC); 2895 mmapcnt++; 2896 2897 if (!(pptr->p_flags & PF_W)) { 2898 fmap->fm_etext = (ulong_t)pptr->p_vaddr + 2899 (ulong_t)pptr->p_memsz + 2900 (ulong_t)(fixed ? 0 : faddr); 2901 } 2902 } 2903 } else { 2904 /* 2905 * Map the file. 2906 */ 2907 if (!(faddr = elf_map_it(lml, pname, memsize, ehdr, fph, lph, 2908 &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt))) 2909 return (0); 2910 } 2911 2912 /* 2913 * Calculate absolute base addresses and entry points. 2914 */ 2915 if (!fixed) { 2916 if (mld) 2917 /* LINTED */ 2918 mld = (Dyn *)((Off)mld + faddr); 2919 if (cap) 2920 /* LINTED */ 2921 cap = (Cap *)((Off)cap + faddr); 2922 mentry += (Off)faddr; 2923 } 2924 2925 /* 2926 * Create new link map structure for newly mapped shared object. 2927 */ 2928 if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr, 2929 fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps, 2930 mmapcnt, in_nfavl))) { 2931 (void) munmap((caddr_t)faddr, memsize); 2932 return (0); 2933 } 2934 2935 /* 2936 * Start the system loading in the ELF information we'll be processing. 2937 */ 2938 if (REL(lmp)) { 2939 (void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) + 2940 (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp), 2941 MADV_WILLNEED); 2942 } 2943 2944 /* 2945 * If this shared object contains any special segments, record them. 2946 */ 2947 if (swph) { 2948 FLAGS(lmp) |= FLG_RT_SUNWBSS; 2949 SUNWBSS(lmp) = phdr + (swph - phdr0); 2950 } 2951 if (tlph && (tls_assign(lml, lmp, (phdr + (tlph - phdr0))) == 0)) { 2952 remove_so(lml, lmp); 2953 return (0); 2954 } 2955 2956 if (unwindph) 2957 PTUNWIND(lmp) = phdr + (unwindph - phdr0); 2958 2959 if (cap) 2960 cap_assign(cap, lmp); 2961 2962 return (lmp); 2963 } 2964 2965 /* 2966 * Function to correct protection settings. Segments are all mapped initially 2967 * with permissions as given in the segment header. We need to turn on write 2968 * permissions on a text segment if there are any relocations against that 2969 * segment, and them turn write permission back off again before returning 2970 * control to the user. This function turns the permission on or off depending 2971 * on the value of the argument. 2972 */ 2973 int 2974 elf_set_prot(Rt_map *lmp, int permission) 2975 { 2976 Mmap *mmaps; 2977 2978 /* 2979 * If this is an allocated image (ie. a relocatable object) we can't 2980 * mprotect() anything. 2981 */ 2982 if (FLAGS(lmp) & FLG_RT_IMGALLOC) 2983 return (1); 2984 2985 DBG_CALL(Dbg_file_prot(lmp, permission)); 2986 2987 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) { 2988 if (mmaps->m_perm & PROT_WRITE) 2989 continue; 2990 2991 if (mprotect(mmaps->m_vaddr, mmaps->m_msize, 2992 (mmaps->m_perm | permission)) == -1) { 2993 int err = errno; 2994 eprintf(LIST(lmp), ERR_FATAL, MSG_INTL(MSG_SYS_MPROT), 2995 NAME(lmp), strerror(err)); 2996 return (0); 2997 } 2998 } 2999 return (1); 3000 } 3001 3002 /* 3003 * Build full pathname of shared object from given directory name and filename. 3004 */ 3005 static char * 3006 elf_get_so(const char *dir, const char *file) 3007 { 3008 static char pname[PATH_MAX]; 3009 3010 (void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file); 3011 return (pname); 3012 } 3013 3014 /* 3015 * The copy relocation is recorded in a copy structure which will be applied 3016 * after all other relocations are carried out. This provides for copying data 3017 * that must be relocated itself (ie. pointers in shared objects). This 3018 * structure also provides a means of binding RTLD_GROUP dependencies to any 3019 * copy relocations that have been taken from any group members. 3020 * 3021 * If the size of the .bss area available for the copy information is not the 3022 * same as the source of the data inform the user if we're under ldd(1) control 3023 * (this checking was only established in 5.3, so by only issuing an error via 3024 * ldd(1) we maintain the standard set by previous releases). 3025 */ 3026 int 3027 elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym, 3028 Rt_map *dlmp, const void *dadd) 3029 { 3030 Rel_copy rc; 3031 Lm_list *lml = LIST(rlmp); 3032 3033 rc.r_name = name; 3034 rc.r_rsym = rsym; /* the new reference symbol and its */ 3035 rc.r_rlmp = rlmp; /* associated link-map */ 3036 rc.r_dlmp = dlmp; /* the defining link-map */ 3037 rc.r_dsym = dsym; /* the original definition */ 3038 rc.r_radd = radd; 3039 rc.r_dadd = dadd; 3040 3041 if (rsym->st_size > dsym->st_size) 3042 rc.r_size = (size_t)dsym->st_size; 3043 else 3044 rc.r_size = (size_t)rsym->st_size; 3045 3046 if (alist_append(©_R(dlmp), &rc, sizeof (Rel_copy), 3047 AL_CNT_COPYREL) == 0) { 3048 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 3049 return (0); 3050 else 3051 return (1); 3052 } 3053 if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) { 3054 if (aplist_append(©_S(rlmp), dlmp, 3055 AL_CNT_COPYREL) == NULL) { 3056 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 3057 return (0); 3058 else 3059 return (1); 3060 } 3061 FLAGS1(dlmp) |= FL1_RT_COPYTOOK; 3062 } 3063 3064 /* 3065 * If we are tracing (ldd), warn the user if 3066 * 1) the size from the reference symbol differs from the 3067 * copy definition. We can only copy as much data as the 3068 * reference (dynamic executables) entry allows. 3069 * 2) the copy definition has STV_PROTECTED visibility. 3070 */ 3071 if (lml->lm_flags & LML_FLG_TRC_WARN) { 3072 if (rsym->st_size != dsym->st_size) { 3073 (void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF), 3074 _conv_reloc_type(M_R_COPY), demangle(name), 3075 NAME(rlmp), EC_XWORD(rsym->st_size), 3076 NAME(dlmp), EC_XWORD(dsym->st_size)); 3077 if (rsym->st_size > dsym->st_size) 3078 (void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA), 3079 NAME(dlmp)); 3080 else 3081 (void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC), 3082 NAME(rlmp)); 3083 } 3084 3085 if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) { 3086 (void) printf(MSG_INTL(MSG_LDD_CPY_PROT), 3087 _conv_reloc_type(M_R_COPY), demangle(name), 3088 NAME(dlmp)); 3089 } 3090 } 3091 3092 DBG_CALL(Dbg_reloc_apply_val(lml, ELF_DBG_RTLD, (Xword)radd, 3093 (Xword)rc.r_size)); 3094 return (1); 3095 } 3096 3097 /* 3098 * Determine the symbol location of an address within a link-map. Look for 3099 * the nearest symbol (whose value is less than or equal to the required 3100 * address). This is the object specific part of dladdr(). 3101 */ 3102 static void 3103 elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags) 3104 { 3105 ulong_t ndx, cnt, base, _value; 3106 Sym *sym, *_sym = NULL; 3107 const char *str; 3108 int _flags; 3109 uint_t *dynaddr_ndx; 3110 uint_t dynaddr_n = 0; 3111 ulong_t value; 3112 3113 /* 3114 * If SUNWSYMTAB() is non-NULL, then it sees a special version of 3115 * the dynsym that starts with any local function symbols that exist in 3116 * the library and then moves to the data held in SYMTAB(). In this 3117 * case, SUNWSYMSZ tells us how long the symbol table is. The 3118 * availability of local function symbols will enhance the results 3119 * we can provide. 3120 * 3121 * If SUNWSYMTAB() is non-NULL, then there might also be a 3122 * SUNWSYMSORT() vector associated with it. SUNWSYMSORT() contains 3123 * an array of indices into SUNWSYMTAB, sorted by increasing 3124 * address. We can use this to do an O(log N) search instead of a 3125 * brute force search. 3126 * 3127 * If SUNWSYMTAB() is NULL, then SYMTAB() references a dynsym that 3128 * contains only global symbols. In that case, the length of 3129 * the symbol table comes from the nchain field of the related 3130 * symbol lookup hash table. 3131 */ 3132 str = STRTAB(lmp); 3133 if (SUNWSYMSZ(lmp) == NULL) { 3134 sym = SYMTAB(lmp); 3135 /* 3136 * If we don't have a .hash table there are no symbols 3137 * to look at. 3138 */ 3139 if (HASH(lmp) == 0) 3140 return; 3141 cnt = HASH(lmp)[1]; 3142 } else { 3143 sym = SUNWSYMTAB(lmp); 3144 cnt = SUNWSYMSZ(lmp) / SYMENT(lmp); 3145 dynaddr_ndx = SUNWSYMSORT(lmp); 3146 if (dynaddr_ndx != NULL) 3147 dynaddr_n = SUNWSYMSORTSZ(lmp) / SUNWSORTENT(lmp); 3148 } 3149 3150 if (FLAGS(lmp) & FLG_RT_FIXED) 3151 base = 0; 3152 else 3153 base = ADDR(lmp); 3154 3155 if (dynaddr_n > 0) { /* Binary search */ 3156 long low = 0, low_bnd; 3157 long high = dynaddr_n - 1, high_bnd; 3158 long mid; 3159 Sym *mid_sym; 3160 3161 /* 3162 * Note that SUNWSYMSORT only contains symbols types that 3163 * supply memory addresses, so there's no need to check and 3164 * filter out any other types. 3165 */ 3166 low_bnd = low; 3167 high_bnd = high; 3168 while (low <= high) { 3169 mid = (low + high) / 2; 3170 mid_sym = &sym[dynaddr_ndx[mid]]; 3171 value = mid_sym->st_value + base; 3172 if (addr < value) { 3173 if ((sym[dynaddr_ndx[high]].st_value + base) >= 3174 addr) 3175 high_bnd = high; 3176 high = mid - 1; 3177 } else if (addr > value) { 3178 if ((sym[dynaddr_ndx[low]].st_value + base) <= 3179 addr) 3180 low_bnd = low; 3181 low = mid + 1; 3182 } else { 3183 _sym = mid_sym; 3184 _value = value; 3185 break; 3186 } 3187 } 3188 /* 3189 * If the above didn't find it exactly, then we must 3190 * return the closest symbol with a value that doesn't 3191 * exceed the one we are looking for. If that symbol exists, 3192 * it will lie in the range bounded by low_bnd and 3193 * high_bnd. This is a linear search, but a short one. 3194 */ 3195 if (_sym == NULL) { 3196 for (mid = low_bnd; mid <= high_bnd; mid++) { 3197 mid_sym = &sym[dynaddr_ndx[mid]]; 3198 value = mid_sym->st_value + base; 3199 if (addr >= value) { 3200 _sym = mid_sym; 3201 _value = value; 3202 } else { 3203 break; 3204 } 3205 } 3206 } 3207 } else { /* Linear search */ 3208 for (_value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) { 3209 /* 3210 * Skip expected symbol types that are not functions 3211 * or data: 3212 * - A symbol table starts with an undefined symbol 3213 * in slot 0. If we are using SUNWSYMTAB(), 3214 * there will be a second undefined symbol 3215 * right before the globals. 3216 * - The local part of SUNWSYMTAB() contains a 3217 * series of function symbols. Each section 3218 * starts with an initial STT_FILE symbol. 3219 */ 3220 if ((sym->st_shndx == SHN_UNDEF) || 3221 (ELF_ST_TYPE(sym->st_info) == STT_FILE)) 3222 continue; 3223 3224 value = sym->st_value + base; 3225 if (value > addr) 3226 continue; 3227 if (value < _value) 3228 continue; 3229 3230 _sym = sym; 3231 _value = value; 3232 3233 /* 3234 * Note, because we accept local and global symbols 3235 * we could find a section symbol that matches the 3236 * associated address, which means that the symbol 3237 * name will be null. In this case continue the 3238 * search in case we can find a global symbol of 3239 * the same value. 3240 */ 3241 if ((value == addr) && 3242 (ELF_ST_TYPE(sym->st_info) != STT_SECTION)) 3243 break; 3244 } 3245 } 3246 3247 _flags = flags & RTLD_DL_MASK; 3248 if (_sym) { 3249 if (_flags == RTLD_DL_SYMENT) 3250 *info = (void *)_sym; 3251 else if (_flags == RTLD_DL_LINKMAP) 3252 *info = (void *)lmp; 3253 3254 dlip->dli_sname = str + _sym->st_name; 3255 dlip->dli_saddr = (void *)_value; 3256 } else { 3257 /* 3258 * addr lies between the beginning of the mapped segment and 3259 * the first global symbol. We have no symbol to return 3260 * and the caller requires one. We use _START_, the base 3261 * address of the mapping. 3262 */ 3263 3264 if (_flags == RTLD_DL_SYMENT) { 3265 /* 3266 * An actual symbol struct is needed, so we 3267 * construct one for _START_. To do this in a 3268 * fully accurate way requires a different symbol 3269 * for each mapped segment. This requires the 3270 * use of dynamic memory and a mutex. That's too much 3271 * plumbing for a fringe case of limited importance. 3272 * 3273 * Fortunately, we can simplify: 3274 * - Only the st_size and st_info fields are useful 3275 * outside of the linker internals. The others 3276 * reference things that outside code cannot see, 3277 * and can be set to 0. 3278 * - It's just a label and there is no size 3279 * to report. So, the size should be 0. 3280 * This means that only st_info needs a non-zero 3281 * (constant) value. A static struct will suffice. 3282 * It must be const (readonly) so the caller can't 3283 * change its meaning for subsequent callers. 3284 */ 3285 static const Sym fsym = { 0, 0, 0, 3286 ELF_ST_INFO(STB_LOCAL, STT_OBJECT) }; 3287 *info = (void *) &fsym; 3288 } 3289 3290 dlip->dli_sname = MSG_ORIG(MSG_SYM_START); 3291 dlip->dli_saddr = (void *) ADDR(lmp); 3292 } 3293 } 3294 3295 static void 3296 elf_lazy_cleanup(APlist *alp) 3297 { 3298 Rt_map *lmp; 3299 Aliste idx; 3300 3301 /* 3302 * Cleanup any link-maps added to this dynamic list and free it. 3303 */ 3304 for (APLIST_TRAVERSE(alp, idx, lmp)) 3305 FLAGS(lmp) &= ~FLG_RT_TMPLIST; 3306 free(alp); 3307 } 3308 3309 /* 3310 * This routine is called as a last fall-back to search for a symbol from a 3311 * standard relocation. To maintain lazy loadings goal of reducing the number 3312 * of objects mapped, any symbol search is first carried out using the objects 3313 * that already exist in the process (either on a link-map list or handle). 3314 * If a symbol can't be found, and lazy dependencies are still pending, this 3315 * routine loads the dependencies in an attempt to locate the symbol. 3316 * 3317 * Only new objects are inspected as we will have already inspected presently 3318 * loaded objects before calling this routine. However, a new object may not 3319 * be new - although the di_lmp might be zero, the object may have been mapped 3320 * as someone elses dependency. Thus there's a possibility of some symbol 3321 * search duplication. 3322 */ 3323 Sym * 3324 elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo, int *in_nfavl) 3325 { 3326 Sym *sym = 0; 3327 APlist *alist = NULL; 3328 Aliste idx; 3329 Rt_map *lmp1, *lmp = slp->sl_imap; 3330 const char *name = slp->sl_name; 3331 3332 /* 3333 * Generate a local list of new objects to process. This list can grow 3334 * as each object supplies its own lazy dependencies. 3335 */ 3336 if (aplist_append(&alist, lmp, AL_CNT_LAZYFIND) == NULL) 3337 return (NULL); 3338 FLAGS(lmp) |= FLG_RT_TMPLIST; 3339 3340 for (APLIST_TRAVERSE(alist, idx, lmp1)) { 3341 uint_t cnt = 0; 3342 Slookup sl = *slp; 3343 Dyninfo *dip, *pdip; 3344 3345 /* 3346 * Discard any relocation index from further symbol searches. 3347 * This index will have already been used to trigger any 3348 * necessary lazy-loads, and it might be because one of these 3349 * lazy loads have failed that we're here performing this 3350 * fallback. By removing the relocation index we don't try 3351 * and perform the same failed lazy loading activity again. 3352 */ 3353 sl.sl_rsymndx = 0; 3354 3355 /* 3356 * Loop through the lazy DT_NEEDED entries examining each object 3357 * for the required symbol. If the symbol is not found, the 3358 * object is in turn added to the local alist, so that the 3359 * objects lazy DT_NEEDED entries can be examined. 3360 */ 3361 lmp = lmp1; 3362 for (dip = DYNINFO(lmp), pdip = NULL; cnt < DYNINFOCNT(lmp); 3363 cnt++, pdip = dip++) { 3364 Rt_map *nlmp; 3365 3366 if (((dip->di_flags & FLG_DI_LAZY) == 0) || 3367 dip->di_info) 3368 continue; 3369 3370 /* 3371 * If this object has already failed to lazy load, and 3372 * we're still processing the same runtime linker 3373 * operation that produced the failure, don't bother 3374 * to try and load the object again. 3375 */ 3376 if ((dip->di_flags & FLG_DI_LAZYFAIL) && pdip && 3377 (pdip->di_flags & FLG_DI_POSFLAG1)) { 3378 if (pdip->di_info == (void *)ld_entry_cnt) 3379 continue; 3380 3381 dip->di_flags &= ~FLG_DI_LAZYFAIL; 3382 pdip->di_info = NULL; 3383 } 3384 3385 /* 3386 * Try loading this lazy dependency. If the object 3387 * can't be loaded, consider this non-fatal and continue 3388 * the search. Lazy loaded dependencies need not exist 3389 * and their loading should only turn out to be fatal 3390 * if they are required to satisfy a relocation. 3391 * 3392 * If the file is already loaded and relocated we must 3393 * still inspect it for symbols, even though it might 3394 * have already been searched. This lazy load operation 3395 * might have promoted the permissions of the object, 3396 * and thus made the object applicable for this symbol 3397 * search, whereas before the object might have been 3398 * skipped. 3399 */ 3400 if ((nlmp = elf_lazy_load(lmp, &sl, cnt, 3401 name, in_nfavl)) == 0) 3402 continue; 3403 3404 /* 3405 * If this object isn't yet a part of the dynamic list 3406 * then inspect it for the symbol. If the symbol isn't 3407 * found add the object to the dynamic list so that we 3408 * can inspect its dependencies. 3409 */ 3410 if (FLAGS(nlmp) & FLG_RT_TMPLIST) 3411 continue; 3412 3413 sl.sl_imap = nlmp; 3414 if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, 3415 binfo, in_nfavl)) 3416 break; 3417 3418 /* 3419 * Some dlsym() operations are already traversing a 3420 * link-map (dlopen(0)), and thus there's no need to 3421 * build our own dynamic dependency list. 3422 */ 3423 if ((sl.sl_flags & LKUP_NODESCENT) == 0) { 3424 if (aplist_append(&alist, nlmp, 3425 AL_CNT_LAZYFIND) == 0) { 3426 elf_lazy_cleanup(alist); 3427 return (0); 3428 } 3429 FLAGS(nlmp) |= FLG_RT_TMPLIST; 3430 } 3431 } 3432 if (sym) 3433 break; 3434 } 3435 3436 elf_lazy_cleanup(alist); 3437 return (sym); 3438 } 3439 3440 /* 3441 * Warning message for bad r_offset. 3442 */ 3443 void 3444 elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset, 3445 ulong_t rsymndx) 3446 { 3447 const char *name = (char *)0; 3448 Lm_list *lml = LIST(lmp); 3449 int trace; 3450 3451 if ((lml->lm_flags & LML_FLG_TRC_ENABLE) && 3452 (((rtld_flags & RT_FL_SILENCERR) == 0) || 3453 (lml->lm_flags & LML_FLG_TRC_VERBOSE))) 3454 trace = 1; 3455 else 3456 trace = 0; 3457 3458 if ((trace == 0) && (DBG_ENABLED == 0)) 3459 return; 3460 3461 if (rsymndx) { 3462 Sym *symref = (Sym *)((ulong_t)SYMTAB(lmp) + 3463 (rsymndx * SYMENT(lmp))); 3464 3465 if (ELF_ST_BIND(symref->st_info) != STB_LOCAL) 3466 name = (char *)(STRTAB(lmp) + symref->st_name); 3467 } 3468 3469 if (name == 0) 3470 name = MSG_ORIG(MSG_STR_EMPTY); 3471 3472 if (trace) { 3473 const char *rstr; 3474 3475 rstr = _conv_reloc_type((uint_t)rtype); 3476 (void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name, 3477 EC_ADDR(roffset)); 3478 return; 3479 } 3480 3481 Dbg_reloc_error(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, name); 3482 } 3483 3484 /* 3485 * Resolve a static TLS relocation. 3486 */ 3487 long 3488 elf_static_tls(Rt_map *lmp, Sym *sym, void *rel, uchar_t rtype, char *name, 3489 ulong_t roffset, long value) 3490 { 3491 Lm_list *lml = LIST(lmp); 3492 3493 /* 3494 * Relocations against a static TLS block have limited support once 3495 * process initialization has completed. Any error condition should be 3496 * discovered by testing for DF_STATIC_TLS as part of loading an object, 3497 * however individual relocations are tested in case the dynamic flag 3498 * had not been set when this object was built. 3499 */ 3500 if (PTTLS(lmp) == 0) { 3501 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, 3502 M_REL_SHT_TYPE, rel, NULL, name)); 3503 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS), 3504 _conv_reloc_type((uint_t)rtype), NAME(lmp), 3505 name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN)); 3506 return (0); 3507 } 3508 3509 /* 3510 * If no static TLS has been set aside for this object, determine if 3511 * any can be obtained. Enforce that any object using static TLS is 3512 * non-deletable. 3513 */ 3514 if (TLSSTATOFF(lmp) == 0) { 3515 FLAGS1(lmp) |= FL1_RT_TLSSTAT; 3516 MODE(lmp) |= RTLD_NODELETE; 3517 3518 if (tls_assign(lml, lmp, PTTLS(lmp)) == 0) { 3519 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, 3520 M_REL_SHT_TYPE, rel, NULL, name)); 3521 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS), 3522 _conv_reloc_type((uint_t)rtype), NAME(lmp), 3523 name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN)); 3524 return (0); 3525 } 3526 } 3527 3528 /* 3529 * Typically, a static TLS offset is maintained as a symbols value. 3530 * For local symbols that are not apart of the dynamic symbol table, 3531 * the TLS relocation points to a section symbol, and the static TLS 3532 * offset was deposited in the associated GOT table. Make sure the GOT 3533 * is cleared, so that the value isn't reused in do_reloc(). 3534 */ 3535 if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) { 3536 if ((ELF_ST_TYPE(sym->st_info) == STT_SECTION)) { 3537 value = *(long *)roffset; 3538 *(long *)roffset = 0; 3539 } else { 3540 value = sym->st_value; 3541 } 3542 } 3543 return (-(TLSSTATOFF(lmp) - value)); 3544 } 3545 3546 /* 3547 * If the symbol is not found and the reference was not to a weak symbol, report 3548 * an error. Weak references may be unresolved. 3549 */ 3550 int 3551 elf_reloc_error(Rt_map *lmp, const char *name, void *rel, uint_t binfo) 3552 { 3553 Lm_list *lml = LIST(lmp); 3554 3555 /* 3556 * Under crle(1), relocation failures are ignored. 3557 */ 3558 if (lml->lm_flags & LML_FLG_IGNRELERR) 3559 return (1); 3560 3561 /* 3562 * Under ldd(1), unresolved references are reported. However, if the 3563 * original reference is EXTERN or PARENT these references are ignored 3564 * unless ldd's -p option is in effect. 3565 */ 3566 if (lml->lm_flags & LML_FLG_TRC_WARN) { 3567 if (((binfo & DBG_BINFO_REF_MSK) == 0) || 3568 ((lml->lm_flags & LML_FLG_TRC_NOPAREXT) != 0)) { 3569 (void) printf(MSG_INTL(MSG_LDD_SYM_NFOUND), 3570 demangle(name), NAME(lmp)); 3571 } 3572 return (1); 3573 } 3574 3575 /* 3576 * Otherwise, the unresolved references is fatal. 3577 */ 3578 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, 3579 NULL, name)); 3580 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp), 3581 demangle(name)); 3582 3583 return (0); 3584 } 3585