1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 1988 AT&T 24 * All Rights Reserved 25 * 26 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 27 * Use is subject to license terms. 28 */ 29 #pragma ident "%Z%%M% %I% %E% SMI" 30 31 /* 32 * Object file dependent support for ELF objects. 33 */ 34 #include "_synonyms.h" 35 36 #include <stdio.h> 37 #include <sys/procfs.h> 38 #include <sys/mman.h> 39 #include <sys/debug.h> 40 #include <string.h> 41 #include <limits.h> 42 #include <dlfcn.h> 43 #include <debug.h> 44 #include <conv.h> 45 #include "_rtld.h" 46 #include "_audit.h" 47 #include "_elf.h" 48 #include "msg.h" 49 50 /* 51 * Default and secure dependency search paths. 52 */ 53 static Pnode elf_dflt_dirs[] = { 54 #if defined(_ELF64) 55 #ifndef SGS_PRE_UNIFIED_PROCESS 56 { MSG_ORIG(MSG_PTH_LIB_64), 0, MSG_PTH_LIB_64_SIZE, 57 LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] }, 58 #endif 59 { MSG_ORIG(MSG_PTH_USRLIB_64), 0, MSG_PTH_USRLIB_64_SIZE, 60 LA_SER_DEFAULT, 0, 0 } 61 #else 62 #ifndef SGS_PRE_UNIFIED_PROCESS 63 { MSG_ORIG(MSG_PTH_LIB), 0, MSG_PTH_LIB_SIZE, 64 LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] }, 65 #endif 66 { MSG_ORIG(MSG_PTH_USRLIB), 0, MSG_PTH_USRLIB_SIZE, 67 LA_SER_DEFAULT, 0, 0 } 68 #endif 69 }; 70 71 static Pnode elf_secure_dirs[] = { 72 #if defined(_ELF64) 73 #ifndef SGS_PRE_UNIFIED_PROCESS 74 { MSG_ORIG(MSG_PTH_LIBSE_64), 0, MSG_PTH_LIBSE_64_SIZE, 75 LA_SER_SECURE, 0, &elf_secure_dirs[1] }, 76 #endif 77 { MSG_ORIG(MSG_PTH_USRLIBSE_64), 0, 78 MSG_PTH_USRLIBSE_64_SIZE, 79 LA_SER_SECURE, 0, 0 } 80 #else 81 #ifndef SGS_PRE_UNIFIED_PROCESS 82 { MSG_ORIG(MSG_PTH_LIBSE), 0, MSG_PTH_LIBSE_SIZE, 83 LA_SER_SECURE, 0, &elf_secure_dirs[1] }, 84 #endif 85 { MSG_ORIG(MSG_PTH_USRLIBSE), 0, MSG_PTH_USRLIBSE_SIZE, 86 LA_SER_SECURE, 0, 0 } 87 #endif 88 }; 89 90 /* 91 * Defines for local functions. 92 */ 93 static Pnode *elf_fix_name(const char *, Rt_map *, uint_t); 94 static int elf_are_u(Rej_desc *); 95 static void elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int); 96 static ulong_t elf_entry_pt(void); 97 static char *elf_get_so(const char *, const char *); 98 static Rt_map *elf_map_so(Lm_list *, Aliste, const char *, const char *, int); 99 static int elf_needed(Lm_list *, Aliste, Rt_map *); 100 static void elf_unmap_so(Rt_map *); 101 static int elf_verify_vers(const char *, Rt_map *, Rt_map *); 102 103 /* 104 * Functions and data accessed through indirect pointers. 105 */ 106 Fct elf_fct = { 107 elf_are_u, 108 elf_entry_pt, 109 elf_map_so, 110 elf_unmap_so, 111 elf_needed, 112 lookup_sym, 113 elf_reloc, 114 elf_dflt_dirs, 115 elf_secure_dirs, 116 elf_fix_name, 117 elf_get_so, 118 elf_dladdr, 119 dlsym_handle, 120 elf_verify_vers, 121 elf_set_prot 122 }; 123 124 125 /* 126 * Redefine NEEDED name if necessary. 127 */ 128 static Pnode * 129 elf_fix_name(const char *name, Rt_map *clmp, uint_t orig) 130 { 131 /* 132 * For ABI compliance, if we are asked for ld.so.1, then really give 133 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1). 134 */ 135 if (((*name == '/') && 136 #if defined(_ELF64) 137 (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) || 138 #else 139 (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) || 140 #endif 141 (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) { 142 Pnode *pnp; 143 144 DBG_CALL(Dbg_file_fixname(LIST(clmp), name, 145 MSG_ORIG(MSG_PTH_LIBSYS))); 146 if (((pnp = calloc(sizeof (Pnode), 1)) == 0) || 147 ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) { 148 if (pnp) 149 free(pnp); 150 return (0); 151 } 152 pnp->p_len = MSG_PTH_LIBSYS_SIZE; 153 pnp->p_orig = (orig & PN_SER_MASK); 154 return (pnp); 155 } 156 157 return (expand_paths(clmp, name, orig, 0)); 158 } 159 160 /* 161 * Determine if we have been given an ELF file and if so determine if the file 162 * is compatible. Returns 1 if true, else 0 and sets the reject descriptor 163 * with associated error information. 164 */ 165 static int 166 elf_are_u(Rej_desc *rej) 167 { 168 Ehdr *ehdr; 169 170 /* 171 * Determine if we're an elf file. If not simply return, we don't set 172 * any rejection information as this test allows use to scroll through 173 * the objects we support (ELF, AOUT). 174 */ 175 if (fmap->fm_fsize < sizeof (Ehdr) || 176 fmap->fm_maddr[EI_MAG0] != ELFMAG0 || 177 fmap->fm_maddr[EI_MAG1] != ELFMAG1 || 178 fmap->fm_maddr[EI_MAG2] != ELFMAG2 || 179 fmap->fm_maddr[EI_MAG3] != ELFMAG3) { 180 return (0); 181 } 182 183 /* 184 * Check class and encoding. 185 */ 186 /* LINTED */ 187 ehdr = (Ehdr *)fmap->fm_maddr; 188 if (ehdr->e_ident[EI_CLASS] != M_CLASS) { 189 rej->rej_type = SGS_REJ_CLASS; 190 rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS]; 191 return (0); 192 } 193 if (ehdr->e_ident[EI_DATA] != M_DATA) { 194 rej->rej_type = SGS_REJ_DATA; 195 rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA]; 196 return (0); 197 } 198 if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) && 199 (ehdr->e_type != ET_DYN)) { 200 rej->rej_type = SGS_REJ_TYPE; 201 rej->rej_info = (uint_t)ehdr->e_type; 202 return (0); 203 } 204 205 /* 206 * Verify machine specific flags, and hardware capability requirements. 207 */ 208 if ((elf_mach_flags_check(rej, ehdr) == 0) || 209 ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0))) 210 return (0); 211 212 /* 213 * Verify ELF version. ??? is this too restrictive ??? 214 */ 215 if (ehdr->e_version > EV_CURRENT) { 216 rej->rej_type = SGS_REJ_VERSION; 217 rej->rej_info = (uint_t)ehdr->e_version; 218 return (0); 219 } 220 return (1); 221 } 222 223 /* 224 * The runtime linker employs lazy loading to provide the libraries needed for 225 * debugging, preloading .o's and dldump(). As these are seldom used, the 226 * standard startup of ld.so.1 doesn't initialize all the information necessary 227 * to perform plt relocation on ld.so.1's link-map. The first time lazy loading 228 * is called we get here to perform these initializations: 229 * 230 * o elf_needed() is called to set up the DYNINFO() indexes for each lazy 231 * dependency. Typically, for all other objects, this is called during 232 * analyze_so(), but as ld.so.1 is set-contained we skip this processing. 233 * 234 * o For intel, ld.so.1's JMPSLOT relocations need relative updates. These 235 * are by default skipped thus delaying all relative relocation processing 236 * on every invocation of ld.so.1. 237 */ 238 int 239 elf_rtld_load() 240 { 241 Lm_list *lml = &lml_rtld; 242 Rt_map *lmp = lml->lm_head; 243 244 if (lml->lm_flags & LML_FLG_PLTREL) 245 return (1); 246 247 /* 248 * As we need to refer to the DYNINFO() information, insure that it has 249 * been initialized. 250 */ 251 if (elf_needed(lml, ALO_DATA, lmp) == 0) 252 return (0); 253 254 #if defined(__i386) 255 /* 256 * This is a kludge to give ld.so.1 a performance benefit on i386. 257 * It's based around two factors. 258 * 259 * o JMPSLOT relocations (PLT's) actually need a relative relocation 260 * applied to the GOT entry so that they can find PLT0. 261 * 262 * o ld.so.1 does not exercise *any* PLT's before it has made a call 263 * to elf_lazy_load(). This is because all dynamic dependencies 264 * are recorded as lazy dependencies. 265 */ 266 (void) elf_reloc_relacount((ulong_t)JMPREL(lmp), 267 (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp), 268 (ulong_t)ADDR(lmp)); 269 #endif 270 271 lml->lm_flags |= LML_FLG_PLTREL; 272 return (1); 273 } 274 275 /* 276 * Lazy load an object. 277 */ 278 Rt_map * 279 elf_lazy_load(Rt_map *clmp, uint_t ndx, const char *sym) 280 { 281 Rt_map *nlmp, *hlmp; 282 Dyninfo *dip = &DYNINFO(clmp)[ndx]; 283 uint_t flags = 0; 284 Pnode *pnp; 285 const char *name; 286 Lm_list *lml = LIST(clmp); 287 Lm_cntl *lmc; 288 Aliste lmco; 289 290 /* 291 * If this dependency has already been processed, we're done. 292 */ 293 if (((nlmp = (Rt_map *)dip->di_info) != 0) || 294 (dip->di_flags & FLG_DI_PROCESSD)) 295 return (nlmp); 296 297 /* 298 * Determine the initial dependency name, and indicate that this 299 * dependencies processing has initiated. 300 */ 301 name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val; 302 DBG_CALL(Dbg_file_lazyload(clmp, name, sym)); 303 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 304 dip->di_flags |= FLG_DI_PROCESSD; 305 306 if (dip->di_flags & FLG_DI_GROUP) 307 flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE); 308 309 /* 310 * Expand the requested name if necessary. 311 */ 312 if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) 313 return (0); 314 315 /* 316 * Provided the object on the head of the link-map has completed its 317 * relocation, create a new link-map control list for this request. 318 */ 319 hlmp = lml->lm_head; 320 if (FLAGS(hlmp) & FLG_RT_RELOCED) { 321 if ((lmc = alist_append(&(lml->lm_lists), 0, sizeof (Lm_cntl), 322 AL_CNT_LMLISTS)) == 0) { 323 remove_pnode(pnp); 324 return (0); 325 } 326 lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists); 327 } else { 328 lmc = 0; 329 lmco = ALO_DATA; 330 } 331 332 /* 333 * Load the associated object. 334 */ 335 dip->di_info = nlmp = 336 load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0); 337 338 /* 339 * Remove any expanded pathname infrastructure. Reduce the pending lazy 340 * dependency count of the caller, together with the link-map lists 341 * count of objects that still have lazy dependencies pending. 342 */ 343 remove_pnode(pnp); 344 if (--LAZY(clmp) == 0) 345 LIST(clmp)->lm_lazy--; 346 347 /* 348 * Finish processing the objects associated with this request, and 349 * create an association between the caller and this dependency. 350 */ 351 if (nlmp && (((analyze_lmc(lml, lmco, nlmp) == 0)) || 352 (relocate_lmc(lml, lmco, clmp, nlmp) == 0) || 353 (bind_one(clmp, nlmp, BND_NEEDED) == 0))) 354 dip->di_info = nlmp = 0; 355 356 /* 357 * If this lazyload has failed, and we've created a new link-map 358 * control list to which this request has added objects, then remove 359 * all the objects that have been associated to this request. 360 */ 361 if ((nlmp == 0) && lmc && lmc->lc_head) 362 remove_lmc(lml, clmp, lmc, lmco, name); 363 364 /* 365 * Finally, remove any link-map control list that was created. 366 */ 367 if (lmc) 368 remove_cntl(lml, lmco); 369 370 return (nlmp); 371 } 372 373 /* 374 * Return the entry point of the ELF executable. 375 */ 376 static ulong_t 377 elf_entry_pt(void) 378 { 379 return (ENTRY(lml_main.lm_head)); 380 } 381 382 /* 383 * Unmap a given ELF shared object from the address space. 384 */ 385 static void 386 elf_unmap_so(Rt_map *lmp) 387 { 388 caddr_t addr; 389 size_t size; 390 Mmap *mmaps; 391 392 /* 393 * If this link map represents a relocatable object concatenation, then 394 * the image was simply generated in allocated memory. Free the memory. 395 * 396 * Note: the memory was originally allocated in the libelf:_elf_outmap 397 * routine and would normally have been free'd in elf_outsync(), but 398 * because we 'interpose' on that routine the memory wasn't free'd at 399 * that time. 400 */ 401 if (FLAGS(lmp) & FLG_RT_IMGALLOC) { 402 free((void *)ADDR(lmp)); 403 return; 404 } 405 406 /* 407 * If padding was enabled via rtld_db, then we have at least one page 408 * in front of the image - and possibly a trailing page. 409 * Unmap the front page first: 410 */ 411 if (PADSTART(lmp) != ADDR(lmp)) { 412 addr = (caddr_t)M_PTRUNC(PADSTART(lmp)); 413 size = ADDR(lmp) - (ulong_t)addr; 414 (void) munmap(addr, size); 415 } 416 417 /* 418 * Unmap any trailing padding. 419 */ 420 if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) > 421 M_PROUND(ADDR(lmp) + MSIZE(lmp))) { 422 addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp)); 423 size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr; 424 (void) munmap(addr, size); 425 } 426 427 /* 428 * Unmmap all mapped segments. 429 */ 430 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) 431 (void) munmap(mmaps->m_vaddr, mmaps->m_msize); 432 } 433 434 /* 435 * Determine if a dependency requires a particular version and if so verify 436 * that the version exists in the dependency. 437 */ 438 static int 439 elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp) 440 { 441 Verneed *vnd = VERNEED(clmp); 442 int _num, num = VERNEEDNUM(clmp); 443 char *cstrs = (char *)STRTAB(clmp); 444 Lm_list *lml = LIST(clmp); 445 446 /* 447 * Traverse the callers version needed information and determine if any 448 * specific versions are required from the dependency. 449 */ 450 DBG_CALL(Dbg_ver_need_title(LIST(clmp), NAME(clmp))); 451 for (_num = 1; _num <= num; _num++, 452 vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) { 453 Half cnt = vnd->vn_cnt; 454 Vernaux *vnap; 455 char *nstrs, *need; 456 457 /* 458 * Determine if a needed entry matches this dependency. 459 */ 460 need = (char *)(cstrs + vnd->vn_file); 461 if (strcmp(name, need) != 0) 462 continue; 463 464 if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) && 465 ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0)) 466 (void) printf(MSG_INTL(MSG_LDD_VER_FIND), name); 467 468 /* 469 * Validate that each version required actually exists in the 470 * dependency. 471 */ 472 nstrs = (char *)STRTAB(nlmp); 473 474 for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt; 475 cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) { 476 char *version, *define; 477 Verdef *vdf = VERDEF(nlmp); 478 ulong_t _num, num = VERDEFNUM(nlmp); 479 int found = 0; 480 481 version = (char *)(cstrs + vnap->vna_name); 482 DBG_CALL(Dbg_ver_need_entry(lml, 0, need, version)); 483 484 for (_num = 1; _num <= num; _num++, 485 vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) { 486 Verdaux *vdap; 487 488 if (vnap->vna_hash != vdf->vd_hash) 489 continue; 490 491 vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux); 492 define = (char *)(nstrs + vdap->vda_name); 493 if (strcmp(version, define) != 0) 494 continue; 495 496 found++; 497 break; 498 } 499 500 /* 501 * If we're being traced print out any matched version 502 * when the verbose (-v) option is in effect. Always 503 * print any unmatched versions. 504 */ 505 if (lml->lm_flags & LML_FLG_TRC_ENABLE) { 506 if (found) { 507 if (!(lml->lm_flags & LML_FLG_TRC_VERBOSE)) 508 continue; 509 510 (void) printf(MSG_ORIG(MSG_LDD_VER_FOUND), 511 need, version, NAME(nlmp)); 512 } else { 513 if (rtld_flags & RT_FL_SILENCERR) 514 continue; 515 516 (void) printf(MSG_INTL(MSG_LDD_VER_NFOUND), 517 need, version); 518 } 519 continue; 520 } 521 522 /* 523 * If the version hasn't been found then this is a 524 * candidate for a fatal error condition. Weak 525 * version definition requirements are silently 526 * ignored. Also, if the image inspected for a version 527 * definition has no versioning recorded at all then 528 * silently ignore this (this provides better backward 529 * compatibility to old images created prior to 530 * versioning being available). Both of these skipped 531 * diagnostics are available under tracing (see above). 532 */ 533 if ((found == 0) && (num != 0) && 534 (!(vnap->vna_flags & VER_FLG_WEAK))) { 535 eprintf(lml, ERR_FATAL, 536 MSG_INTL(MSG_VER_NFOUND), need, version, 537 NAME(clmp)); 538 return (0); 539 } 540 } 541 } 542 DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD)); 543 return (1); 544 } 545 546 /* 547 * Search through the dynamic section for DT_NEEDED entries and perform one 548 * of two functions. If only the first argument is specified then load the 549 * defined shared object, otherwise add the link map representing the defined 550 * link map the the dlopen list. 551 */ 552 static int 553 elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp) 554 { 555 Dyn *dyn; 556 ulong_t ndx = 0; 557 uint_t lazy = 0, flags = 0; 558 Word lmflags = lml->lm_flags; 559 Word lmtflags = lml->lm_tflags; 560 561 /* 562 * Process each shared object on needed list. 563 */ 564 if (DYN(clmp) == 0) 565 return (1); 566 567 for (dyn = (Dyn *)DYN(clmp); dyn->d_tag != DT_NULL; dyn++, ndx++) { 568 Dyninfo *dip = &DYNINFO(clmp)[ndx]; 569 Rt_map *nlmp = 0; 570 char *name; 571 int silent = 0; 572 Pnode *pnp; 573 574 switch (dyn->d_tag) { 575 case DT_POSFLAG_1: 576 if ((dyn->d_un.d_val & DF_P1_LAZYLOAD) && 577 !(lmtflags & LML_TFLG_NOLAZYLD)) 578 lazy = 1; 579 if (dyn->d_un.d_val & DF_P1_GROUPPERM) 580 flags = (FLG_RT_SETGROUP | FLG_RT_HANDLE); 581 continue; 582 case DT_NEEDED: 583 case DT_USED: 584 dip->di_flags |= FLG_DI_NEEDED; 585 if (flags) 586 dip->di_flags |= FLG_DI_GROUP; 587 588 name = (char *)STRTAB(clmp) + dyn->d_un.d_val; 589 590 /* 591 * NOTE, libc.so.1 can't be lazy loaded. Although a 592 * lazy position flag won't be produced when a RTLDINFO 593 * .dynamic entry is found (introduced with the UPM in 594 * Solaris 10), it was possible to mark libc for lazy 595 * loading on previous releases. To reduce the overhead 596 * of testing for this occurrence, only carry out this 597 * check for the first object on the link-map list 598 * (there aren't many applications built without libc). 599 */ 600 if (lazy && (lml->lm_head == clmp) && 601 (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0)) 602 lazy = 0; 603 604 /* 605 * Don't bring in lazy loaded objects yet unless we've 606 * been asked to attempt to load all available objects 607 * (crle(1) sets LD_FLAGS=loadavail). Even under 608 * RTLD_NOW we don't process this - RTLD_NOW will cause 609 * relocation processing which in turn might trigger 610 * lazy loading, but its possible that the object has a 611 * lazy loaded file with no bindings (i.e., it should 612 * never have been a dependency in the first place). 613 */ 614 if (lazy) { 615 if ((lmflags & LML_FLG_LOADAVAIL) == 0) { 616 LAZY(clmp)++; 617 lazy = flags = 0; 618 continue; 619 } 620 621 /* 622 * Silence any error messages - see description 623 * under elf_lookup_filtee(). 624 */ 625 if ((rtld_flags & RT_FL_SILENCERR) == 0) { 626 rtld_flags |= RT_FL_SILENCERR; 627 silent = 1; 628 } 629 } 630 break; 631 case DT_AUXILIARY: 632 dip->di_flags |= FLG_DI_AUXFLTR; 633 lazy = flags = 0; 634 continue; 635 case DT_SUNW_AUXILIARY: 636 dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR); 637 lazy = flags = 0; 638 continue; 639 case DT_FILTER: 640 dip->di_flags |= FLG_DI_STDFLTR; 641 lazy = flags = 0; 642 continue; 643 case DT_SUNW_FILTER: 644 dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR); 645 lazy = flags = 0; 646 continue; 647 default: 648 lazy = flags = 0; 649 continue; 650 } 651 652 DBG_CALL(Dbg_file_needed(clmp, name)); 653 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 654 dip->di_flags |= FLG_DI_PROCESSD; 655 656 /* 657 * Establish the objects name, load it and establish a binding 658 * with the caller. 659 */ 660 if (((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) || 661 ((nlmp = load_one(lml, lmco, pnp, clmp, MODE(clmp), 662 flags, 0)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0)) 663 nlmp = 0; 664 665 /* 666 * Clean up any infrastructure, including the removal of the 667 * error suppression state, if it had been previously set in 668 * this routine. 669 */ 670 if (pnp) 671 remove_pnode(pnp); 672 if (silent) 673 rtld_flags &= ~RT_FL_SILENCERR; 674 lazy = flags = 0; 675 if ((dip->di_info = (void *)nlmp) == 0) { 676 /* 677 * If the object could not be mapped, continue if error 678 * suppression is established or we're here with ldd(1). 679 */ 680 if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags & 681 (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE))) 682 continue; 683 else 684 return (0); 685 } 686 } 687 688 if (LAZY(clmp)) 689 lml->lm_lazy++; 690 691 return (1); 692 } 693 694 static int 695 elf_map_check(Lm_list *lml, const char *name, caddr_t vaddr, Off size) 696 { 697 prmap_t *maps, *_maps; 698 int pfd, num, _num; 699 caddr_t eaddr = vaddr + size; 700 int err; 701 702 /* 703 * If memory reservations have been established for alternative objects 704 * determine if this object falls within the reservation, if it does no 705 * further checking is required. 706 */ 707 if (rtld_flags & RT_FL_MEMRESV) { 708 Rtc_head *head = (Rtc_head *)config->c_bgn; 709 710 if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) && 711 (eaddr <= (caddr_t)(uintptr_t)head->ch_resend)) 712 return (0); 713 } 714 715 /* 716 * Determine the mappings presently in use by this process. 717 */ 718 if ((pfd = pr_open(lml)) == FD_UNAVAIL) 719 return (1); 720 721 if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) { 722 err = errno; 723 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name, 724 strerror(err)); 725 return (1); 726 } 727 728 if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0) 729 return (1); 730 731 if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) { 732 err = errno; 733 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name, 734 strerror(err)); 735 free(maps); 736 return (1); 737 } 738 739 /* 740 * Determine if the supplied address clashes with any of the present 741 * process mappings. 742 */ 743 for (_num = 0, _maps = maps; _num < num; _num++, _maps++) { 744 caddr_t _eaddr = _maps->pr_vaddr + _maps->pr_size; 745 Rt_map *lmp; 746 const char *str; 747 748 if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr)) 749 continue; 750 751 /* 752 * We have a memory clash. See if one of the known dynamic 753 * dependency mappings represents this space so as to provide 754 * the user a more meaningful message. 755 */ 756 if ((lmp = _caller(vaddr, 0)) != 0) 757 str = NAME(lmp); 758 else 759 str = MSG_INTL(MSG_STR_UNKNOWN); 760 761 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name, 762 EC_NATPTR(vaddr), EC_OFF(size), str); 763 return (1); 764 } 765 free(maps); 766 return (0); 767 } 768 769 /* 770 * Obtain a memory reservation. On newer systems, both MAP_ANON and MAP_ALIGN 771 * are used to obtained an aligned reservation from anonymous memory. If 772 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard 773 * reservation using the file as backing. 774 */ 775 static Am_ret 776 elf_map_reserve(Lm_list *lml, const char *name, caddr_t *maddr, Off msize, 777 int mperm, int fd, Xword align) 778 { 779 Am_ret amret; 780 int mflag = MAP_PRIVATE | MAP_NORESERVE; 781 782 #if defined(MAP_ALIGN) 783 if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) { 784 mflag |= MAP_ALIGN; 785 *maddr = (caddr_t)align; 786 } 787 #endif 788 if ((amret = anon_map(lml, maddr, msize, PROT_NONE, mflag)) == AM_ERROR) 789 return (amret); 790 791 if (amret == AM_OK) 792 return (AM_OK); 793 794 /* 795 * If an anonymous memory request failed (which should only be the 796 * case if it is unsupported on the system we're running on), establish 797 * the initial mapping directly from the file. 798 */ 799 *maddr = 0; 800 if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE, 801 fd, 0)) == MAP_FAILED) { 802 int err = errno; 803 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name, 804 strerror(err)); 805 return (AM_ERROR); 806 } 807 return (AM_NOSUP); 808 } 809 810 static void * 811 elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag, 812 int fd, Off foff) 813 { 814 #if defined(MAP_TEXT) && defined(MAP_INITDATA) 815 static int notd = 0; 816 817 /* 818 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate 819 * flag. 820 */ 821 if (notd == 0) { 822 if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) 823 mflag |= MAP_TEXT; 824 else 825 mflag |= MAP_INITDATA; 826 } 827 #endif 828 if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED) 829 return (0); 830 831 #if defined(MAP_TEXT) && defined(MAP_INITDATA) 832 if ((notd == 0) && (errno == EINVAL)) { 833 /* 834 * MAP_TEXT and MAP_INITDATA may not be supported on this 835 * platform, try again without. 836 */ 837 notd = 1; 838 mflag &= ~(MAP_TEXT | MAP_INITDATA); 839 840 return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff)); 841 } 842 #endif 843 return (MAP_FAILED); 844 } 845 846 /* 847 * Map in a file. 848 */ 849 static caddr_t 850 elf_map_it( 851 Lm_list *lml, /* link-map list */ 852 const char *name, /* actual name stored for pathname */ 853 Off fsize, /* total mapping claim of the file */ 854 Ehdr *ehdr, /* ELF header of file */ 855 Phdr *fphdr, /* first loadable Phdr */ 856 Phdr *lphdr, /* last loadable Phdr */ 857 Phdr **rrphdr, /* return first Phdr in reservation */ 858 caddr_t *rraddr, /* return start of reservation */ 859 Off *rrsize, /* return total size of reservation */ 860 int fixed, /* image is resolved to a fixed addr */ 861 int fd, /* images file descriptor */ 862 Xword align, /* image segments maximum alignment */ 863 Mmap *mmaps, /* mmap information array and */ 864 uint_t *mmapcnt) /* mapping count */ 865 { 866 caddr_t raddr; /* reservation address */ 867 Off rsize; /* reservation size */ 868 Phdr *phdr; /* working program header poiner */ 869 caddr_t maddr; /* working mmap address */ 870 caddr_t faddr; /* working file address */ 871 size_t padsize; /* object padding requirement */ 872 size_t padpsize = 0; /* padding size rounded to next page */ 873 size_t padmsize = 0; /* padding size rounded for alignment */ 874 int skipfseg; /* skip mapping first segment */ 875 int mperm; /* segment permissions */ 876 Am_ret amret = AM_NOSUP; 877 878 /* 879 * If padding is required extend both the front and rear of the image. 880 * To insure the image itself is mapped at the correct alignment the 881 * initial padding is rounded up to the nearest page. Once the image is 882 * mapped the excess can be pruned to the nearest page required for the 883 * actual padding itself. 884 */ 885 if ((padsize = r_debug.rtd_objpad) != 0) { 886 padpsize = M_PROUND(padsize); 887 if (fixed) 888 padmsize = padpsize; 889 else 890 padmsize = S_ROUND(padsize, align); 891 } 892 893 /* 894 * Determine the initial permissions used to map in the first segment. 895 * If this segments memsz is greater that its filesz then the difference 896 * must be zeroed. Make sure this segment is writable. 897 */ 898 mperm = 0; 899 if (fphdr->p_flags & PF_R) 900 mperm |= PROT_READ; 901 if (fphdr->p_flags & PF_X) 902 mperm |= PROT_EXEC; 903 if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz)) 904 mperm |= PROT_WRITE; 905 906 /* 907 * Determine whether or not to let system reserve address space based on 908 * whether this is a dynamic executable (addresses in object are fixed) 909 * or a shared object (addresses in object are relative to the objects' 910 * base). 911 */ 912 if (fixed) { 913 /* 914 * Determine the reservation address and size, and insure that 915 * this reservation isn't already in use. 916 */ 917 faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr); 918 raddr = maddr - padpsize; 919 rsize = fsize + padpsize + padsize; 920 921 if (lml_main.lm_head) { 922 if (elf_map_check(lml, name, raddr, rsize) != 0) 923 return (0); 924 } 925 926 /* 927 * As this is a fixed image, all segments must be individually 928 * mapped. 929 */ 930 skipfseg = 0; 931 932 } else { 933 size_t esize; 934 935 /* 936 * If this isn't a fixed image, reserve enough address space for 937 * the entire image to be mapped. The amount of reservation is 938 * the range between the beginning of the first, and end of the 939 * last loadable segment, together with any padding, plus the 940 * alignment of the first segment. 941 * 942 * The optimal reservation is made as a no-reserve mapping from 943 * anonymous memory. Each segment is then mapped into this 944 * reservation. If the anonymous mapping capability isn't 945 * available, the reservation is obtained from the file itself. 946 * In this case the first segment of the image is mapped as part 947 * of the reservation, thus only the following segments need to 948 * be remapped. 949 */ 950 rsize = fsize + padmsize + padsize; 951 if ((amret = elf_map_reserve(lml, name, &raddr, rsize, mperm, 952 fd, align)) == AM_ERROR) 953 return (0); 954 maddr = raddr + padmsize; 955 faddr = (caddr_t)S_ROUND((Off)maddr, align); 956 957 /* 958 * If this reservation has been obtained from anonymous memory, 959 * then all segments must be individually mapped. Otherwise, 960 * the first segment heads the reservation. 961 */ 962 if (amret == AM_OK) 963 skipfseg = 0; 964 else 965 skipfseg = 1; 966 967 /* 968 * For backward compatibility (where MAP_ALIGN isn't available), 969 * insure the alignment of the reservation is adequate for this 970 * object, and if not remap the object to obtain the correct 971 * alignment. 972 */ 973 if (faddr != maddr) { 974 (void) munmap(raddr, rsize); 975 976 rsize += align; 977 if ((amret = elf_map_reserve(lml, name, &raddr, rsize, 978 mperm, fd, align)) == AM_ERROR) 979 return (0); 980 981 maddr = faddr = (caddr_t)S_ROUND((Off)(raddr + 982 padpsize), align); 983 984 esize = maddr - raddr + padpsize; 985 986 /* 987 * As ths image has been realigned, the first segment 988 * of the file needs to be remapped to its correct 989 * location. 990 */ 991 skipfseg = 0; 992 } else 993 esize = padmsize - padpsize; 994 995 /* 996 * If this reservation included padding, remove any excess for 997 * the start of the image (the padding was adjusted to insure 998 * the image was aligned appropriately). 999 */ 1000 if (esize) { 1001 (void) munmap(raddr, esize); 1002 raddr += esize; 1003 rsize -= esize; 1004 } 1005 } 1006 1007 /* 1008 * At this point we know the initial location of the image, and its 1009 * size. Pass these back to the caller for inclusion in the link-map 1010 * that will eventually be created. 1011 */ 1012 *rraddr = raddr; 1013 *rrsize = rsize; 1014 1015 /* 1016 * The first loadable segment is now pointed to by maddr. This segment 1017 * will eventually contain the elf header and program headers, so reset 1018 * the program header. Pass this back to the caller for inclusion in 1019 * the link-map so it can be used for later unmapping operations. 1020 */ 1021 /* LINTED */ 1022 *rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff); 1023 1024 /* 1025 * If padding is required at the front of the image, obtain that now. 1026 * Note, if we've already obtained a reservation from anonymous memory 1027 * then this reservation will already include suitable padding. 1028 * Otherwise this reservation is backed by the file, or in the case of 1029 * a fixed image, doesn't yet exist. Map the padding so that it is 1030 * suitably protected (PROT_NONE), and insure the first segment of the 1031 * file is mapped to its correct location. 1032 */ 1033 if (padsize) { 1034 if (amret == AM_NOSUP) { 1035 if (dz_map(lml, raddr, padpsize, PROT_NONE, 1036 (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) == 1037 MAP_FAILED) 1038 return (0); 1039 1040 skipfseg = 0; 1041 } 1042 rsize -= padpsize; 1043 } 1044 1045 /* 1046 * Map individual segments. For a fixed image, these will each be 1047 * unique mappings. For a reservation these will fill in the 1048 * reservation. 1049 */ 1050 for (phdr = fphdr; phdr <= lphdr; 1051 phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) { 1052 caddr_t addr; 1053 Off mlen, flen; 1054 size_t size; 1055 1056 /* 1057 * Skip non-loadable segments or segments that don't occupy 1058 * any memory. 1059 */ 1060 if (((phdr->p_type != PT_LOAD) && 1061 (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0)) 1062 continue; 1063 1064 /* 1065 * Establish this segments address relative to our base. 1066 */ 1067 addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr + 1068 (fixed ? 0 : faddr))); 1069 1070 /* 1071 * Determine the mapping protection from the segment attributes. 1072 * Also determine the etext address from the last loadable 1073 * segment which has permissions but no write access. 1074 */ 1075 mperm = 0; 1076 if (phdr->p_flags) { 1077 if (phdr->p_flags & PF_R) 1078 mperm |= PROT_READ; 1079 if (phdr->p_flags & PF_X) 1080 mperm |= PROT_EXEC; 1081 if (phdr->p_flags & PF_W) 1082 mperm |= PROT_WRITE; 1083 else 1084 fmap->fm_etext = phdr->p_vaddr + phdr->p_memsz + 1085 (ulong_t)(fixed ? 0 : faddr); 1086 } 1087 1088 /* 1089 * Determine the type of mapping required. 1090 */ 1091 if (phdr->p_type == PT_SUNWBSS) { 1092 /* 1093 * Potentially, we can defer the loading of any SUNWBSS 1094 * segment, depending on whether the symbols it provides 1095 * have been bound to. In this manner, large segments 1096 * that are interposed upon between shared libraries 1097 * may not require mapping. Note, that the mapping 1098 * information is recorded in our mapping descriptor at 1099 * this time. 1100 */ 1101 mlen = phdr->p_memsz; 1102 flen = 0; 1103 1104 } else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) { 1105 /* 1106 * If this segment has no backing file and no flags 1107 * specified, then it defines a reservation. At this 1108 * point all standard loadable segments will have been 1109 * processed. The segment reservation is mapped 1110 * directly from /dev/null. 1111 */ 1112 if (nu_map(lml, (caddr_t)addr, phdr->p_memsz, PROT_NONE, 1113 MAP_FIXED | MAP_PRIVATE) == MAP_FAILED) 1114 return (0); 1115 1116 mlen = phdr->p_memsz; 1117 flen = 0; 1118 1119 } else if (phdr->p_filesz == 0) { 1120 /* 1121 * If this segment has no backing file then it defines a 1122 * nobits segment and is mapped directly from /dev/zero. 1123 */ 1124 if (dz_map(lml, (caddr_t)addr, phdr->p_memsz, mperm, 1125 MAP_FIXED | MAP_PRIVATE) == MAP_FAILED) 1126 return (0); 1127 1128 mlen = phdr->p_memsz; 1129 flen = 0; 1130 1131 } else { 1132 Off foff; 1133 1134 /* 1135 * This mapping originates from the file. Determine the 1136 * file offset to which the mapping will be directed 1137 * (must be aligned) and how much to map (might be more 1138 * than the file in the case of .bss). 1139 */ 1140 foff = M_PTRUNC((ulong_t)phdr->p_offset); 1141 mlen = phdr->p_memsz + (phdr->p_offset - foff); 1142 flen = phdr->p_filesz + (phdr->p_offset - foff); 1143 1144 /* 1145 * If this is a non-fixed, non-anonymous mapping, and no 1146 * padding is involved, then the first loadable segment 1147 * is already part of the initial reservation. In this 1148 * case there is no need to remap this segment. 1149 */ 1150 if ((skipfseg == 0) || (phdr != fphdr)) { 1151 int phdr_mperm = mperm; 1152 /* 1153 * If this segments memsz is greater that its 1154 * filesz then the difference must be zeroed. 1155 * Make sure this segment is writable. 1156 */ 1157 if (phdr->p_memsz > phdr->p_filesz) 1158 mperm |= PROT_WRITE; 1159 1160 if (elf_map_textdata((caddr_t)addr, flen, 1161 mperm, phdr_mperm, 1162 (MAP_FIXED | MAP_PRIVATE), fd, foff) == 1163 MAP_FAILED) { 1164 int err = errno; 1165 eprintf(lml, ERR_FATAL, 1166 MSG_INTL(MSG_SYS_MMAP), name, 1167 strerror(err)); 1168 return (0); 1169 } 1170 } 1171 1172 /* 1173 * If the memory occupancy of the segment overflows the 1174 * definition in the file, we need to "zero out" the end 1175 * of the mapping we've established, and if necessary, 1176 * map some more space from /dev/zero. Note, zero'ed 1177 * memory must end on a double word boundary to satisfy 1178 * zero(). 1179 */ 1180 if (phdr->p_memsz > phdr->p_filesz) { 1181 caddr_t zaddr; 1182 size_t zlen, zplen; 1183 Off fend; 1184 1185 foff = (Off)(phdr->p_vaddr + phdr->p_filesz + 1186 (fixed ? 0 : faddr)); 1187 zaddr = (caddr_t)M_PROUND(foff); 1188 zplen = (size_t)(zaddr - foff); 1189 1190 fend = (Off)S_DROUND((size_t)(phdr->p_vaddr + 1191 phdr->p_memsz + (fixed ? 0 : faddr))); 1192 zlen = (size_t)(fend - foff); 1193 1194 /* 1195 * Determine whether the number of bytes that 1196 * must be zero'ed overflow to the next page. 1197 * If not, simply clear the exact bytes 1198 * (filesz to memsz) from this page. Otherwise, 1199 * clear the remaining bytes of this page, and 1200 * map an following pages from /dev/zero. 1201 */ 1202 if (zlen < zplen) 1203 zero((caddr_t)foff, (long)zlen); 1204 else { 1205 zero((caddr_t)foff, (long)zplen); 1206 1207 if ((zlen = (fend - (Off)zaddr)) > 0) { 1208 if (dz_map(lml, zaddr, zlen, 1209 mperm, 1210 MAP_FIXED | MAP_PRIVATE) == 1211 MAP_FAILED) 1212 return (0); 1213 } 1214 } 1215 } 1216 } 1217 1218 /* 1219 * Unmap anything from the last mapping address to this one and 1220 * update the mapping claim pointer. 1221 */ 1222 if ((fixed == 0) && ((size = addr - maddr) != 0)) { 1223 (void) munmap(maddr, size); 1224 rsize -= size; 1225 } 1226 1227 /* 1228 * Retain this segments mapping information. 1229 */ 1230 mmaps[*mmapcnt].m_vaddr = addr; 1231 mmaps[*mmapcnt].m_msize = mlen; 1232 mmaps[*mmapcnt].m_fsize = flen; 1233 mmaps[*mmapcnt].m_perm = mperm; 1234 (*mmapcnt)++; 1235 1236 maddr = addr + M_PROUND(mlen); 1237 rsize -= M_PROUND(mlen); 1238 } 1239 1240 /* 1241 * If padding is required at the end of the image, obtain that now. 1242 * Note, if we've already obtained a reservation from anonymous memory 1243 * then this reservation will already include suitable padding. 1244 */ 1245 if (padsize) { 1246 if (amret == AM_NOSUP) { 1247 /* 1248 * maddr is currently page aligned from the last segment 1249 * mapping. 1250 */ 1251 if (dz_map(lml, maddr, padsize, PROT_NONE, 1252 (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) == 1253 MAP_FAILED) 1254 return (0); 1255 } 1256 maddr += padsize; 1257 rsize -= padsize; 1258 } 1259 1260 /* 1261 * Unmap any final reservation. 1262 */ 1263 if ((fixed == 0) && (rsize != 0)) 1264 (void) munmap(maddr, rsize); 1265 1266 return (faddr); 1267 } 1268 1269 /* 1270 * A null symbol interpretor. Used if a filter has no associated filtees. 1271 */ 1272 /* ARGSUSED0 */ 1273 static Sym * 1274 elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo) 1275 { 1276 return ((Sym *)0); 1277 } 1278 1279 /* 1280 * Disable filtee use. 1281 */ 1282 static void 1283 elf_disable_filtee(Rt_map *lmp, Dyninfo *dip) 1284 { 1285 dip->di_info = 0; 1286 1287 if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) { 1288 /* 1289 * If this is an object filter, free the filtee's duplication. 1290 */ 1291 if (OBJFLTRNDX(lmp) != FLTR_DISABLED) { 1292 free(REFNAME(lmp)); 1293 REFNAME(lmp) = (char *)0; 1294 OBJFLTRNDX(lmp) = FLTR_DISABLED; 1295 1296 /* 1297 * Indicate that this filtee is no longer available. 1298 */ 1299 if (dip->di_flags & FLG_DI_STDFLTR) 1300 SYMINTP(lmp) = elf_null_find_sym; 1301 1302 } 1303 } else if (dip->di_flags & FLG_DI_STDFLTR) { 1304 /* 1305 * Indicate that this standard filtee is no longer available. 1306 */ 1307 if (SYMSFLTRCNT(lmp)) 1308 SYMSFLTRCNT(lmp)--; 1309 } else { 1310 /* 1311 * Indicate that this auxiliary filtee is no longer available. 1312 */ 1313 if (SYMAFLTRCNT(lmp)) 1314 SYMAFLTRCNT(lmp)--; 1315 } 1316 dip->di_flags &= ~MSK_DI_FILTER; 1317 } 1318 1319 /* 1320 * Find symbol interpreter - filters. 1321 * This function is called when the symbols from a shared object should 1322 * be resolved from the shared objects filtees instead of from within itself. 1323 * 1324 * A symbol name of 0 is used to trigger filtee loading. 1325 */ 1326 static Sym * 1327 _elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx) 1328 { 1329 const char *name = slp->sl_name, *filtees; 1330 Rt_map *clmp = slp->sl_cmap; 1331 Rt_map *ilmp = slp->sl_imap; 1332 Pnode *pnp, **pnpp; 1333 int any; 1334 Dyninfo *dip = &DYNINFO(ilmp)[ndx]; 1335 Lm_list *lml = LIST(ilmp); 1336 1337 /* 1338 * Indicate that the filter has been used. If a binding already exists 1339 * to the caller, indicate that this object is referenced. This insures 1340 * we don't generate false unreferenced diagnostics from ldd -u/U or 1341 * debugging. Don't create a binding regardless, as this filter may 1342 * have been dlopen()'ed. 1343 */ 1344 if (name && (ilmp != clmp)) { 1345 Word tracing = (LIST(clmp)->lm_flags & 1346 (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED)); 1347 1348 if (tracing || DBG_ENABLED) { 1349 Bnd_desc ** bdpp; 1350 Aliste off; 1351 1352 FLAGS1(ilmp) |= FL1_RT_USED; 1353 1354 if ((tracing & LML_FLG_TRC_UNREF) || DBG_ENABLED) { 1355 for (ALIST_TRAVERSE(CALLERS(ilmp), off, bdpp)) { 1356 Bnd_desc * bdp = *bdpp; 1357 1358 if (bdp->b_caller == clmp) { 1359 bdp->b_flags |= BND_REFER; 1360 break; 1361 } 1362 } 1363 } 1364 } 1365 } 1366 1367 /* 1368 * If this is the first call to process this filter, establish the 1369 * filtee list. If a configuration file exists, determine if any 1370 * filtee associations for this filter, and its filtee reference, are 1371 * defined. Otherwise, process the filtee reference. Any token 1372 * expansion is also completed at this point (i.e., $PLATFORM). 1373 */ 1374 filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val; 1375 if (dip->di_info == 0) { 1376 if (rtld_flags2 & RT_FL2_FLTCFG) 1377 dip->di_info = elf_config_flt(lml, PATHNAME(ilmp), 1378 filtees); 1379 1380 if (dip->di_info == 0) { 1381 DBG_CALL(Dbg_file_filter(lml, NAME(ilmp), filtees, 0)); 1382 if ((lml->lm_flags & 1383 (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) && 1384 ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0)) 1385 (void) printf(MSG_INTL(MSG_LDD_FIL_FILTER), 1386 NAME(ilmp), filtees); 1387 1388 if ((dip->di_info = (void *)expand_paths(ilmp, 1389 filtees, PN_SER_FILTEE, 0)) == 0) { 1390 elf_disable_filtee(ilmp, dip); 1391 return ((Sym *)0); 1392 } 1393 } 1394 } 1395 1396 /* 1397 * Traverse the filtee list, dlopen()'ing any objects specified and 1398 * using their group handle to lookup the symbol. 1399 */ 1400 for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp; 1401 pnpp = &pnp->p_next, pnp = *pnpp) { 1402 int mode; 1403 Grp_hdl *ghp; 1404 Rt_map *nlmp = 0; 1405 1406 if (pnp->p_len == 0) 1407 continue; 1408 1409 /* 1410 * Establish the mode of the filtee from the filter. As filtees 1411 * are loaded via a dlopen(), make sure that RTLD_GROUP is set 1412 * and the filtees aren't global. It would be nice to have 1413 * RTLD_FIRST used here also, but as filters got out long before 1414 * RTLD_FIRST was introduced it's a little too late now. 1415 */ 1416 mode = MODE(ilmp) | RTLD_GROUP; 1417 mode &= ~RTLD_GLOBAL; 1418 1419 /* 1420 * Insure that any auxiliary filter can locate symbols from its 1421 * caller. 1422 */ 1423 if (dip->di_flags & FLG_DI_AUXFLTR) 1424 mode |= RTLD_PARENT; 1425 1426 /* 1427 * Process any hardware capability directory. Establish a new 1428 * link-map control list from which to analyze any newly added 1429 * objects. 1430 */ 1431 if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) { 1432 Lm_cntl *lmc; 1433 Aliste lmco; 1434 1435 if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) { 1436 if ((lmc = alist_append(&(lml->lm_lists), 0, 1437 sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0) 1438 return ((Sym *)0); 1439 lmco = (Aliste)((char *)lmc - 1440 (char *)lml->lm_lists); 1441 } else { 1442 lmc = 0; 1443 lmco = ALO_DATA; 1444 } 1445 1446 pnp = hwcap_filtees(pnpp, lmco, lmc, dip, ilmp, filtees, 1447 mode, (FLG_RT_HANDLE | FLG_RT_HWCAP)); 1448 1449 /* 1450 * Now that any hardware capability objects have been 1451 * processed, remove any link-map control list. 1452 */ 1453 if (lmc) 1454 remove_cntl(lml, lmco); 1455 } 1456 1457 if (pnp->p_len == 0) 1458 continue; 1459 1460 /* 1461 * Process an individual filtee. 1462 */ 1463 if (pnp->p_info == 0) { 1464 const char *filtee = pnp->p_name; 1465 int audit = 0; 1466 1467 DBG_CALL(Dbg_file_filtee(lml, NAME(ilmp), filtee, 0)); 1468 1469 ghp = 0; 1470 1471 /* 1472 * Determine if the reference link map is already 1473 * loaded. As an optimization compare the filtee with 1474 * our interpretor. The most common filter is 1475 * libdl.so.1, which is a filter on ld.so.1. 1476 */ 1477 #if defined(_ELF64) 1478 if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) { 1479 #else 1480 if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) { 1481 #endif 1482 /* 1483 * Create an association between ld.so.1 and 1484 * the filter. 1485 */ 1486 nlmp = lml_rtld.lm_head; 1487 if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp, 1488 (GPH_LDSO | GPH_FIRST | GPH_FILTEE))) == 0) 1489 nlmp = 0; 1490 1491 /* 1492 * Establish the filter handle to prevent any 1493 * recursion. 1494 */ 1495 if (nlmp && ghp) 1496 pnp->p_info = (void *)ghp; 1497 1498 /* 1499 * Audit the filter/filtee established. Ignore 1500 * any return from the auditor, as we can't 1501 * allow ignore filtering to ld.so.1, otherwise 1502 * nothing is going to work. 1503 */ 1504 if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) & 1505 LML_TFLG_AUD_OBJFILTER)) 1506 (void) audit_objfilter(ilmp, filtees, 1507 nlmp, 0); 1508 1509 } else { 1510 Rej_desc rej = { 0 }; 1511 Lm_cntl *lmc; 1512 Aliste lmco; 1513 1514 /* 1515 * Establish a new link-map control list from 1516 * which to analyze any newly added objects. 1517 */ 1518 if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) { 1519 if ((lmc = 1520 alist_append(&(lml->lm_lists), 0, 1521 sizeof (Lm_cntl), 1522 AL_CNT_LMLISTS)) == 0) 1523 return ((Sym *)0); 1524 lmco = (Aliste)((char *)lmc - 1525 (char *)lml->lm_lists); 1526 } else { 1527 lmc = 0; 1528 lmco = ALO_DATA; 1529 } 1530 1531 /* 1532 * Load the filtee. 1533 */ 1534 if ((nlmp = load_path(lml, lmco, filtee, ilmp, 1535 mode, FLG_RT_HANDLE, &ghp, 0, &rej)) == 0) { 1536 file_notfound(LIST(ilmp), filtee, ilmp, 1537 FLG_RT_HANDLE, &rej); 1538 remove_rej(&rej); 1539 } 1540 1541 /* 1542 * Establish the filter handle to prevent any 1543 * recursion. 1544 */ 1545 if (nlmp && ghp) { 1546 ghp->gh_flags |= GPH_FILTEE; 1547 pnp->p_info = (void *)ghp; 1548 } 1549 1550 /* 1551 * Audit the filter/filtee established. A 1552 * return of 0 indicates the auditor wishes to 1553 * ignore this filtee. 1554 */ 1555 if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) & 1556 LML_TFLG_AUD_OBJFILTER)) { 1557 if (audit_objfilter(ilmp, filtees, 1558 nlmp, 0) == 0) { 1559 audit = 1; 1560 nlmp = 0; 1561 } 1562 } 1563 1564 /* 1565 * Finish processing the objects associated with 1566 * this request. Create an association between 1567 * this object and the originating filter to 1568 * provide sufficient information to tear down 1569 * this filtee if necessary. 1570 */ 1571 if (nlmp && ghp && 1572 ((analyze_lmc(lml, lmco, nlmp) == 0) || 1573 (relocate_lmc(lml, lmco, ilmp, nlmp) == 0))) 1574 nlmp = 0; 1575 1576 /* 1577 * If the filtee has been successfully 1578 * processed, then create an association 1579 * between the filter and filtee. This 1580 * association provides sufficient information 1581 * to tear down the filter and filtee if 1582 * necessary. 1583 */ 1584 DBG_CALL(Dbg_file_hdl_title(DBG_DEP_ADD)); 1585 if (nlmp && ghp && 1586 (hdl_add(ghp, ilmp, GPD_FILTER) == 0)) 1587 nlmp = 0; 1588 1589 /* 1590 * If this filtee loading has failed, and we've 1591 * created a new link-map control list to which 1592 * this request has added objects, then remove 1593 * all the objects that have been associated to 1594 * this request. 1595 */ 1596 if ((nlmp == 0) && lmc && lmc->lc_head) 1597 remove_lmc(lml, clmp, lmc, lmco, name); 1598 1599 /* 1600 * Remove any link-map control list that was 1601 * created. 1602 */ 1603 if (lmc) 1604 remove_cntl(lml, lmco); 1605 } 1606 1607 /* 1608 * Generate a diagnostic if the filtee couldn't be 1609 * loaded, null out the pnode entry, and continue 1610 * the search. Otherwise, retain this group handle 1611 * for future symbol searches. 1612 */ 1613 if (nlmp == 0) { 1614 DBG_CALL(Dbg_file_filtee(lml, 0, filtee, 1615 audit)); 1616 1617 pnp->p_info = 0; 1618 pnp->p_len = 0; 1619 continue; 1620 } 1621 } 1622 1623 ghp = (Grp_hdl *)pnp->p_info; 1624 1625 /* 1626 * If we're just here to trigger filtee loading skip the symbol 1627 * lookup so we'll continue looking for additional filtees. 1628 */ 1629 if (name) { 1630 Grp_desc *gdp; 1631 Sym *sym = 0; 1632 Aliste off; 1633 Slookup sl = *slp; 1634 1635 sl.sl_flags |= LKUP_FIRST; 1636 any++; 1637 1638 /* 1639 * Look for the symbol in the handles dependencies. 1640 */ 1641 for (ALIST_TRAVERSE(ghp->gh_depends, off, gdp)) { 1642 if ((gdp->gd_flags & GPD_AVAIL) == 0) 1643 continue; 1644 1645 /* 1646 * If our parent is a dependency don't look at 1647 * it (otherwise we are in a recursive loop). 1648 * This situation can occur with auxiliary 1649 * filters if the filtee has a dependency on the 1650 * filter. This dependency isn't necessary as 1651 * auxiliary filters are opened RTLD_PARENT, but 1652 * users may still unknowingly add an explicit 1653 * dependency to the parent. 1654 */ 1655 if ((sl.sl_imap = gdp->gd_depend) == ilmp) 1656 continue; 1657 1658 if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp, 1659 binfo)) != 0) || 1660 (ghp->gh_flags & GPH_FIRST)) 1661 break; 1662 } 1663 1664 /* 1665 * If a symbol has been found, indicate the binding 1666 * and return the symbol. 1667 */ 1668 if (sym) { 1669 *binfo |= DBG_BINFO_FILTEE; 1670 return (sym); 1671 } 1672 } 1673 1674 /* 1675 * If this object is tagged to terminate filtee processing we're 1676 * done. 1677 */ 1678 if (FLAGS1(ghp->gh_ownlmp) & FL1_RT_ENDFILTE) 1679 break; 1680 } 1681 1682 /* 1683 * If we're just here to trigger filtee loading then we're done. 1684 */ 1685 if (name == 0) 1686 return ((Sym *)0); 1687 1688 /* 1689 * If no filtees have been found for a filter, clean up any Pnode 1690 * structures and disable their search completely. For auxiliary 1691 * filters we can reselect the symbol search function so that we never 1692 * enter this routine again for this object. For standard filters we 1693 * use the null symbol routine. 1694 */ 1695 if (any == 0) { 1696 remove_pnode((Pnode *)dip->di_info); 1697 elf_disable_filtee(ilmp, dip); 1698 return ((Sym *)0); 1699 } 1700 1701 return ((Sym *)0); 1702 } 1703 1704 /* 1705 * Focal point for disabling error messages for auxiliary filters. As an 1706 * auxiliary filter allows for filtee use, but provides a fallback should a 1707 * filtee not exist (or fail to load), any errors generated as a consequence of 1708 * trying to load the filtees are typically suppressed. Setting RT_FL_SILENCERR 1709 * suppresses errors generated by eprint(), but insures a debug diagnostic is 1710 * produced. ldd(1) employs printf(), and here, the selection of whether to 1711 * print a diagnostic in regards to auxiliary filters is a little more complex. 1712 * 1713 * . The determination of whether to produce an ldd message, or a fatal 1714 * error message is driven by LML_FLG_TRC_ENABLE. 1715 * . More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN, 1716 * (ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s), 1717 * and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u). 1718 * 1719 * . If the calling object is lddstub, then several classes of message are 1720 * suppressed. The user isn't trying to diagnose lddstub, this is simply 1721 * a stub executable employed to preload a user specified library against. 1722 * 1723 * . If RT_FL_SILENCERR is in effect then any generic ldd() messages should 1724 * be suppressed. All detailed ldd messages should still be produced. 1725 */ 1726 Sym * 1727 elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx) 1728 { 1729 Sym *sym; 1730 Dyninfo *dip = &DYNINFO(slp->sl_imap)[ndx]; 1731 int silent = 0; 1732 1733 /* 1734 * Make sure this entry is still acting as a filter. We may have tried 1735 * to process this previously, and disabled it if the filtee couldn't 1736 * be processed. However, other entries may provide different filtees 1737 * that are yet to be completed. 1738 */ 1739 if (dip->di_flags == 0) 1740 return ((Sym *)0); 1741 1742 /* 1743 * Indicate whether an error message is required should this filtee not 1744 * be found, based on the type of filter. 1745 */ 1746 if ((dip->di_flags & FLG_DI_AUXFLTR) && 1747 ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) { 1748 rtld_flags |= RT_FL_SILENCERR; 1749 silent = 1; 1750 } 1751 1752 sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx); 1753 1754 if (silent) 1755 rtld_flags &= ~RT_FL_SILENCERR; 1756 1757 return (sym); 1758 } 1759 1760 /* 1761 * Compute the elf hash value (as defined in the ELF access library). 1762 * The form of the hash table is: 1763 * 1764 * |--------------| 1765 * | # of buckets | 1766 * |--------------| 1767 * | # of chains | 1768 * |--------------| 1769 * | bucket[] | 1770 * |--------------| 1771 * | chain[] | 1772 * |--------------| 1773 */ 1774 ulong_t 1775 elf_hash(const char *name) 1776 { 1777 uint_t hval = 0; 1778 1779 while (*name) { 1780 uint_t g; 1781 hval = (hval << 4) + *name++; 1782 if ((g = (hval & 0xf0000000)) != 0) 1783 hval ^= g >> 24; 1784 hval &= ~g; 1785 } 1786 return ((ulong_t)hval); 1787 } 1788 1789 /* 1790 * If flag argument has LKUP_SPEC set, we treat undefined symbols of type 1791 * function specially in the executable - if they have a value, even though 1792 * undefined, we use that value. This allows us to associate all references 1793 * to a function's address to a single place in the process: the plt entry 1794 * for that function in the executable. Calls to lookup from plt binding 1795 * routines do NOT set LKUP_SPEC in the flag. 1796 */ 1797 Sym * 1798 elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo) 1799 { 1800 const char *name = slp->sl_name; 1801 Rt_map *ilmp = slp->sl_imap; 1802 ulong_t hash = slp->sl_hash; 1803 uint_t ndx, htmp, buckets, *chainptr; 1804 Sym *sym, *symtabptr; 1805 char *strtabptr, *strtabname; 1806 uint_t flags1; 1807 Syminfo *sip; 1808 1809 /* 1810 * If we're only here to establish a symbols index, skip the diagnostic 1811 * used to trace a symbol search. 1812 */ 1813 if ((slp->sl_flags & LKUP_SYMNDX) == 0) 1814 DBG_CALL(Dbg_syms_lookup(ilmp, name, MSG_ORIG(MSG_STR_ELF))); 1815 1816 if (HASH(ilmp) == 0) 1817 return ((Sym *)0); 1818 1819 buckets = HASH(ilmp)[0]; 1820 /* LINTED */ 1821 htmp = (uint_t)hash % buckets; 1822 1823 /* 1824 * Get the first symbol on hash chain and initialize the string 1825 * and symbol table pointers. 1826 */ 1827 if ((ndx = HASH(ilmp)[htmp + 2]) == 0) 1828 return ((Sym *)0); 1829 1830 chainptr = HASH(ilmp) + 2 + buckets; 1831 strtabptr = STRTAB(ilmp); 1832 symtabptr = SYMTAB(ilmp); 1833 1834 while (ndx) { 1835 sym = symtabptr + ndx; 1836 strtabname = strtabptr + sym->st_name; 1837 1838 /* 1839 * Compare the symbol found with the name required. If the 1840 * names don't match continue with the next hash entry. 1841 */ 1842 if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) { 1843 if ((ndx = chainptr[ndx]) != 0) 1844 continue; 1845 return ((Sym *)0); 1846 } 1847 1848 /* 1849 * To accomodate objects built with the GNU ld, we quietly 1850 * ignore symbols with a version that is outside the range 1851 * of the valid versions supplied by the file. See the 1852 * comment that accompanies the VERSYM_INVALID macro in libld.h 1853 * for additional details. 1854 */ 1855 if (VERNDX_INVALID(sym->st_shndx, VERDEFNUM(ilmp), 1856 VERSYM(ilmp), VERSYM(ilmp)[ndx])) { 1857 DBG_CALL(Dbg_syms_ignore_badver(ilmp, name, 1858 ndx, VERSYM(ilmp)[ndx])); 1859 if ((ndx = chainptr[ndx]) != 0) 1860 continue; 1861 return ((Sym *)0); 1862 } 1863 1864 /* 1865 * If we're only here to establish a symbols index, we're done. 1866 */ 1867 if (slp->sl_flags & LKUP_SYMNDX) 1868 return (sym); 1869 1870 /* 1871 * If we find a match and the symbol is defined, return the 1872 * symbol pointer and the link map in which it was found. 1873 */ 1874 if (sym->st_shndx != SHN_UNDEF) { 1875 *dlmp = ilmp; 1876 *binfo |= DBG_BINFO_FOUND; 1877 if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) || 1878 ((FLAGS(ilmp) & FLG_RT_SYMINTPO) && 1879 is_sym_interposer(ilmp, sym))) 1880 *binfo |= DBG_BINFO_INTERPOSE; 1881 break; 1882 1883 /* 1884 * If we find a match and the symbol is undefined, the 1885 * symbol type is a function, and the value of the symbol 1886 * is non zero, then this is a special case. This allows 1887 * the resolution of a function address to the plt[] entry. 1888 * See SPARC ABI, Dynamic Linking, Function Addresses for 1889 * more details. 1890 */ 1891 } else if ((slp->sl_flags & LKUP_SPEC) && 1892 (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) && 1893 (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) { 1894 *dlmp = ilmp; 1895 *binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR); 1896 if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) || 1897 ((FLAGS(ilmp) & FLG_RT_SYMINTPO) && 1898 is_sym_interposer(ilmp, sym))) 1899 *binfo |= DBG_BINFO_INTERPOSE; 1900 return (sym); 1901 } 1902 1903 /* 1904 * Undefined symbol. 1905 */ 1906 return ((Sym *)0); 1907 } 1908 1909 /* 1910 * We've found a match. Determine if the defining object contains 1911 * symbol binding information. 1912 */ 1913 if ((sip = SYMINFO(ilmp)) != 0) 1914 sip += ndx; 1915 1916 /* 1917 * If this is a direct binding request, but the symbol definition has 1918 * disabled directly binding to it (presumably because the symbol 1919 * definition has been changed since the referring object was built), 1920 * indicate this failure so that the caller can fall back to a standard 1921 * symbol search. Clear any debug binding information for cleanliness. 1922 */ 1923 if (sip && (slp->sl_flags & LKUP_DIRECT) && 1924 (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) { 1925 *binfo |= BINFO_DIRECTDIS; 1926 *binfo &= ~DBG_BINFO_MSK; 1927 return ((Sym *)0); 1928 } 1929 1930 /* 1931 * Determine whether this object is acting as a filter. 1932 */ 1933 if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0) 1934 return (sym); 1935 1936 /* 1937 * Determine if this object offers per-symbol filtering, and if so, 1938 * whether this symbol references a filtee. 1939 */ 1940 if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) { 1941 /* 1942 * If this is a standard filter reference, and no standard 1943 * filtees remain to be inspected, we're done. If this is an 1944 * auxiliary filter reference, and no auxiliary filtees remain, 1945 * we'll fall through in case any object filtering is available. 1946 */ 1947 if ((sip->si_flags & SYMINFO_FLG_FILTER) && 1948 (SYMSFLTRCNT(ilmp) == 0)) 1949 return ((Sym *)0); 1950 1951 if ((sip->si_flags & SYMINFO_FLG_FILTER) || 1952 ((sip->si_flags & SYMINFO_FLG_AUXILIARY) && 1953 SYMAFLTRCNT(ilmp))) { 1954 Sym * fsym; 1955 1956 /* 1957 * This symbol has an associated filtee. Lookup the 1958 * symbol in the filtee, and if it is found return it. 1959 * If the symbol doesn't exist, and this is a standard 1960 * filter, return an error, otherwise fall through to 1961 * catch any object filtering that may be available. 1962 */ 1963 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 1964 sip->si_boundto)) != 0) 1965 return (fsym); 1966 if (sip->si_flags & SYMINFO_FLG_FILTER) 1967 return ((Sym *)0); 1968 } 1969 } 1970 1971 /* 1972 * Determine if this object provides global filtering. 1973 */ 1974 if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) { 1975 Sym * fsym; 1976 1977 if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) { 1978 /* 1979 * This object has an associated filtee. Lookup the 1980 * symbol in the filtee, and if it is found return it. 1981 * If the symbol doesn't exist, and this is a standard 1982 * filter, return and error, otherwise return the symbol 1983 * within the filter itself. 1984 */ 1985 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 1986 OBJFLTRNDX(ilmp))) != 0) 1987 return (fsym); 1988 } 1989 1990 if (flags1 & FL1_RT_OBJSFLTR) 1991 return ((Sym *)0); 1992 } 1993 return (sym); 1994 } 1995 1996 /* 1997 * Create a new Rt_map structure for an ELF object and initialize 1998 * all values. 1999 */ 2000 Rt_map * 2001 elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld, 2002 ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry, 2003 ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt) 2004 { 2005 Rt_map *lmp; 2006 ulong_t base, fltr = 0, audit = 0, cfile = 0, crle = 0; 2007 Xword rpath = 0; 2008 Ehdr *ehdr = (Ehdr *)addr; 2009 2010 DBG_CALL(Dbg_file_elf(lml, pname, (ulong_t)ld, addr, msize, entry, 2011 lml->lm_lmidstr, lmco)); 2012 2013 /* 2014 * Allocate space for the link-map and private elf information. Once 2015 * these are allocated and initialized, we can use remove_so(0, lmp) to 2016 * tear down the link-map should any failures occur. 2017 */ 2018 if ((lmp = calloc(sizeof (Rt_map), 1)) == 0) 2019 return (0); 2020 if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) { 2021 free(lmp); 2022 return (0); 2023 } 2024 2025 /* 2026 * All fields not filled in were set to 0 by calloc. 2027 */ 2028 ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname; 2029 DYN(lmp) = ld; 2030 ADDR(lmp) = addr; 2031 MSIZE(lmp) = msize; 2032 ENTRY(lmp) = (Addr)entry; 2033 SYMINTP(lmp) = elf_find_sym; 2034 ETEXT(lmp) = etext; 2035 FCT(lmp) = &elf_fct; 2036 LIST(lmp) = lml; 2037 PADSTART(lmp) = paddr; 2038 PADIMLEN(lmp) = padimsize; 2039 THREADID(lmp) = rt_thr_self(); 2040 OBJFLTRNDX(lmp) = FLTR_DISABLED; 2041 SORTVAL(lmp) = -1; 2042 2043 MMAPS(lmp) = mmaps; 2044 MMAPCNT(lmp) = mmapcnt; 2045 ASSERT(mmapcnt != 0); 2046 2047 /* 2048 * If this is a shared object, add the base address to each address. 2049 * if this is an executable, use address as is. 2050 */ 2051 if (ehdr->e_type == ET_EXEC) { 2052 base = 0; 2053 FLAGS(lmp) |= FLG_RT_FIXED; 2054 } else 2055 base = addr; 2056 2057 /* 2058 * Fill in rest of the link map entries with information from the file's 2059 * dynamic structure. 2060 */ 2061 if (ld) { 2062 uint_t dyncnt = 0; 2063 Xword pltpadsz = 0; 2064 Rti_desc *rti; 2065 2066 /* CSTYLED */ 2067 for ( ; ld->d_tag != DT_NULL; ++ld, dyncnt++) { 2068 switch ((Xword)ld->d_tag) { 2069 case DT_SYMTAB: 2070 SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2071 break; 2072 case DT_SUNW_SYMTAB: 2073 SUNWSYMTAB(lmp) = 2074 (void *)(ld->d_un.d_ptr + base); 2075 break; 2076 case DT_SUNW_SYMSZ: 2077 SUNWSYMSZ(lmp) = ld->d_un.d_val; 2078 break; 2079 case DT_STRTAB: 2080 STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2081 break; 2082 case DT_SYMENT: 2083 SYMENT(lmp) = ld->d_un.d_val; 2084 break; 2085 case DT_FEATURE_1: 2086 ld->d_un.d_val |= DTF_1_PARINIT; 2087 if (ld->d_un.d_val & DTF_1_CONFEXP) 2088 crle = 1; 2089 break; 2090 case DT_MOVESZ: 2091 MOVESZ(lmp) = ld->d_un.d_val; 2092 FLAGS(lmp) |= FLG_RT_MOVE; 2093 break; 2094 case DT_MOVEENT: 2095 MOVEENT(lmp) = ld->d_un.d_val; 2096 break; 2097 case DT_MOVETAB: 2098 MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2099 break; 2100 case DT_REL: 2101 case DT_RELA: 2102 /* 2103 * At this time we can only handle 1 type of 2104 * relocation per object. 2105 */ 2106 REL(lmp) = (void *)(ld->d_un.d_ptr + base); 2107 break; 2108 case DT_RELSZ: 2109 case DT_RELASZ: 2110 RELSZ(lmp) = ld->d_un.d_val; 2111 break; 2112 case DT_RELENT: 2113 case DT_RELAENT: 2114 RELENT(lmp) = ld->d_un.d_val; 2115 break; 2116 case DT_RELCOUNT: 2117 case DT_RELACOUNT: 2118 RELACOUNT(lmp) = (uint_t)ld->d_un.d_val; 2119 break; 2120 case DT_TEXTREL: 2121 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2122 break; 2123 case DT_HASH: 2124 HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2125 break; 2126 case DT_PLTGOT: 2127 PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2128 break; 2129 case DT_PLTRELSZ: 2130 PLTRELSZ(lmp) = ld->d_un.d_val; 2131 break; 2132 case DT_JMPREL: 2133 JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base); 2134 break; 2135 case DT_INIT: 2136 if (ld->d_un.d_ptr != NULL) 2137 INIT(lmp) = 2138 (void (*)())(ld->d_un.d_ptr + base); 2139 break; 2140 case DT_FINI: 2141 if (ld->d_un.d_ptr != NULL) 2142 FINI(lmp) = 2143 (void (*)())(ld->d_un.d_ptr + base); 2144 break; 2145 case DT_INIT_ARRAY: 2146 INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2147 base); 2148 break; 2149 case DT_INIT_ARRAYSZ: 2150 INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2151 break; 2152 case DT_FINI_ARRAY: 2153 FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2154 base); 2155 break; 2156 case DT_FINI_ARRAYSZ: 2157 FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2158 break; 2159 case DT_PREINIT_ARRAY: 2160 PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2161 base); 2162 break; 2163 case DT_PREINIT_ARRAYSZ: 2164 PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2165 break; 2166 case DT_RPATH: 2167 case DT_RUNPATH: 2168 rpath = ld->d_un.d_val; 2169 break; 2170 case DT_FILTER: 2171 fltr = ld->d_un.d_val; 2172 OBJFLTRNDX(lmp) = dyncnt; 2173 FLAGS1(lmp) |= FL1_RT_OBJSFLTR; 2174 break; 2175 case DT_AUXILIARY: 2176 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2177 fltr = ld->d_un.d_val; 2178 OBJFLTRNDX(lmp) = dyncnt; 2179 } 2180 FLAGS1(lmp) |= FL1_RT_OBJAFLTR; 2181 break; 2182 case DT_SUNW_FILTER: 2183 SYMSFLTRCNT(lmp)++; 2184 FLAGS1(lmp) |= FL1_RT_SYMSFLTR; 2185 break; 2186 case DT_SUNW_AUXILIARY: 2187 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2188 SYMAFLTRCNT(lmp)++; 2189 } 2190 FLAGS1(lmp) |= FL1_RT_SYMAFLTR; 2191 break; 2192 case DT_DEPAUDIT: 2193 if (!(rtld_flags & RT_FL_NOAUDIT)) 2194 audit = ld->d_un.d_val; 2195 break; 2196 case DT_CONFIG: 2197 cfile = ld->d_un.d_val; 2198 break; 2199 case DT_DEBUG: 2200 /* 2201 * DT_DEBUG entries are only created in 2202 * dynamic objects that require an interpretor 2203 * (ie. all dynamic executables and some shared 2204 * objects), and provide for a hand-shake with 2205 * debuggers. This entry is initialized to 2206 * zero by the link-editor. If a debugger has 2207 * us and updated this entry set the debugger 2208 * flag, and finish initializing the debugging 2209 * structure (see setup() also). Switch off any 2210 * configuration object use as most debuggers 2211 * can't handle fixed dynamic executables as 2212 * dependencies, and we can't handle requests 2213 * like object padding for alternative objects. 2214 */ 2215 if (ld->d_un.d_ptr) 2216 rtld_flags |= 2217 (RT_FL_DEBUGGER | RT_FL_NOOBJALT); 2218 ld->d_un.d_ptr = (Addr)&r_debug; 2219 break; 2220 case DT_VERNEED: 2221 VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr + 2222 base); 2223 break; 2224 case DT_VERNEEDNUM: 2225 /* LINTED */ 2226 VERNEEDNUM(lmp) = (int)ld->d_un.d_val; 2227 break; 2228 case DT_VERDEF: 2229 VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base); 2230 break; 2231 case DT_VERDEFNUM: 2232 /* LINTED */ 2233 VERDEFNUM(lmp) = (int)ld->d_un.d_val; 2234 break; 2235 case DT_VERSYM: 2236 VERSYM(lmp) = (Versym *)(ld->d_un.d_ptr + base); 2237 break; 2238 case DT_BIND_NOW: 2239 if ((ld->d_un.d_val & DF_BIND_NOW) && 2240 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2241 MODE(lmp) |= RTLD_NOW; 2242 MODE(lmp) &= ~RTLD_LAZY; 2243 } 2244 break; 2245 case DT_FLAGS: 2246 if (ld->d_un.d_val & DF_SYMBOLIC) 2247 FLAGS1(lmp) |= FL1_RT_SYMBOLIC; 2248 if (ld->d_un.d_val & DF_TEXTREL) 2249 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2250 if ((ld->d_un.d_val & DF_BIND_NOW) && 2251 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2252 MODE(lmp) |= RTLD_NOW; 2253 MODE(lmp) &= ~RTLD_LAZY; 2254 } 2255 /* 2256 * Capture any static TLS use, and enforce that 2257 * this object be non-deletable. 2258 */ 2259 if (ld->d_un.d_val & DF_STATIC_TLS) { 2260 FLAGS1(lmp) |= FL1_RT_TLSSTAT; 2261 MODE(lmp) |= RTLD_NODELETE; 2262 } 2263 break; 2264 case DT_FLAGS_1: 2265 if (ld->d_un.d_val & DF_1_DISPRELPND) 2266 FLAGS1(lmp) |= FL1_RT_DISPREL; 2267 if (ld->d_un.d_val & DF_1_GROUP) 2268 FLAGS(lmp) |= 2269 (FLG_RT_SETGROUP | FLG_RT_HANDLE); 2270 if ((ld->d_un.d_val & DF_1_NOW) && 2271 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2272 MODE(lmp) |= RTLD_NOW; 2273 MODE(lmp) &= ~RTLD_LAZY; 2274 } 2275 if (ld->d_un.d_val & DF_1_NODELETE) 2276 MODE(lmp) |= RTLD_NODELETE; 2277 if (ld->d_un.d_val & DF_1_INITFIRST) 2278 FLAGS(lmp) |= FLG_RT_INITFRST; 2279 if (ld->d_un.d_val & DF_1_NOOPEN) 2280 FLAGS(lmp) |= FLG_RT_NOOPEN; 2281 if (ld->d_un.d_val & DF_1_LOADFLTR) 2282 FLAGS(lmp) |= FLG_RT_LOADFLTR; 2283 if (ld->d_un.d_val & DF_1_NODUMP) 2284 FLAGS(lmp) |= FLG_RT_NODUMP; 2285 if (ld->d_un.d_val & DF_1_CONFALT) 2286 crle = 1; 2287 if (ld->d_un.d_val & DF_1_DIRECT) 2288 FLAGS1(lmp) |= FL1_RT_DIRECT; 2289 if (ld->d_un.d_val & DF_1_NODEFLIB) 2290 FLAGS1(lmp) |= FL1_RT_NODEFLIB; 2291 if (ld->d_un.d_val & DF_1_ENDFILTEE) 2292 FLAGS1(lmp) |= FL1_RT_ENDFILTE; 2293 if (ld->d_un.d_val & DF_1_TRANS) 2294 FLAGS(lmp) |= FLG_RT_TRANS; 2295 #ifndef EXPAND_RELATIVE 2296 if (ld->d_un.d_val & DF_1_ORIGIN) 2297 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2298 #endif 2299 /* 2300 * If this object identifies itself as an 2301 * interposer, but relocation processing has 2302 * already started, then demote it. It's too 2303 * late to guarantee complete interposition. 2304 */ 2305 if (ld->d_un.d_val & 2306 (DF_1_INTERPOSE | DF_1_SYMINTPOSE)) { 2307 if (lml->lm_flags & LML_FLG_STARTREL) { 2308 DBG_CALL(Dbg_util_intoolate(lmp)); 2309 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 2310 (void) printf( 2311 MSG_INTL(MSG_LDD_REL_ERR2), 2312 NAME(lmp)); 2313 } else if (ld->d_un.d_val & DF_1_INTERPOSE) 2314 FLAGS(lmp) |= FLG_RT_OBJINTPO; 2315 else 2316 FLAGS(lmp) |= FLG_RT_SYMINTPO; 2317 } 2318 break; 2319 case DT_SYMINFO: 2320 SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr + 2321 base); 2322 break; 2323 case DT_SYMINENT: 2324 SYMINENT(lmp) = ld->d_un.d_val; 2325 break; 2326 case DT_PLTPAD: 2327 PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base); 2328 break; 2329 case DT_PLTPADSZ: 2330 pltpadsz = ld->d_un.d_val; 2331 break; 2332 case DT_SUNW_RTLDINF: 2333 /* 2334 * Maintain a list of RTLDINFO structures. 2335 * Typically, libc is the only supplier, and 2336 * only one structure is provided. However, 2337 * multiple suppliers and multiple structures 2338 * are supported. For example, one structure 2339 * may provide thread_init, and another 2340 * structure may provide atexit reservations. 2341 */ 2342 if ((rti = alist_append(&lml->lm_rti, 0, 2343 sizeof (Rti_desc), AL_CNT_RTLDINFO)) == 0) { 2344 remove_so(0, lmp); 2345 return (0); 2346 } 2347 rti->rti_lmp = lmp; 2348 rti->rti_info = (void *)(ld->d_un.d_ptr + base); 2349 break; 2350 case DT_SUNW_SORTENT: 2351 SUNWSORTENT(lmp) = ld->d_un.d_val; 2352 break; 2353 case DT_SUNW_SYMSORT: 2354 SUNWSYMSORT(lmp) = 2355 (void *)(ld->d_un.d_ptr + base); 2356 break; 2357 case DT_SUNW_SYMSORTSZ: 2358 SUNWSYMSORTSZ(lmp) = ld->d_un.d_val; 2359 break; 2360 case DT_DEPRECATED_SPARC_REGISTER: 2361 case M_DT_REGISTER: 2362 FLAGS(lmp) |= FLG_RT_REGSYMS; 2363 break; 2364 case M_DT_PLTRESERVE: 2365 PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr + 2366 base); 2367 break; 2368 } 2369 } 2370 2371 2372 if (PLTPAD(lmp)) { 2373 if (pltpadsz == (Xword)0) 2374 PLTPAD(lmp) = 0; 2375 else 2376 PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) + 2377 pltpadsz); 2378 } 2379 2380 /* 2381 * Allocate Dynamic Info structure 2382 */ 2383 if ((DYNINFO(lmp) = calloc((size_t)dyncnt, 2384 sizeof (Dyninfo))) == 0) { 2385 remove_so(0, lmp); 2386 return (0); 2387 } 2388 DYNINFOCNT(lmp) = dyncnt; 2389 } 2390 2391 /* 2392 * A dynsym contains only global functions. We want to have 2393 * a version of it that also includes local functions, so that 2394 * dladdr() will be able to report names for local functions 2395 * when used to generate a stack trace for a stripped file. 2396 * This version of the dynsym is provided via DT_SUNW_SYMTAB. 2397 * 2398 * In producing DT_SUNW_SYMTAB, ld uses a non-obvious trick 2399 * in order to avoid having to have two copies of the global 2400 * symbols held in DT_SYMTAB: The local symbols are placed in 2401 * a separate section than the globals in the dynsym, but the 2402 * linker conspires to put the data for these two sections adjacent 2403 * to each other. DT_SUNW_SYMTAB points at the top of the local 2404 * symbols, and DT_SUNW_SYMSZ is the combined length of both tables. 2405 * 2406 * If the two sections are not adjacent, then something went wrong 2407 * at link time. We use ASSERT to kill the process if this is 2408 * a debug build. In a production build, we will silently ignore 2409 * the presence of the .ldynsym and proceed. We can detect this 2410 * situation by checking to see that DT_SYMTAB lies in 2411 * the range given by DT_SUNW_SYMTAB/DT_SUNW_SYMSZ. 2412 */ 2413 if ((SUNWSYMTAB(lmp) != NULL) && 2414 (((char *)SYMTAB(lmp) <= (char *)SUNWSYMTAB(lmp)) || 2415 (((char *)SYMTAB(lmp) >= 2416 (SUNWSYMSZ(lmp) + (char *)SUNWSYMTAB(lmp)))))) { 2417 ASSERT(0); 2418 SUNWSYMTAB(lmp) = NULL; 2419 SUNWSYMSZ(lmp) = 0; 2420 } 2421 2422 /* 2423 * If configuration file use hasn't been disabled, and a configuration 2424 * file hasn't already been set via an environment variable, see if any 2425 * application specific configuration file is specified. An LD_CONFIG 2426 * setting is used first, but if this image was generated via crle(1) 2427 * then a default configuration file is a fall-back. 2428 */ 2429 if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) { 2430 if (cfile) 2431 config->c_name = (const char *)(cfile + 2432 (char *)STRTAB(lmp)); 2433 else if (crle) { 2434 rtld_flags |= RT_FL_CONFAPP; 2435 #ifndef EXPAND_RELATIVE 2436 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2437 #endif 2438 } 2439 } 2440 2441 if (rpath) 2442 RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp)); 2443 if (fltr) { 2444 /* 2445 * If this object is a global filter, duplicate the filtee 2446 * string name(s) so that REFNAME() is available in core files. 2447 * This cludge was useful for debuggers at one point, but only 2448 * when the filtee name was an individual full path. 2449 */ 2450 if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) { 2451 remove_so(0, lmp); 2452 return (0); 2453 } 2454 } 2455 2456 if (rtld_flags & RT_FL_RELATIVE) 2457 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2458 2459 /* 2460 * For Intel ABI compatibility. It's possible that a JMPREL can be 2461 * specified without any other relocations (e.g. a dynamic executable 2462 * normally only contains .plt relocations). If this is the case then 2463 * no REL, RELSZ or RELENT will have been created. For us to be able 2464 * to traverse the .plt relocations under LD_BIND_NOW we need to know 2465 * the RELENT for these relocations. Refer to elf_reloc() for more 2466 * details. 2467 */ 2468 if (!RELENT(lmp) && JMPREL(lmp)) 2469 RELENT(lmp) = sizeof (Rel); 2470 2471 /* 2472 * Establish any per-object auditing. If we're establishing `main's 2473 * link-map its too early to go searching for audit objects so just 2474 * hold the object name for later (see setup()). 2475 */ 2476 if (audit) { 2477 char *cp = audit + (char *)STRTAB(lmp); 2478 2479 if (*cp) { 2480 if (((AUDITORS(lmp) = 2481 calloc(1, sizeof (Audit_desc))) == 0) || 2482 ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) { 2483 remove_so(0, lmp); 2484 return (0); 2485 } 2486 if (lml_main.lm_head) { 2487 if (audit_setup(lmp, AUDITORS(lmp), 0) == 0) { 2488 remove_so(0, lmp); 2489 return (0); 2490 } 2491 FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags; 2492 lml->lm_flags |= LML_FLG_LOCAUDIT; 2493 } 2494 } 2495 } 2496 2497 if ((CONDVAR(lmp) = rt_cond_create()) == 0) { 2498 remove_so(0, lmp); 2499 return (0); 2500 } 2501 if (oname && ((append_alias(lmp, oname, 0)) == 0)) { 2502 remove_so(0, lmp); 2503 return (0); 2504 } 2505 2506 /* 2507 * Add the mapped object to the end of the link map list. 2508 */ 2509 lm_append(lml, lmco, lmp); 2510 return (lmp); 2511 } 2512 2513 /* 2514 * Assign hardware/software capabilities. 2515 */ 2516 void 2517 cap_assign(Cap *cap, Rt_map *lmp) 2518 { 2519 while (cap->c_tag != CA_SUNW_NULL) { 2520 switch (cap->c_tag) { 2521 case CA_SUNW_HW_1: 2522 HWCAP(lmp) = cap->c_un.c_val; 2523 break; 2524 case CA_SUNW_SF_1: 2525 SFCAP(lmp) = cap->c_un.c_val; 2526 } 2527 cap++; 2528 } 2529 } 2530 2531 /* 2532 * Map in an ELF object. 2533 * Takes an open file descriptor for the object to map and its pathname; returns 2534 * a pointer to a Rt_map structure for this object, or 0 on error. 2535 */ 2536 static Rt_map * 2537 elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname, 2538 int fd) 2539 { 2540 int i; /* general temporary */ 2541 Off memsize = 0; /* total memory size of pathname */ 2542 Off mentry; /* entry point */ 2543 Ehdr *ehdr; /* ELF header of ld.so */ 2544 Phdr *phdr; /* first Phdr in file */ 2545 Phdr *phdr0; /* Saved first Phdr in file */ 2546 Phdr *pptr; /* working Phdr */ 2547 Phdr *fph = 0; /* first loadable Phdr */ 2548 Phdr *lph; /* last loadable Phdr */ 2549 Phdr *lfph = 0; /* last loadable (filesz != 0) Phdr */ 2550 Phdr *lmph = 0; /* last loadable (memsz != 0) Phdr */ 2551 Phdr *swph = 0; /* program header for SUNWBSS */ 2552 Phdr *tlph = 0; /* program header for PT_TLS */ 2553 Phdr *unwindph = 0; /* program header for PT_SUNW_UNWIND */ 2554 Cap *cap = 0; /* program header for SUNWCAP */ 2555 Dyn *mld = 0; /* DYNAMIC structure for pathname */ 2556 size_t size; /* size of elf and program headers */ 2557 caddr_t faddr = 0; /* mapping address of pathname */ 2558 Rt_map *lmp; /* link map created */ 2559 caddr_t paddr; /* start of padded image */ 2560 Off plen; /* size of image including padding */ 2561 Half etype; 2562 int fixed; 2563 Mmap *mmaps; 2564 uint_t mmapcnt = 0; 2565 Xword align = 0; 2566 2567 /* LINTED */ 2568 ehdr = (Ehdr *)fmap->fm_maddr; 2569 2570 /* 2571 * If this a relocatable object then special processing is required. 2572 */ 2573 if ((etype = ehdr->e_type) == ET_REL) 2574 return (elf_obj_file(lml, lmco, pname, fd)); 2575 2576 /* 2577 * If this isn't a dynamic executable or shared object we can't process 2578 * it. If this is a dynamic executable then all addresses are fixed. 2579 */ 2580 if (etype == ET_EXEC) 2581 fixed = 1; 2582 else if (etype == ET_DYN) 2583 fixed = 0; 2584 else { 2585 eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname, 2586 conv_ehdr_type(etype, 0)); 2587 return (0); 2588 } 2589 2590 /* 2591 * If our original mapped page was not large enough to hold all the 2592 * program headers remap them. 2593 */ 2594 size = (size_t)((char *)ehdr->e_phoff + 2595 (ehdr->e_phnum * ehdr->e_phentsize)); 2596 if (size > fmap->fm_fsize) { 2597 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2598 return (0); 2599 } 2600 if (size > fmap->fm_msize) { 2601 fmap_setup(); 2602 if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ, 2603 fmap->fm_mflags, fd, 0)) == MAP_FAILED) { 2604 int err = errno; 2605 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname, 2606 strerror(err)); 2607 return (0); 2608 } 2609 fmap->fm_msize = size; 2610 /* LINTED */ 2611 ehdr = (Ehdr *)fmap->fm_maddr; 2612 } 2613 /* LINTED */ 2614 phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize); 2615 2616 /* 2617 * Get entry point. 2618 */ 2619 mentry = ehdr->e_entry; 2620 2621 /* 2622 * Point at program headers and perform some basic validation. 2623 */ 2624 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2625 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2626 if ((pptr->p_type == PT_LOAD) || 2627 (pptr->p_type == PT_SUNWBSS)) { 2628 2629 if (fph == 0) { 2630 fph = pptr; 2631 /* LINTED argument lph is initialized in first pass */ 2632 } else if (pptr->p_vaddr <= lph->p_vaddr) { 2633 eprintf(lml, ERR_ELF, 2634 MSG_INTL(MSG_GEN_INVPRGHDR), pname); 2635 return (0); 2636 } 2637 2638 lph = pptr; 2639 2640 if (pptr->p_memsz) 2641 lmph = pptr; 2642 if (pptr->p_filesz) 2643 lfph = pptr; 2644 if (pptr->p_type == PT_SUNWBSS) 2645 swph = pptr; 2646 if (pptr->p_align > align) 2647 align = pptr->p_align; 2648 2649 } else if (pptr->p_type == PT_DYNAMIC) { 2650 mld = (Dyn *)(pptr->p_vaddr); 2651 } else if ((pptr->p_type == PT_TLS) && pptr->p_memsz) { 2652 tlph = pptr; 2653 } else if (pptr->p_type == PT_SUNWCAP) { 2654 cap = (Cap *)(pptr->p_vaddr); 2655 } else if (pptr->p_type == PT_SUNW_UNWIND) { 2656 unwindph = pptr; 2657 } 2658 } 2659 2660 #if defined(MAP_ALIGN) 2661 /* 2662 * Make sure the maximum page alignment is a power of 2 >= the system 2663 * page size, for use with MAP_ALIGN. 2664 */ 2665 align = M_PROUND(align); 2666 #endif 2667 2668 /* 2669 * We'd better have at least one loadable segment, together with some 2670 * specified file and memory size. 2671 */ 2672 if ((fph == 0) || (lmph == 0) || (lfph == 0)) { 2673 eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname); 2674 return (0); 2675 } 2676 2677 /* 2678 * Check that the files size accounts for the loadable sections 2679 * we're going to map in (failure to do this may cause spurious 2680 * bus errors if we're given a truncated file). 2681 */ 2682 if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) { 2683 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2684 return (0); 2685 } 2686 2687 /* 2688 * Memsize must be page rounded so that if we add object padding 2689 * at the end it will start at the beginning of a page. 2690 */ 2691 plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) - 2692 M_PTRUNC((ulong_t)fph->p_vaddr)); 2693 2694 /* 2695 * Determine if an existing mapping is acceptable. 2696 */ 2697 if (interp && (lml->lm_flags & LML_FLG_BASELM) && 2698 (strcmp(pname, interp->i_name) == 0)) { 2699 /* 2700 * If this is the interpreter then it has already been mapped 2701 * and we have the address so don't map it again. Note that 2702 * the common occurrence of a reference to the interpretor 2703 * (libdl -> ld.so.1) will have been caught during filter 2704 * initialization (see elf_lookup_filtee()). However, some 2705 * ELF implementations are known to record libc.so.1 as the 2706 * interpretor, and thus this test catches this behavior. 2707 */ 2708 paddr = faddr = interp->i_faddr; 2709 2710 } else if ((fixed == 0) && (r_debug.rtd_objpad == 0) && 2711 (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) && 2712 (fph->p_filesz == fph->p_memsz) && 2713 (((Xword)fmap->fm_maddr % align) == 0)) { 2714 /* 2715 * If the mapping required has already been established from 2716 * the initial page we don't need to do anything more. Reset 2717 * the fmap address so then any later files start a new fmap. 2718 * This is really an optimization for filters, such as libdl.so, 2719 * which should only require one page. 2720 */ 2721 paddr = faddr = fmap->fm_maddr; 2722 fmap->fm_maddr = 0; 2723 fmap_setup(); 2724 } 2725 2726 /* 2727 * Allocate a mapping array to retain mapped segment information. 2728 */ 2729 if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0) 2730 return (0); 2731 2732 /* 2733 * If we're reusing an existing mapping determine the objects etext 2734 * address. Otherwise map the file (which will calculate the etext 2735 * address as part of the mapping process). 2736 */ 2737 if (faddr) { 2738 caddr_t base; 2739 2740 if (fixed) 2741 base = 0; 2742 else 2743 base = faddr; 2744 2745 /* LINTED */ 2746 phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize); 2747 2748 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2749 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2750 if (pptr->p_type != PT_LOAD) 2751 continue; 2752 2753 mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base); 2754 mmaps[mmapcnt].m_msize = pptr->p_memsz; 2755 mmaps[mmapcnt].m_fsize = pptr->p_filesz; 2756 mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC); 2757 mmapcnt++; 2758 2759 if (!(pptr->p_flags & PF_W)) { 2760 fmap->fm_etext = (ulong_t)pptr->p_vaddr + 2761 (ulong_t)pptr->p_memsz + 2762 (ulong_t)(fixed ? 0 : faddr); 2763 } 2764 } 2765 } else { 2766 /* 2767 * Map the file. 2768 */ 2769 if (!(faddr = elf_map_it(lml, pname, memsize, ehdr, fph, lph, 2770 &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt))) 2771 return (0); 2772 } 2773 2774 /* 2775 * Calculate absolute base addresses and entry points. 2776 */ 2777 if (!fixed) { 2778 if (mld) 2779 /* LINTED */ 2780 mld = (Dyn *)((Off)mld + faddr); 2781 if (cap) 2782 /* LINTED */ 2783 cap = (Cap *)((Off)cap + faddr); 2784 mentry += (Off)faddr; 2785 } 2786 2787 /* 2788 * Create new link map structure for newly mapped shared object. 2789 */ 2790 if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr, 2791 fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps, 2792 mmapcnt))) { 2793 (void) munmap((caddr_t)faddr, memsize); 2794 return (0); 2795 } 2796 2797 /* 2798 * Start the system loading in the ELF information we'll be processing. 2799 */ 2800 if (REL(lmp)) { 2801 (void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) + 2802 (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp), 2803 MADV_WILLNEED); 2804 } 2805 2806 /* 2807 * If this shared object contains any special segments, record them. 2808 */ 2809 if (swph) { 2810 FLAGS(lmp) |= FLG_RT_SUNWBSS; 2811 SUNWBSS(lmp) = phdr + (swph - phdr0); 2812 } 2813 if (tlph && (tls_assign(lml, lmp, (phdr + (tlph - phdr0))) == 0)) { 2814 remove_so(lml, lmp); 2815 return (0); 2816 } 2817 2818 if (unwindph) 2819 PTUNWIND(lmp) = phdr + (unwindph - phdr0); 2820 2821 if (cap) 2822 cap_assign(cap, lmp); 2823 2824 return (lmp); 2825 } 2826 2827 /* 2828 * Function to correct protection settings. Segments are all mapped initially 2829 * with permissions as given in the segment header. We need to turn on write 2830 * permissions on a text segment if there are any relocations against that 2831 * segment, and them turn write permission back off again before returning 2832 * control to the user. This function turns the permission on or off depending 2833 * on the value of the argument. 2834 */ 2835 int 2836 elf_set_prot(Rt_map *lmp, int permission) 2837 { 2838 Mmap *mmaps; 2839 2840 /* 2841 * If this is an allocated image (ie. a relocatable object) we can't 2842 * mprotect() anything. 2843 */ 2844 if (FLAGS(lmp) & FLG_RT_IMGALLOC) 2845 return (1); 2846 2847 DBG_CALL(Dbg_file_prot(lmp, permission)); 2848 2849 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) { 2850 if (mmaps->m_perm & PROT_WRITE) 2851 continue; 2852 2853 if (mprotect(mmaps->m_vaddr, mmaps->m_msize, 2854 (mmaps->m_perm | permission)) == -1) { 2855 int err = errno; 2856 eprintf(LIST(lmp), ERR_FATAL, MSG_INTL(MSG_SYS_MPROT), 2857 NAME(lmp), strerror(err)); 2858 return (0); 2859 } 2860 } 2861 return (1); 2862 } 2863 2864 /* 2865 * Build full pathname of shared object from given directory name and filename. 2866 */ 2867 static char * 2868 elf_get_so(const char *dir, const char *file) 2869 { 2870 static char pname[PATH_MAX]; 2871 2872 (void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file); 2873 return (pname); 2874 } 2875 2876 /* 2877 * The copy relocation is recorded in a copy structure which will be applied 2878 * after all other relocations are carried out. This provides for copying data 2879 * that must be relocated itself (ie. pointers in shared objects). This 2880 * structure also provides a means of binding RTLD_GROUP dependencies to any 2881 * copy relocations that have been taken from any group members. 2882 * 2883 * If the size of the .bss area available for the copy information is not the 2884 * same as the source of the data inform the user if we're under ldd(1) control 2885 * (this checking was only established in 5.3, so by only issuing an error via 2886 * ldd(1) we maintain the standard set by previous releases). 2887 */ 2888 int 2889 elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym, 2890 Rt_map *dlmp, const void *dadd) 2891 { 2892 Rel_copy rc; 2893 Lm_list *lml = LIST(rlmp); 2894 2895 rc.r_name = name; 2896 rc.r_rsym = rsym; /* the new reference symbol and its */ 2897 rc.r_rlmp = rlmp; /* associated link-map */ 2898 rc.r_dlmp = dlmp; /* the defining link-map */ 2899 rc.r_dsym = dsym; /* the original definition */ 2900 rc.r_radd = radd; 2901 rc.r_dadd = dadd; 2902 2903 if (rsym->st_size > dsym->st_size) 2904 rc.r_size = (size_t)dsym->st_size; 2905 else 2906 rc.r_size = (size_t)rsym->st_size; 2907 2908 if (alist_append(©(dlmp), &rc, sizeof (Rel_copy), 2909 AL_CNT_COPYREL) == 0) { 2910 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 2911 return (0); 2912 else 2913 return (1); 2914 } 2915 if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) { 2916 if (alist_append(©(rlmp), &dlmp, 2917 sizeof (Rt_map *), AL_CNT_COPYREL) == 0) { 2918 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 2919 return (0); 2920 else 2921 return (1); 2922 } 2923 FLAGS1(dlmp) |= FL1_RT_COPYTOOK; 2924 } 2925 2926 /* 2927 * If we are tracing (ldd), warn the user if 2928 * 1) the size from the reference symbol differs from the 2929 * copy definition. We can only copy as much data as the 2930 * reference (dynamic executables) entry allows. 2931 * 2) the copy definition has STV_PROTECTED visibility. 2932 */ 2933 if (lml->lm_flags & LML_FLG_TRC_WARN) { 2934 if (rsym->st_size != dsym->st_size) { 2935 (void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF), 2936 _conv_reloc_type(M_R_COPY), demangle(name), 2937 NAME(rlmp), EC_XWORD(rsym->st_size), 2938 NAME(dlmp), EC_XWORD(dsym->st_size)); 2939 if (rsym->st_size > dsym->st_size) 2940 (void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA), 2941 NAME(dlmp)); 2942 else 2943 (void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC), 2944 NAME(rlmp)); 2945 } 2946 2947 if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) { 2948 (void) printf(MSG_INTL(MSG_LDD_CPY_PROT), 2949 _conv_reloc_type(M_R_COPY), demangle(name), 2950 NAME(dlmp)); 2951 } 2952 } 2953 2954 DBG_CALL(Dbg_reloc_apply_val(lml, ELF_DBG_RTLD, (Xword)radd, 2955 (Xword)rc.r_size)); 2956 return (1); 2957 } 2958 2959 /* 2960 * Determine the symbol location of an address within a link-map. Look for 2961 * the nearest symbol (whose value is less than or equal to the required 2962 * address). This is the object specific part of dladdr(). 2963 */ 2964 static void 2965 elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags) 2966 { 2967 ulong_t ndx, cnt, base, _value; 2968 Sym *sym, *_sym = NULL; 2969 const char *str; 2970 int _flags; 2971 uint_t *dynaddr_ndx; 2972 uint_t dynaddr_n = 0; 2973 ulong_t value; 2974 2975 /* 2976 * If SUNWSYMTAB() is non-NULL, then it sees a special version of 2977 * the dynsym that starts with any local function symbols that exist in 2978 * the library and then moves to the data held in SYMTAB(). In this 2979 * case, SUNWSYMSZ tells us how long the symbol table is. The 2980 * availability of local function symbols will enhance the results 2981 * we can provide. 2982 * 2983 * If SUNWSYMTAB() is non-NULL, then there might also be a 2984 * SUNWSYMSORT() vector associated with it. SUNWSYMSORT() contains 2985 * an array of indices into SUNWSYMTAB, sorted by increasing 2986 * address. We can use this to do an O(log N) search instead of a 2987 * brute force search. 2988 * 2989 * If SUNWSYMTAB() is NULL, then SYMTAB() references a dynsym that 2990 * contains only global symbols. In that case, the length of 2991 * the symbol table comes from the nchain field of the related 2992 * symbol lookup hash table. 2993 */ 2994 str = STRTAB(lmp); 2995 if (SUNWSYMSZ(lmp) == NULL) { 2996 sym = SYMTAB(lmp); 2997 /* 2998 * If we don't have a .hash table there are no symbols 2999 * to look at. 3000 */ 3001 if (HASH(lmp) == 0) 3002 return; 3003 cnt = HASH(lmp)[1]; 3004 } else { 3005 sym = SUNWSYMTAB(lmp); 3006 cnt = SUNWSYMSZ(lmp) / SYMENT(lmp); 3007 dynaddr_ndx = SUNWSYMSORT(lmp); 3008 if (dynaddr_ndx != NULL) 3009 dynaddr_n = SUNWSYMSORTSZ(lmp) / SUNWSORTENT(lmp); 3010 } 3011 3012 if (FLAGS(lmp) & FLG_RT_FIXED) 3013 base = 0; 3014 else 3015 base = ADDR(lmp); 3016 3017 if (dynaddr_n > 0) { /* Binary search */ 3018 long low = 0, low_bnd; 3019 long high = dynaddr_n - 1, high_bnd; 3020 long mid; 3021 Sym *mid_sym; 3022 3023 /* 3024 * Note that SUNWSYMSORT only contains symbols types that 3025 * supply memory addresses, so there's no need to check and 3026 * filter out any other types. 3027 */ 3028 low_bnd = low; 3029 high_bnd = high; 3030 while (low <= high) { 3031 mid = (low + high) / 2; 3032 mid_sym = &sym[dynaddr_ndx[mid]]; 3033 value = mid_sym->st_value + base; 3034 if (addr < value) { 3035 if ((sym[dynaddr_ndx[high]].st_value + base) >= 3036 addr) 3037 high_bnd = high; 3038 high = mid - 1; 3039 } else if (addr > value) { 3040 if ((sym[dynaddr_ndx[low]].st_value + base) <= 3041 addr) 3042 low_bnd = low; 3043 low = mid + 1; 3044 } else { 3045 _sym = mid_sym; 3046 _value = value; 3047 break; 3048 } 3049 } 3050 /* 3051 * If the above didn't find it exactly, then we must 3052 * return the closest symbol with a value that doesn't 3053 * exceed the one we are looking for. If that symbol exists, 3054 * it will lie in the range bounded by low_bnd and 3055 * high_bnd. This is a linear search, but a short one. 3056 */ 3057 if (_sym == NULL) { 3058 for (mid = low_bnd; mid <= high_bnd; mid++) { 3059 mid_sym = &sym[dynaddr_ndx[mid]]; 3060 value = mid_sym->st_value + base; 3061 if (addr >= value) { 3062 _sym = mid_sym; 3063 _value = value; 3064 } else { 3065 break; 3066 } 3067 } 3068 } 3069 } else { /* Linear search */ 3070 for (_value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) { 3071 /* 3072 * Skip expected symbol types that are not functions 3073 * or data: 3074 * - A symbol table starts with an undefined symbol 3075 * in slot 0. If we are using SUNWSYMTAB(), 3076 * there will be a second undefined symbol 3077 * right before the globals. 3078 * - The local part of SUNWSYMTAB() contains a 3079 * series of function symbols. Each section 3080 * starts with an initial STT_FILE symbol. 3081 */ 3082 if ((sym->st_shndx == SHN_UNDEF) || 3083 (ELF_ST_TYPE(sym->st_info) == STT_FILE)) 3084 continue; 3085 3086 value = sym->st_value + base; 3087 if (value > addr) 3088 continue; 3089 if (value < _value) 3090 continue; 3091 3092 _sym = sym; 3093 _value = value; 3094 3095 /* 3096 * Note, because we accept local and global symbols 3097 * we could find a section symbol that matches the 3098 * associated address, which means that the symbol 3099 * name will be null. In this case continue the 3100 * search in case we can find a global symbol of 3101 * the same value. 3102 */ 3103 if ((value == addr) && 3104 (ELF_ST_TYPE(sym->st_info) != STT_SECTION)) 3105 break; 3106 } 3107 } 3108 3109 _flags = flags & RTLD_DL_MASK; 3110 if (_sym) { 3111 if (_flags == RTLD_DL_SYMENT) 3112 *info = (void *)_sym; 3113 else if (_flags == RTLD_DL_LINKMAP) 3114 *info = (void *)lmp; 3115 3116 dlip->dli_sname = str + _sym->st_name; 3117 dlip->dli_saddr = (void *)_value; 3118 } else { 3119 /* 3120 * addr lies between the beginning of the mapped segment and 3121 * the first global symbol. We have no symbol to return 3122 * and the caller requires one. We use _START_, the base 3123 * address of the mapping. 3124 */ 3125 3126 if (_flags == RTLD_DL_SYMENT) { 3127 /* 3128 * An actual symbol struct is needed, so we 3129 * construct one for _START_. To do this in a 3130 * fully accurate way requires a different symbol 3131 * for each mapped segment. This requires the 3132 * use of dynamic memory and a mutex. That's too much 3133 * plumbing for a fringe case of limited importance. 3134 * 3135 * Fortunately, we can simplify: 3136 * - Only the st_size and st_info fields are useful 3137 * outside of the linker internals. The others 3138 * reference things that outside code cannot see, 3139 * and can be set to 0. 3140 * - It's just a label and there is no size 3141 * to report. So, the size should be 0. 3142 * This means that only st_info needs a non-zero 3143 * (constant) value. A static struct will suffice. 3144 * It must be const (readonly) so the caller can't 3145 * change its meaning for subsequent callers. 3146 */ 3147 static const Sym fsym = { 0, 0, 0, 3148 ELF_ST_INFO(STB_LOCAL, STT_OBJECT) }; 3149 *info = (void *) &fsym; 3150 } 3151 3152 dlip->dli_sname = MSG_ORIG(MSG_SYM_START); 3153 dlip->dli_saddr = (void *) ADDR(lmp); 3154 } 3155 } 3156 3157 static void 3158 elf_lazy_cleanup(Alist *alp) 3159 { 3160 Rt_map **lmpp; 3161 Aliste off; 3162 3163 /* 3164 * Cleanup any link-maps added to this dynamic list and free it. 3165 */ 3166 for (ALIST_TRAVERSE(alp, off, lmpp)) 3167 FLAGS(*lmpp) &= ~FLG_RT_DLSYM; 3168 free(alp); 3169 } 3170 3171 /* 3172 * This routine is called upon to search for a symbol from the dependencies of 3173 * the initial link-map. To maintain lazy loadings goal of reducing the number 3174 * of objects mapped, any symbol search is first carried out using the objects 3175 * that already exist in the process (either on a link-map list or handle). 3176 * If a symbol can't be found, and lazy dependencies are still pending, this 3177 * routine loads the dependencies in an attempt to locate the symbol. 3178 * 3179 * Only new objects are inspected as we will have already inspected presently 3180 * loaded objects before calling this routine. However, a new object may not 3181 * be new - although the di_lmp might be zero, the object may have been mapped 3182 * as someone elses dependency. Thus there's a possibility of some symbol 3183 * search duplication. 3184 */ 3185 3186 Sym * 3187 elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo) 3188 { 3189 Sym *sym = 0; 3190 Alist * alist = 0; 3191 Aliste off; 3192 Rt_map ** lmpp, * lmp = slp->sl_imap; 3193 const char *name = slp->sl_name; 3194 3195 if (alist_append(&alist, &lmp, sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) 3196 return (0); 3197 FLAGS(lmp) |= FLG_RT_DLSYM; 3198 3199 for (ALIST_TRAVERSE(alist, off, lmpp)) { 3200 uint_t cnt = 0; 3201 Slookup sl = *slp; 3202 Dyninfo *dip; 3203 3204 /* 3205 * Loop through the DT_NEEDED entries examining each object for 3206 * the symbol. If the symbol is not found the object is in turn 3207 * added to the alist, so that its DT_NEEDED entires may be 3208 * examined. 3209 */ 3210 lmp = *lmpp; 3211 for (dip = DYNINFO(lmp); cnt < DYNINFOCNT(lmp); cnt++, dip++) { 3212 Rt_map *nlmp; 3213 3214 if (((dip->di_flags & FLG_DI_NEEDED) == 0) || 3215 dip->di_info) 3216 continue; 3217 3218 /* 3219 * If this entry defines a lazy dependency try loading 3220 * it. If the file can't be loaded, consider this 3221 * non-fatal and continue the search (lazy loaded 3222 * dependencies need not exist and their loading should 3223 * only be fatal if called from a relocation). 3224 * 3225 * If the file is already loaded and relocated we must 3226 * still inspect it for symbols, even though it might 3227 * have already been searched. This lazy load operation 3228 * might have promoted the permissions of the object, 3229 * and thus made the object applicable for this symbol 3230 * search, whereas before the object might have been 3231 * skipped. 3232 */ 3233 if ((nlmp = elf_lazy_load(lmp, cnt, name)) == 0) 3234 continue; 3235 3236 /* 3237 * If this object isn't yet a part of the dynamic list 3238 * then inspect it for the symbol. If the symbol isn't 3239 * found add the object to the dynamic list so that we 3240 * can inspect its dependencies. 3241 */ 3242 if (FLAGS(nlmp) & FLG_RT_DLSYM) 3243 continue; 3244 3245 sl.sl_imap = nlmp; 3246 if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, binfo)) 3247 break; 3248 3249 /* 3250 * Some dlsym() operations are already traversing a 3251 * link-map (dlopen(0)), and thus there's no need to 3252 * build our own dynamic dependency list. 3253 */ 3254 if ((sl.sl_flags & LKUP_NODESCENT) == 0) { 3255 if (alist_append(&alist, &nlmp, 3256 sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) { 3257 elf_lazy_cleanup(alist); 3258 return (0); 3259 } 3260 FLAGS(nlmp) |= FLG_RT_DLSYM; 3261 } 3262 } 3263 if (sym) 3264 break; 3265 } 3266 3267 elf_lazy_cleanup(alist); 3268 return (sym); 3269 } 3270 3271 /* 3272 * Warning message for bad r_offset. 3273 */ 3274 void 3275 elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset, 3276 ulong_t rsymndx) 3277 { 3278 const char *name = (char *)0; 3279 Lm_list *lml = LIST(lmp); 3280 int trace; 3281 3282 if ((lml->lm_flags & LML_FLG_TRC_ENABLE) && 3283 (((rtld_flags & RT_FL_SILENCERR) == 0) || 3284 (lml->lm_flags & LML_FLG_TRC_VERBOSE))) 3285 trace = 1; 3286 else 3287 trace = 0; 3288 3289 if ((trace == 0) && (DBG_ENABLED == 0)) 3290 return; 3291 3292 if (rsymndx) { 3293 Sym *symref = (Sym *)((ulong_t)SYMTAB(lmp) + 3294 (rsymndx * SYMENT(lmp))); 3295 3296 if (ELF_ST_BIND(symref->st_info) != STB_LOCAL) 3297 name = (char *)(STRTAB(lmp) + symref->st_name); 3298 } 3299 3300 if (name == 0) 3301 name = MSG_ORIG(MSG_STR_EMPTY); 3302 3303 if (trace) { 3304 const char *rstr; 3305 3306 rstr = _conv_reloc_type((uint_t)rtype); 3307 (void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name, 3308 EC_ADDR(roffset)); 3309 return; 3310 } 3311 3312 Dbg_reloc_error(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, name); 3313 } 3314 3315 /* 3316 * Resolve a static TLS relocation. 3317 */ 3318 long 3319 elf_static_tls(Rt_map *lmp, Sym *sym, void *rel, uchar_t rtype, char *name, 3320 ulong_t roffset, long value) 3321 { 3322 Lm_list *lml = LIST(lmp); 3323 3324 /* 3325 * Relocations against a static TLS block have limited support once 3326 * process initialization has completed. Any error condition should be 3327 * discovered by testing for DF_STATIC_TLS as part of loading an object, 3328 * however individual relocations are tested in case the dynamic flag 3329 * had not been set when this object was built. 3330 */ 3331 if (PTTLS(lmp) == 0) { 3332 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, 3333 M_REL_SHT_TYPE, rel, NULL, name)); 3334 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS), 3335 _conv_reloc_type((uint_t)rtype), NAME(lmp), 3336 name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN)); 3337 return (0); 3338 } 3339 3340 /* 3341 * If no static TLS has been set aside for this object, determine if 3342 * any can be obtained. Enforce that any object using static TLS is 3343 * non-deletable. 3344 */ 3345 if (TLSSTATOFF(lmp) == 0) { 3346 FLAGS1(lmp) |= FL1_RT_TLSSTAT; 3347 MODE(lmp) |= RTLD_NODELETE; 3348 3349 if (tls_assign(lml, lmp, PTTLS(lmp)) == 0) { 3350 DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH, 3351 M_REL_SHT_TYPE, rel, NULL, name)); 3352 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS), 3353 _conv_reloc_type((uint_t)rtype), NAME(lmp), 3354 name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN)); 3355 return (0); 3356 } 3357 } 3358 3359 /* 3360 * Typically, a static TLS offset is maintained as a symbols value. 3361 * For local symbols that are not apart of the dynamic symbol table, 3362 * the TLS relocation points to a section symbol, and the static TLS 3363 * offset was deposited in the associated GOT table. Make sure the GOT 3364 * is cleared, so that the value isn't reused in do_reloc(). 3365 */ 3366 if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) { 3367 if ((ELF_ST_TYPE(sym->st_info) == STT_SECTION)) { 3368 value = *(long *)roffset; 3369 *(long *)roffset = 0; 3370 } else { 3371 value = sym->st_value; 3372 } 3373 } 3374 return (-(TLSSTATOFF(lmp) - value)); 3375 } 3376