/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Object file dependent support for ELF objects.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/procfs.h>
#include	<sys/mman.h>
#include	<sys/debug.h>
#include	<string.h>
#include	<limits.h>
#include	<dlfcn.h>
#include	<debug.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"

/*
 * Default and secure dependency search paths.
 */
static Pnode	elf_dflt_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB_64),	0,	MSG_PTH_LIB_64_SIZE,
	    LA_SER_DEFAULT,	0,	&elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB_64),	0,	MSG_PTH_USRLIB_64_SIZE,
	    LA_SER_DEFAULT,	0,	0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB),	0,	MSG_PTH_LIB_SIZE,
	    LA_SER_DEFAULT,	0,	&elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB),	0,	MSG_PTH_USRLIB_SIZE,
	    LA_SER_DEFAULT,	0,	0 }
#endif
};

static Pnode	elf_secure_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE_64),	0,	MSG_PTH_LIBSE_64_SIZE,
	    LA_SER_SECURE,	0,	&elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE_64),	0,
	    MSG_PTH_USRLIBSE_64_SIZE,
	    LA_SER_SECURE,	0,	0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE),	0,	MSG_PTH_LIBSE_SIZE,
	    LA_SER_SECURE,	0,	&elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE),	0,	MSG_PTH_USRLIBSE_SIZE,
	    LA_SER_SECURE,	0,	0 }
#endif
};

/*
 * Defines for local functions.
 */
static Pnode	*elf_fix_name(const char *, Rt_map *, uint_t);
static int	elf_are_u(Rej_desc *);
static void	elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int);
static ulong_t	elf_entry_pt(void);
static char	*elf_get_so(const char *, const char *);
static Rt_map	*elf_map_so(Lm_list *, Aliste, const char *, const char *, int);
static int	elf_needed(Lm_list *, Aliste, Rt_map *);
static void	elf_unmap_so(Rt_map *);
static int	elf_verify_vers(const char *, Rt_map *, Rt_map *);

/*
 * Functions and data accessed through indirect pointers.
 */
Fct elf_fct = {
	elf_are_u,
	elf_entry_pt,
	elf_map_so,
	elf_unmap_so,
	elf_needed,
	lookup_sym,
	elf_reloc,
	elf_dflt_dirs,
	elf_secure_dirs,
	elf_fix_name,
	elf_get_so,
	elf_dladdr,
	dlsym_handle,
	elf_verify_vers,
	elf_set_prot
};


/*
 * Redefine NEEDED name if necessary.
 */
static Pnode *
elf_fix_name(const char *name, Rt_map *clmp, uint_t orig)
{
	/*
	 * For ABI compliance, if we are asked for ld.so.1, then really give
	 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1).
	 */
	if (((*name == '/') &&
	    /* BEGIN CSTYLED */
#if	defined(_ELF64)
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) ||
#else
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) ||
#endif
	    (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) {
	    /* END CSTYLED */
		Pnode	*pnp;

		DBG_CALL(Dbg_file_fixname(LIST(clmp), name,
		    MSG_ORIG(MSG_PTH_LIBSYS)));
		if (((pnp = calloc(sizeof (Pnode), 1)) == 0) ||
		    ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) {
			if (pnp)
				free(pnp);
			return (0);
		}
		pnp->p_len = MSG_PTH_LIBSYS_SIZE;
		pnp->p_orig = (orig & PN_SER_MASK);
		return (pnp);
	}

	return (expand_paths(clmp, name, orig, 0));
}

/*
 * Determine if we have been given an ELF file and if so determine if the file
 * is compatible.  Returns 1 if true, else 0 and sets the reject descriptor
 * with associated error information.
 */
static int
elf_are_u(Rej_desc *rej)
{
	Ehdr	*ehdr;

	/*
	 * Determine if we're an elf file.  If not simply return, we don't set
	 * any rejection information as this test allows us to scroll through
	 * the objects we support (ELF, AOUT).
	 */
	if (fmap->fm_fsize < sizeof (Ehdr) ||
	    fmap->fm_maddr[EI_MAG0] != ELFMAG0 ||
	    fmap->fm_maddr[EI_MAG1] != ELFMAG1 ||
	    fmap->fm_maddr[EI_MAG2] != ELFMAG2 ||
	    fmap->fm_maddr[EI_MAG3] != ELFMAG3) {
		return (0);
	}

	/*
	 * Check class and encoding.
	 */
	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;
	if (ehdr->e_ident[EI_CLASS] != M_CLASS) {
		rej->rej_type = SGS_REJ_CLASS;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS];
		return (0);
	}
	if (ehdr->e_ident[EI_DATA] != M_DATA) {
		rej->rej_type = SGS_REJ_DATA;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA];
		return (0);
	}
	if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) &&
	    (ehdr->e_type != ET_DYN)) {
		rej->rej_type = SGS_REJ_TYPE;
		rej->rej_info = (uint_t)ehdr->e_type;
		return (0);
	}

	/*
	 * Verify machine specific flags, and hardware capability requirements.
	 */
	if ((elf_mach_flags_check(rej, ehdr) == 0) ||
	    ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0)))
		return (0);

	/*
	 * Verify ELF version.  ??? is this too restrictive ???
	 */
	if (ehdr->e_version > EV_CURRENT) {
		rej->rej_type = SGS_REJ_VERSION;
		rej->rej_info = (uint_t)ehdr->e_version;
		return (0);
	}
	return (1);
}
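
/*
 * For reference (illustrative, not exhaustive): a valid ELF file begins with
 * the four magic bytes checked above, e.g.
 *
 *	offset:	 0    1    2    3
 *	value:	0x7f  'E'  'L'  'F'		(\177ELF)
 *
 * followed by the class (EI_CLASS) and data encoding (EI_DATA) bytes that
 * elf_are_u() then validates against this runtime linker's own M_CLASS and
 * M_DATA.
 */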

/*
 * The runtime linker employs lazy loading to provide the libraries needed for
 * debugging, preloading .o's and dldump().  As these are seldom used, the
 * standard startup of ld.so.1 doesn't initialize all the information necessary
 * to perform plt relocation on ld.so.1's link-map.  The first time lazy
 * loading is called we get here to perform these initializations:
 *
 *  o	elf_needed() is called to set up the DYNINFO() indexes for each lazy
 *	dependency.  Typically, for all other objects, this is called during
 *	analyze_so(), but as ld.so.1 is self-contained we skip this processing.
 *
 *  o	For intel, ld.so.1's JMPSLOT relocations need relative updates.  These
 *	are by default skipped, thus delaying all relative relocation
 *	processing on every invocation of ld.so.1.
 */
int
elf_rtld_load()
{
	Lm_list	*lml = &lml_rtld;
	Rt_map	*lmp = lml->lm_head;

	if (lml->lm_flags & LML_FLG_PLTREL)
		return (1);

	/*
	 * As we need to refer to the DYNINFO() information, insure that it has
	 * been initialized.
	 */
	if (elf_needed(lml, ALO_DATA, lmp) == 0)
		return (0);

#if	defined(__i386)
	/*
	 * This is a kludge to give ld.so.1 a performance benefit on i386.
	 * It's based around two factors.
	 *
	 *  o	JMPSLOT relocations (PLT's) actually need a relative relocation
	 *	applied to the GOT entry so that they can find PLT0.
	 *
	 *  o	ld.so.1 does not exercise *any* PLT's before it has made a call
	 *	to elf_lazy_load().  This is because all dynamic dependencies
	 *	are recorded as lazy dependencies.
	 */
	(void) elf_reloc_relacount((ulong_t)JMPREL(lmp),
	    (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp),
	    (ulong_t)ADDR(lmp));
#endif

	lml->lm_flags |= LML_FLG_PLTREL;
	return (1);
}

/*
 * Lazy load an object.
 */
Rt_map *
elf_lazy_load(Rt_map *clmp, uint_t ndx, const char *sym)
{
	Rt_map		*nlmp, *hlmp;
	Dyninfo		*dip = &DYNINFO(clmp)[ndx];
	uint_t		flags = 0;
	Pnode		*pnp;
	const char	*name;
	Lm_list		*lml = LIST(clmp);
	Lm_cntl		*lmc;
	Aliste		lmco;

	/*
	 * If this dependency has already been processed, we're done.
	 */
	if (((nlmp = (Rt_map *)dip->di_info) != 0) ||
	    (dip->di_flags & FLG_DI_PROCESSD))
		return (nlmp);

	/*
	 * Determine the initial dependency name, and indicate that this
	 * dependency's processing has been initiated.
	 */
	name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val;
	DBG_CALL(Dbg_file_lazyload(clmp, name, sym));
	if (lml->lm_flags & LML_FLG_TRC_ENABLE)
		dip->di_flags |= FLG_DI_PROCESSD;

	if (dip->di_flags & FLG_DI_GROUP)
		flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE);

	/*
	 * Expand the requested name if necessary.
	 */
	if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0)
		return (0);

	/*
	 * Provided the object on the head of the link-map has completed its
	 * relocation, create a new link-map control list for this request.
	 */
	hlmp = lml->lm_head;
	if (FLAGS(hlmp) & FLG_RT_RELOCED) {
		if ((lmc = alist_append(&(lml->lm_lists), 0, sizeof (Lm_cntl),
		    AL_CNT_LMLISTS)) == 0) {
			remove_pnode(pnp);
			return (0);
		}
		lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists);
	} else {
		lmc = 0;
		lmco = ALO_DATA;
	}

	/*
	 * Load the associated object.
	 */
	dip->di_info = nlmp =
	    load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0);

	/*
	 * Remove any expanded pathname infrastructure.  Reduce the pending
	 * lazy dependency count of the caller, together with the link-map
	 * lists count of objects that still have lazy dependencies pending.
	 */
	remove_pnode(pnp);
	if (--LAZY(clmp) == 0)
		LIST(clmp)->lm_lazy--;

	/*
	 * Finish processing the objects associated with this request, and
	 * create an association between the caller and this dependency.
	 */
	if (nlmp && (((analyze_lmc(lml, lmco, nlmp) == 0)) ||
	    (relocate_lmc(lml, lmco, clmp, nlmp) == 0) ||
	    (bind_one(clmp, nlmp, BND_NEEDED) == 0)))
		dip->di_info = nlmp = 0;

	/*
	 * If this lazyload has failed, and we've created a new link-map
	 * control list to which this request has added objects, then remove
	 * all the objects that have been associated to this request.
	 */
	if ((nlmp == 0) && lmc && lmc->lc_head)
		remove_lmc(lml, clmp, lmc, lmco, name);

	/*
	 * Finally, remove any link-map control list that was created.
	 */
	if (lmc)
		remove_cntl(lml, lmco);

	return (nlmp);
}

/*
 * Return the entry point of the ELF executable.
 */
static ulong_t
elf_entry_pt(void)
{
	return (ENTRY(lml_main.lm_head));
}

/*
 * Unmap a given ELF shared object from the address space.
 */
static void
elf_unmap_so(Rt_map *lmp)
{
	caddr_t	addr;
	size_t	size;
	Mmap	*mmaps;

	/*
	 * If this link map represents a relocatable object concatenation, then
	 * the image was simply generated in allocated memory.  Free the
	 * memory.
	 *
	 * Note: the memory was originally allocated in the libelf:_elf_outmap
	 * routine and would normally have been free'd in elf_outsync(), but
	 * because we 'interpose' on that routine the memory wasn't free'd at
	 * that time.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC) {
		free((void *)ADDR(lmp));
		return;
	}

	/*
	 * If padding was enabled via rtld_db, then we have at least one page
	 * in front of the image - and possibly a trailing page.
	 * Unmap the front page first:
	 */
	if (PADSTART(lmp) != ADDR(lmp)) {
		addr = (caddr_t)M_PTRUNC(PADSTART(lmp));
		size = ADDR(lmp) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap any trailing padding.
	 */
	if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) >
	    M_PROUND(ADDR(lmp) + MSIZE(lmp))) {
		addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp));
		size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap all mapped segments.
	 */
	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++)
		(void) munmap(mmaps->m_vaddr, mmaps->m_msize);
}

/*
 * Determine if a dependency requires a particular version and if so verify
 * that the version exists in the dependency.
 */
static int
elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp)
{
	Verneed		*vnd = VERNEED(clmp);
	int		_num, num = VERNEEDNUM(clmp);
	char		*cstrs = (char *)STRTAB(clmp);
	Lm_list		*lml = LIST(clmp);

	/*
	 * Traverse the caller's version needed information and determine if
	 * any specific versions are required from the dependency.
	 */
	DBG_CALL(Dbg_ver_need_title(LIST(clmp), NAME(clmp)));
	for (_num = 1; _num <= num; _num++,
	    vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) {
		Half		cnt = vnd->vn_cnt;
		Vernaux		*vnap;
		char		*nstrs, *need;

		/*
		 * Determine if a needed entry matches this dependency.
		 */
		need = (char *)(cstrs + vnd->vn_file);
		if (strcmp(name, need) != 0)
			continue;

		if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) &&
		    ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0))
			(void) printf(MSG_INTL(MSG_LDD_VER_FIND), name);

		/*
		 * Validate that each version required actually exists in the
		 * dependency.
		 */
		nstrs = (char *)STRTAB(nlmp);

		for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt;
		    cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) {
			char		*version, *define;
			Verdef		*vdf = VERDEF(nlmp);
			ulong_t		_num, num = VERDEFNUM(nlmp);
			int		found = 0;

			version = (char *)(cstrs + vnap->vna_name);
			DBG_CALL(Dbg_ver_need_entry(lml, 0, need, version));

			for (_num = 1; _num <= num; _num++,
			    vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) {
				Verdaux		*vdap;

				if (vnap->vna_hash != vdf->vd_hash)
					continue;

				vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux);
				define = (char *)(nstrs + vdap->vda_name);
				if (strcmp(version, define) != 0)
					continue;

				found++;
				break;
			}

			/*
			 * If we're being traced print out any matched version
			 * when the verbose (-v) option is in effect.  Always
			 * print any unmatched versions.
			 */
			if (lml->lm_flags & LML_FLG_TRC_ENABLE) {
				/* BEGIN CSTYLED */
				if (found) {
				    if (!(lml->lm_flags & LML_FLG_TRC_VERBOSE))
					continue;

				    (void) printf(MSG_ORIG(MSG_LDD_VER_FOUND),
					need, version, NAME(nlmp));
				} else {
				    if (rtld_flags & RT_FL_SILENCERR)
					continue;

				    (void) printf(MSG_INTL(MSG_LDD_VER_NFOUND),
					need, version);
				}
				/* END CSTYLED */
				continue;
			}

			/*
			 * If the version hasn't been found then this is a
			 * candidate for a fatal error condition.  Weak
			 * version definition requirements are silently
			 * ignored.  Also, if the image inspected for a version
			 * definition has no versioning recorded at all then
			 * silently ignore this (this provides better backward
			 * compatibility to old images created prior to
			 * versioning being available).  Both of these skipped
			 * diagnostics are available under tracing (see above).
			 */
			if ((found == 0) && (num != 0) &&
			    (!(vnap->vna_flags & VER_FLG_WEAK))) {
				eprintf(lml, ERR_FATAL,
				    MSG_INTL(MSG_VER_NFOUND), need, version,
				    NAME(clmp));
				return (0);
			}
		}
	}
	DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD));
	return (1);
}
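
/*
 * By way of a (hypothetical) example: an object built against libc.so.1 with
 * a version binding of SUNW_1.1 carries a Verneed entry whose vn_file names
 * "libc.so.1", and a Vernaux whose vna_name is "SUNW_1.1".  The loops above
 * locate the Verdef in libc.so.1 whose vd_hash and first Verdaux vda_name
 * match, and fail the load if none does - unless the requirement is weak,
 * the dependency carries no versioning at all, or tracing is in effect.
 */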

/*
 * Search through the dynamic section for DT_NEEDED entries and perform one
 * of two functions.  If only the first argument is specified then load the
 * defined shared object, otherwise add the link map representing the defined
 * shared object to the dlopen list.
 */
static int
elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp)
{
	Dyn		*dyn;
	ulong_t		ndx = 0;
	uint_t		lazy = 0, flags = 0;
	Word		lmflags = lml->lm_flags;
	Word		lmtflags = lml->lm_tflags;

	/*
	 * Process each shared object on needed list.
	 */
	if (DYN(clmp) == 0)
		return (1);

	for (dyn = (Dyn *)DYN(clmp); dyn->d_tag != DT_NULL; dyn++, ndx++) {
		Dyninfo	*dip = &DYNINFO(clmp)[ndx];
		Rt_map	*nlmp = 0;
		char	*name;
		int	silent = 0;
		Pnode	*pnp;

		switch (dyn->d_tag) {
		case DT_POSFLAG_1:
			if ((dyn->d_un.d_val & DF_P1_LAZYLOAD) &&
			    !(lmtflags & LML_TFLG_NOLAZYLD))
				lazy = 1;
			if (dyn->d_un.d_val & DF_P1_GROUPPERM)
				flags = (FLG_RT_SETGROUP | FLG_RT_HANDLE);
			continue;
		case DT_NEEDED:
		case DT_USED:
			dip->di_flags |= FLG_DI_NEEDED;
			if (flags)
				dip->di_flags |= FLG_DI_GROUP;

			name = (char *)STRTAB(clmp) + dyn->d_un.d_val;

			/*
			 * NOTE, libc.so.1 can't be lazy loaded.  Although a
			 * lazy position flag won't be produced when a RTLDINFO
			 * .dynamic entry is found (introduced with the UPM in
			 * Solaris 10), it was possible to mark libc for lazy
			 * loading on previous releases.  To reduce the
			 * overhead of testing for this occurrence, only carry
			 * out this check for the first object on the link-map
			 * list (there aren't many applications built without
			 * libc).
			 */
			if (lazy && (lml->lm_head == clmp) &&
			    (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0))
				lazy = 0;

			/*
			 * Don't bring in lazy loaded objects yet unless we've
			 * been asked to attempt to load all available objects
			 * (crle(1) sets LD_FLAGS=loadavail).  Even under
			 * RTLD_NOW we don't process this - RTLD_NOW will cause
			 * relocation processing which in turn might trigger
			 * lazy loading, but it's possible that the object has
			 * a lazy loaded file with no bindings (i.e., it should
			 * never have been a dependency in the first place).
			 */
			if (lazy) {
				if ((lmflags & LML_FLG_LOADAVAIL) == 0) {
					LAZY(clmp)++;
					lazy = flags = 0;
					continue;
				}

				/*
				 * Silence any error messages - see description
				 * under elf_lookup_filtee().
				 */
				if ((rtld_flags & RT_FL_SILENCERR) == 0) {
					rtld_flags |= RT_FL_SILENCERR;
					silent = 1;
				}
			}
			break;
		case DT_AUXILIARY:
			dip->di_flags |= FLG_DI_AUXFLTR;
			lazy = flags = 0;
			continue;
		case DT_SUNW_AUXILIARY:
			dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR);
			lazy = flags = 0;
			continue;
		case DT_FILTER:
			dip->di_flags |= FLG_DI_STDFLTR;
			lazy = flags = 0;
			continue;
		case DT_SUNW_FILTER:
			dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR);
			lazy = flags = 0;
			continue;
		default:
			lazy = flags = 0;
			continue;
		}

		DBG_CALL(Dbg_file_needed(clmp, name));
		if (lml->lm_flags & LML_FLG_TRC_ENABLE)
			dip->di_flags |= FLG_DI_PROCESSD;

		/*
		 * Establish the object's name, load it and establish a binding
		 * with the caller.
		 */
		if (((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) ||
		    ((nlmp = load_one(lml, lmco, pnp, clmp, MODE(clmp),
		    flags, 0)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0))
			nlmp = 0;

		/*
		 * Clean up any infrastructure, including the removal of the
		 * error suppression state, if it had been previously set in
		 * this routine.
		 */
		if (pnp)
			remove_pnode(pnp);
		if (silent)
			rtld_flags &= ~RT_FL_SILENCERR;
		lazy = flags = 0;
		if ((dip->di_info = (void *)nlmp) == 0) {
			/*
			 * If the object could not be mapped, continue if error
			 * suppression is established or we're here with
			 * ldd(1).
			 */
			if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags &
			    (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE)))
				continue;
			else
				return (0);
		}
	}

	if (LAZY(clmp))
		lml->lm_lazy++;

	return (1);
}
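
/*
 * To illustrate the DT_POSFLAG_1 handling above (hypothetical link-edit):
 * this positional entry only governs the .dynamic entry that immediately
 * follows it.  A link-edit such as:
 *
 *	ld ... -z lazyload -lfoo -z nolazyload -lbar ...
 *
 * produces a DT_POSFLAG_1 (DF_P1_LAZYLOAD) entry preceding libfoo's
 * DT_NEEDED entry only; note how the lazy and flags variables are reset once
 * each dependency has been processed.
 */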

static int
elf_map_check(Lm_list *lml, const char *name, caddr_t vaddr, Off size)
{
	prmap_t		*maps, *_maps;
	int		pfd, num, _num;
	caddr_t		eaddr = vaddr + size;
	int		err;

	/*
	 * If memory reservations have been established for alternative objects
	 * determine if this object falls within the reservation; if it does no
	 * further checking is required.
	 */
	if (rtld_flags & RT_FL_MEMRESV) {
		Rtc_head	*head = (Rtc_head *)config->c_bgn;

		if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) &&
		    (eaddr <= (caddr_t)(uintptr_t)head->ch_resend))
			return (0);
	}

	/*
	 * Determine the mappings presently in use by this process.
	 */
	if ((pfd = pr_open(lml)) == FD_UNAVAIL)
		return (1);

	if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		return (1);
	}

	if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0)
		return (1);

	if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) {
		err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		free(maps);
		return (1);
	}

	/*
	 * Determine if the supplied address clashes with any of the present
	 * process mappings.
	 */
	for (_num = 0, _maps = maps; _num < num; _num++, _maps++) {
		caddr_t		_eaddr = _maps->pr_vaddr + _maps->pr_size;
		Rt_map		*lmp;
		const char	*str;

		if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr))
			continue;

		/*
		 * We have a memory clash.  See if one of the known dynamic
		 * dependency mappings represents this space so as to provide
		 * the user a more meaningful message.
		 */
		if ((lmp = _caller(vaddr, 0)) != 0)
			str = NAME(lmp);
		else
			str = MSG_INTL(MSG_STR_UNKNOWN);

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name,
		    EC_NATPTR(vaddr), EC_OFF(size), str);
		free(maps);
		return (1);
	}
	free(maps);
	return (0);
}

/*
 * Obtain a memory reservation.  On newer systems, both MAP_ANON and MAP_ALIGN
 * are used to obtain an aligned reservation from anonymous memory.  If
 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard
 * reservation using the file as backing.
 */
static Am_ret
elf_map_reserve(Lm_list *lml, const char *name, caddr_t *maddr, Off msize,
    int mperm, int fd, Xword align)
{
	Am_ret	amret;
	int	mflag = MAP_PRIVATE | MAP_NORESERVE;

#if defined(MAP_ALIGN)
	if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) {
		mflag |= MAP_ALIGN;
		*maddr = (caddr_t)align;
	}
#endif
	if ((amret = anon_map(lml, maddr, msize, PROT_NONE, mflag)) == AM_ERROR)
		return (amret);

	if (amret == AM_OK)
		return (AM_OK);

	/*
	 * If an anonymous memory request failed (which should only be the
	 * case if it is unsupported on the system we're running on), establish
	 * the initial mapping directly from the file.
	 */
	*maddr = 0;
	if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE,
	    fd, 0)) == MAP_FAILED) {
		int	err = errno;
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name,
		    strerror(err));
		return (AM_ERROR);
	}
	return (AM_NOSUP);
}
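
/*
 * A minimal sketch of the MAP_ALIGN convention used above (Solaris specific,
 * hypothetical alignment value): when MAP_ALIGN is set, the address argument
 * to mmap() carries the required alignment rather than a placement hint, e.g.
 *
 *	addr = (caddr_t)0x10000;		// 64K alignment request
 *	mmap(addr, msize, PROT_NONE,
 *	    MAP_PRIVATE | MAP_NORESERVE | MAP_ANON | MAP_ALIGN, -1, 0);
 *
 * asks the kernel to choose any suitably aligned reservation, which is why
 * *maddr is simply seeded with the alignment value before calling anon_map().
 */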

static void *
elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag,
    int fd, Off foff)
{
#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	static int	notd = 0;

	/*
	 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate
	 * flag.
	 */
	if (notd == 0) {
		if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC)
			mflag |= MAP_TEXT;
		else
			mflag |= MAP_INITDATA;
	}
#endif
	if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED)
		return (0);

#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	if ((notd == 0) && (errno == EINVAL)) {
		/*
		 * MAP_TEXT and MAP_INITDATA may not be supported on this
		 * platform, try again without.
		 */
		notd = 1;
		mflag &= ~(MAP_TEXT | MAP_INITDATA);

		return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff));
	}
#endif
	return (MAP_FAILED);
}

/*
 * Map in a file.
 */
static caddr_t
elf_map_it(
	Lm_list		*lml,		/* link-map list */
	const char	*name,		/* actual name stored for pathname */
	Off		fsize,		/* total mapping claim of the file */
	Ehdr		*ehdr,		/* ELF header of file */
	Phdr		*fphdr,		/* first loadable Phdr */
	Phdr		*lphdr,		/* last loadable Phdr */
	Phdr		**rrphdr,	/* return first Phdr in reservation */
	caddr_t		*rraddr,	/* return start of reservation */
	Off		*rrsize,	/* return total size of reservation */
	int		fixed,		/* image is resolved to a fixed addr */
	int		fd,		/* image's file descriptor */
	Xword		align,		/* image segments' maximum alignment */
	Mmap		*mmaps,		/* mmap information array and */
	uint_t		*mmapcnt)	/* mapping count */
{
	caddr_t		raddr;		/* reservation address */
	Off		rsize;		/* reservation size */
	Phdr		*phdr;		/* working program header pointer */
	caddr_t		maddr;		/* working mmap address */
	caddr_t		faddr;		/* working file address */
	size_t		padsize;	/* object padding requirement */
	size_t		padpsize = 0;	/* padding size rounded to next page */
	size_t		padmsize = 0;	/* padding size rounded for alignment */
	int		skipfseg;	/* skip mapping first segment */
	int		mperm;		/* segment permissions */
	Am_ret		amret = AM_NOSUP;

	/*
	 * If padding is required extend both the front and rear of the image.
	 * To insure the image itself is mapped at the correct alignment the
	 * initial padding is rounded up to the nearest page.  Once the image
	 * is mapped the excess can be pruned to the nearest page required for
	 * the actual padding itself.
	 */
	if ((padsize = r_debug.rtd_objpad) != 0) {
		padpsize = M_PROUND(padsize);
		if (fixed)
			padmsize = padpsize;
		else
			padmsize = S_ROUND(padsize, align);
	}

	/*
	 * Determine the initial permissions used to map in the first segment.
	 * If this segment's memsz is greater than its filesz then the
	 * difference must be zeroed.  Make sure this segment is writable.
	 */
	mperm = 0;
	if (fphdr->p_flags & PF_R)
		mperm |= PROT_READ;
	if (fphdr->p_flags & PF_X)
		mperm |= PROT_EXEC;
	if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz))
		mperm |= PROT_WRITE;

	/*
	 * Determine whether or not to let the system reserve address space
	 * based on whether this is a dynamic executable (addresses in object
	 * are fixed) or a shared object (addresses in object are relative to
	 * the object's base).
	 */
	if (fixed) {
		/*
		 * Determine the reservation address and size, and insure that
		 * this reservation isn't already in use.
		 */
		faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr);
		raddr = maddr - padpsize;
		rsize = fsize + padpsize + padsize;

		if (lml_main.lm_head) {
			if (elf_map_check(lml, name, raddr, rsize) != 0)
				return (0);
		}

		/*
		 * As this is a fixed image, all segments must be individually
		 * mapped.
		 */
		skipfseg = 0;

	} else {
		size_t	esize;

		/*
		 * If this isn't a fixed image, reserve enough address space
		 * for the entire image to be mapped.  The amount of
		 * reservation is the range between the beginning of the
		 * first, and end of the last loadable segment, together with
		 * any padding, plus the alignment of the first segment.
		 *
		 * The optimal reservation is made as a no-reserve mapping
		 * from anonymous memory.  Each segment is then mapped into
		 * this reservation.  If the anonymous mapping capability
		 * isn't available, the reservation is obtained from the file
		 * itself.  In this case the first segment of the image is
		 * mapped as part of the reservation, thus only the following
		 * segments need to be remapped.
		 */
		rsize = fsize + padmsize + padsize;
		if ((amret = elf_map_reserve(lml, name, &raddr, rsize, mperm,
		    fd, align)) == AM_ERROR)
			return (0);
		maddr = raddr + padmsize;
		faddr = (caddr_t)S_ROUND((Off)maddr, align);

		/*
		 * If this reservation has been obtained from anonymous memory,
		 * then all segments must be individually mapped.  Otherwise,
		 * the first segment heads the reservation.
		 */
		if (amret == AM_OK)
			skipfseg = 0;
		else
			skipfseg = 1;

		/*
		 * For backward compatibility (where MAP_ALIGN isn't
		 * available), insure the alignment of the reservation is
		 * adequate for this object, and if not remap the object to
		 * obtain the correct alignment.
		 */
		if (faddr != maddr) {
			(void) munmap(raddr, rsize);

			rsize += align;
			if ((amret = elf_map_reserve(lml, name, &raddr, rsize,
			    mperm, fd, align)) == AM_ERROR)
				return (0);

			maddr = faddr = (caddr_t)S_ROUND((Off)(raddr +
			    padpsize), align);

			esize = maddr - raddr + padpsize;

			/*
			 * As this image has been realigned, the first segment
			 * of the file needs to be remapped to its correct
			 * location.
			 */
			skipfseg = 0;
		} else
			esize = padmsize - padpsize;

		/*
		 * If this reservation included padding, remove any excess for
		 * the start of the image (the padding was adjusted to insure
		 * the image was aligned appropriately).
		 */
		if (esize) {
			(void) munmap(raddr, esize);
			raddr += esize;
			rsize -= esize;
		}
	}

	/*
	 * At this point we know the initial location of the image, and its
	 * size.  Pass these back to the caller for inclusion in the link-map
	 * that will eventually be created.
	 */
	*rraddr = raddr;
	*rrsize = rsize;

	/*
	 * The first loadable segment is now pointed to by maddr.  This
	 * segment will eventually contain the elf header and program headers,
	 * so reset the program header.  Pass this back to the caller for
	 * inclusion in the link-map so it can be used for later unmapping
	 * operations.
	 */
	/* LINTED */
	*rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff);

	/*
	 * If padding is required at the front of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 * Otherwise this reservation is backed by the file, or in the case of
	 * a fixed image, doesn't yet exist.  Map the padding so that it is
	 * suitably protected (PROT_NONE), and insure the first segment of the
	 * file is mapped to its correct location.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			if (dz_map(lml, raddr, padpsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);

			skipfseg = 0;
		}
		rsize -= padpsize;
	}

	/*
	 * Map individual segments.  For a fixed image, these will each be
	 * unique mappings.  For a reservation these will fill in the
	 * reservation.
	 */
	for (phdr = fphdr; phdr <= lphdr;
	    phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) {
		caddr_t	addr;
		Off	mlen, flen;
		size_t	size;

		/*
		 * Skip non-loadable segments or segments that don't occupy
		 * any memory.
		 */
		if (((phdr->p_type != PT_LOAD) &&
		    (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0))
			continue;

		/*
		 * Establish this segment's address relative to our base.
		 */
		addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr +
		    (fixed ? 0 : faddr)));

		/*
		 * Determine the mapping protection from the segment
		 * attributes.  Also determine the etext address from the last
		 * loadable segment which has permissions but no write access.
		 */
		mperm = 0;
		if (phdr->p_flags) {
			if (phdr->p_flags & PF_R)
				mperm |= PROT_READ;
			if (phdr->p_flags & PF_X)
				mperm |= PROT_EXEC;
			if (phdr->p_flags & PF_W)
				mperm |= PROT_WRITE;
			else
				fmap->fm_etext = phdr->p_vaddr + phdr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
		}

		/*
		 * Determine the type of mapping required.
		 */
		if (phdr->p_type == PT_SUNWBSS) {
			/*
			 * Potentially, we can defer the loading of any SUNWBSS
			 * segment, depending on whether the symbols it
			 * provides have been bound to.  In this manner, large
			 * segments that are interposed upon between shared
			 * libraries may not require mapping.  Note that the
			 * mapping information is recorded in our mapping
			 * descriptor at this time.
			 */
			mlen = phdr->p_memsz;
			flen = 0;

		} else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) {
			/*
			 * If this segment has no backing file and no flags
			 * specified, then it defines a reservation.  At this
			 * point all standard loadable segments will have been
			 * processed.  The segment reservation is mapped
			 * directly from /dev/null.
			 */
			if (nu_map(lml, (caddr_t)addr, phdr->p_memsz, PROT_NONE,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else if (phdr->p_filesz == 0) {
			/*
			 * If this segment has no backing file then it defines
			 * a nobits segment and is mapped directly from
			 * /dev/zero.
			 */
			if (dz_map(lml, (caddr_t)addr, phdr->p_memsz, mperm,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else {
			Off	foff;

			/*
			 * This mapping originates from the file.  Determine
			 * the file offset to which the mapping will be
			 * directed (must be aligned) and how much to map
			 * (might be more than the file in the case of .bss).
			 */
			foff = M_PTRUNC((ulong_t)phdr->p_offset);
			mlen = phdr->p_memsz + (phdr->p_offset - foff);
			flen = phdr->p_filesz + (phdr->p_offset - foff);

			/*
			 * If this is a non-fixed, non-anonymous mapping, and
			 * no padding is involved, then the first loadable
			 * segment is already part of the initial reservation.
			 * In this case there is no need to remap this segment.
			 */
			if ((skipfseg == 0) || (phdr != fphdr)) {
				int phdr_mperm = mperm;
				/*
				 * If this segment's memsz is greater than its
				 * filesz then the difference must be zeroed.
				 * Make sure this segment is writable.
				 */
				if (phdr->p_memsz > phdr->p_filesz)
					mperm |= PROT_WRITE;

				if (elf_map_textdata((caddr_t)addr, flen,
				    mperm, phdr_mperm,
				    (MAP_FIXED | MAP_PRIVATE), fd, foff) ==
				    MAP_FAILED) {
					int	err = errno;
					eprintf(lml, ERR_FATAL,
					    MSG_INTL(MSG_SYS_MMAP), name,
					    strerror(err));
					return (0);
				}
			}

			/*
			 * If the memory occupancy of the segment overflows the
			 * definition in the file, we need to "zero out" the
			 * end of the mapping we've established, and if
			 * necessary, map some more space from /dev/zero.
			 * Note, zero'ed memory must end on a double word
			 * boundary to satisfy zero().
			 */
			if (phdr->p_memsz > phdr->p_filesz) {
				caddr_t	zaddr;
				size_t	zlen, zplen;
				Off	fend;

				foff = (Off)(phdr->p_vaddr + phdr->p_filesz +
				    (fixed ? 0 : faddr));
				zaddr = (caddr_t)M_PROUND(foff);
				zplen = (size_t)(zaddr - foff);

				fend = (Off)S_DROUND((size_t)(phdr->p_vaddr +
				    phdr->p_memsz + (fixed ? 0 : faddr)));
				zlen = (size_t)(fend - foff);

				/*
				 * Determine whether the number of bytes that
				 * must be zero'ed overflow to the next page.
				 * If not, simply clear the exact bytes
				 * (filesz to memsz) from this page.
				 * Otherwise, clear the remaining bytes of this
				 * page, and map any following pages from
				 * /dev/zero.
				 */
				if (zlen < zplen)
					zero((caddr_t)foff, (long)zlen);
				else {
					zero((caddr_t)foff, (long)zplen);

					if ((zlen = (fend - (Off)zaddr)) > 0) {
						if (dz_map(lml, zaddr, zlen,
						    mperm,
						    MAP_FIXED | MAP_PRIVATE) ==
						    MAP_FAILED)
							return (0);
					}
				}
			}
		}

		/*
		 * Unmap anything from the last mapping address to this one and
		 * update the mapping claim pointer.
		 */
		if ((fixed == 0) && ((size = addr - maddr) != 0)) {
			(void) munmap(maddr, size);
			rsize -= size;
		}

		/*
		 * Retain this segment's mapping information.
		 */
		mmaps[*mmapcnt].m_vaddr = addr;
		mmaps[*mmapcnt].m_msize = mlen;
		mmaps[*mmapcnt].m_fsize = flen;
		mmaps[*mmapcnt].m_perm = mperm;
		(*mmapcnt)++;

		maddr = addr + M_PROUND(mlen);
		rsize -= M_PROUND(mlen);
	}

	/*
	 * If padding is required at the end of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			/*
			 * maddr is currently page aligned from the last
			 * segment mapping.
			 */
			if (dz_map(lml, maddr, padsize, PROT_NONE,
			    (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
			    MAP_FAILED)
				return (0);
		}
		maddr += padsize;
		rsize -= padsize;
	}

	/*
	 * Unmap any final reservation.
	 */
	if ((fixed == 0) && (rsize != 0))
		(void) munmap(maddr, rsize);

	return (faddr);
}
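
/*
 * An illustrative layout (hypothetical addresses) of the reservation
 * elf_map_it() builds for a non-fixed object when padding is enabled:
 *
 *	raddr                                            raddr + rsize
 *	| front padding | seg1 (text) | seg2 (data/bss) | rear padding |
 *	                ^
 *	                faddr (aligned base, returned for the link-map)
 *
 * Each PT_LOAD segment is mapped MAP_FIXED into the reservation, gaps
 * between segments are unmapped as the loop advances maddr, and any residual
 * reservation is trimmed at the end.
 */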

/*
 * A null symbol interpreter.  Used if a filter has no associated filtees.
 */
/* ARGSUSED0 */
static Sym *
elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	return ((Sym *)0);
}

/*
 * Disable filtee use.
 */
static void
elf_disable_filtee(Rt_map *lmp, Dyninfo *dip)
{
	dip->di_info = 0;

	if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) {
		/*
		 * If this is an object filter, free the filtee's duplication.
		 */
		if (OBJFLTRNDX(lmp) != FLTR_DISABLED) {
			free(REFNAME(lmp));
			REFNAME(lmp) = (char *)0;
			OBJFLTRNDX(lmp) = FLTR_DISABLED;

			/*
			 * Indicate that this filtee is no longer available.
			 */
			if (dip->di_flags & FLG_DI_STDFLTR)
				SYMINTP(lmp) = elf_null_find_sym;

		}
	} else if (dip->di_flags & FLG_DI_STDFLTR) {
		/*
		 * Indicate that this standard filtee is no longer available.
		 */
		if (SYMSFLTRCNT(lmp))
			SYMSFLTRCNT(lmp)--;
	} else {
		/*
		 * Indicate that this auxiliary filtee is no longer available.
		 */
		if (SYMAFLTRCNT(lmp))
			SYMAFLTRCNT(lmp)--;
	}
	dip->di_flags &= ~MSK_DI_FILTER;
}

/*
 * Find symbol interpreter - filters.
 * This function is called when the symbols from a shared object should
 * be resolved from the shared object's filtees instead of from within itself.
 *
 * A symbol name of 0 is used to trigger filtee loading.
 */
static Sym *
_elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	const char	*name = slp->sl_name, *filtees;
	Rt_map		*clmp = slp->sl_cmap;
	Rt_map		*ilmp = slp->sl_imap;
	Pnode		*pnp, **pnpp;
	int		any;
	Dyninfo		*dip = &DYNINFO(ilmp)[ndx];
	Lm_list		*lml = LIST(ilmp);

	/*
	 * Indicate that the filter has been used.  If a binding already exists
	 * to the caller, indicate that this object is referenced.  This
	 * insures we don't generate false unreferenced diagnostics from
	 * ldd -u/U or debugging.  Don't create a binding regardless, as this
	 * filter may have been dlopen()'ed.
	 */
	if (name && (ilmp != clmp)) {
		Word	tracing = (LIST(clmp)->lm_flags &
		    (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED));

		if (tracing || DBG_ENABLED) {
			Bnd_desc	**bdpp;
			Aliste		off;

			FLAGS1(ilmp) |= FL1_RT_USED;

			if ((tracing & LML_FLG_TRC_UNREF) || DBG_ENABLED) {
				for (ALIST_TRAVERSE(CALLERS(ilmp), off, bdpp)) {
					Bnd_desc	*bdp = *bdpp;

					if (bdp->b_caller == clmp) {
						bdp->b_flags |= BND_REFER;
						break;
					}
				}
			}
		}
	}

	/*
	 * If this is the first call to process this filter, establish the
	 * filtee list.  If a configuration file exists, determine if any
	 * filtee associations for this filter, and its filtee reference, are
	 * defined.  Otherwise, process the filtee reference.  Any token
	 * expansion is also completed at this point (i.e., $PLATFORM).
	 */
	filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val;
	if (dip->di_info == 0) {
		if (rtld_flags2 & RT_FL2_FLTCFG)
			dip->di_info = elf_config_flt(lml, PATHNAME(ilmp),
			    filtees);

		if (dip->di_info == 0) {
			DBG_CALL(Dbg_file_filter(lml, NAME(ilmp), filtees, 0));
			if ((lml->lm_flags &
			    (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) &&
			    ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0))
				(void) printf(MSG_INTL(MSG_LDD_FIL_FILTER),
				    NAME(ilmp), filtees);

			if ((dip->di_info = (void *)expand_paths(ilmp,
			    filtees, PN_SER_FILTEE, 0)) == 0) {
				elf_disable_filtee(ilmp, dip);
				return ((Sym *)0);
			}
		}
	}

	/*
	 * Traverse the filtee list, dlopen()'ing any objects specified and
	 * using their group handle to lookup the symbol.
	 */
	for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp;
	    pnpp = &pnp->p_next, pnp = *pnpp) {
		int	mode;
		Grp_hdl	*ghp;
		Rt_map	*nlmp = 0;

		if (pnp->p_len == 0)
			continue;

		/*
		 * Establish the mode of the filtee from the filter.  As
		 * filtees are loaded via a dlopen(), make sure that RTLD_GROUP
		 * is set and the filtees aren't global.  It would be nice to
		 * have RTLD_FIRST used here also, but as filters got out long
		 * before RTLD_FIRST was introduced it's a little too late now.
		 */
		mode = MODE(ilmp) | RTLD_GROUP;
		mode &= ~RTLD_GLOBAL;

		/*
		 * Insure that any auxiliary filter can locate symbols from its
		 * caller.
		 */
		if (dip->di_flags & FLG_DI_AUXFLTR)
			mode |= RTLD_PARENT;

		/*
		 * Process any hardware capability directory.  Establish a new
		 * link-map control list from which to analyze any newly added
		 * objects.
		 */
		if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) {
			Lm_cntl	*lmc;
			Aliste	lmco;

			if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
				if ((lmc = alist_append(&(lml->lm_lists), 0,
				    sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0)
					return ((Sym *)0);
				lmco = (Aliste)((char *)lmc -
				    (char *)lml->lm_lists);
			} else {
				lmc = 0;
				lmco = ALO_DATA;
			}

			pnp = hwcap_filtees(pnpp, lmco, lmc, dip, ilmp, filtees,
			    mode, (FLG_RT_HANDLE | FLG_RT_HWCAP));

			/*
			 * Now that any hardware capability objects have been
			 * processed, remove any link-map control list.
			 */
			if (lmc)
				remove_cntl(lml, lmco);
		}

		if (pnp->p_len == 0)
			continue;

		/*
		 * Process an individual filtee.
		 */
		if (pnp->p_info == 0) {
			const char	*filtee = pnp->p_name;
			int		audit = 0;

			DBG_CALL(Dbg_file_filtee(lml, NAME(ilmp), filtee, 0));

			ghp = 0;

			/*
			 * Determine if the reference link map is already
			 * loaded.  As an optimization compare the filtee with
			 * our interpreter.  The most common filter is
			 * libdl.so.1, which is a filter on ld.so.1.
			 */
#if	defined(_ELF64)
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) {
#else
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) {
#endif
				/*
				 * Create an association between ld.so.1 and
				 * the filter.  As an optimization, a handle
				 * for ld.so.1 itself (required for the
				 * dlopen() family filtering mechanism)
				 * shouldn't search any dependencies of
				 * ld.so.1.  Omitting GPD_ADDEPS prevents the
				 * addition of any ld.so.1 dependencies to this
				 * handle.
				 */
				nlmp = lml_rtld.lm_head;
				if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp,
				    (GPH_LDSO | GPH_FIRST | GPH_FILTEE),
				    (GPD_DLSYM | GPD_RELOC), GPD_PARENT)) == 0)
					nlmp = 0;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp)
					pnp->p_info = (void *)ghp;

				/*
				 * Audit the filter/filtee established.  Ignore
				 * any return from the auditor, as we can't
				 * allow the auditor to disable filtering to
				 * ld.so.1, otherwise nothing is going to work.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER))
					(void) audit_objfilter(ilmp, filtees,
					    nlmp, 0);

			} else {
				Rej_desc	rej = { 0 };
				Lm_cntl		*lmc;
				Aliste		lmco;

				/*
				 * Establish a new link-map control list from
				 * which to analyze any newly added objects.
				 */
				if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
					if ((lmc =
					    alist_append(&(lml->lm_lists), 0,
					    sizeof (Lm_cntl),
					    AL_CNT_LMLISTS)) == 0)
						return ((Sym *)0);
					lmco = (Aliste)((char *)lmc -
					    (char *)lml->lm_lists);
				} else {
					lmc = 0;
					lmco = ALO_DATA;
				}

				/*
				 * Load the filtee.  Note, an auditor can
				 * provide an alternative name.
				 */
				if ((nlmp = load_path(lml, lmco, &(pnp->p_name),
				    ilmp, mode, FLG_RT_HANDLE, &ghp, 0,
				    &rej)) == 0) {
					file_notfound(LIST(ilmp), filtee, ilmp,
					    FLG_RT_HANDLE, &rej);
					remove_rej(&rej);
				}
				filtee = pnp->p_name;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp) {
					ghp->gh_flags |= GPH_FILTEE;
					pnp->p_info = (void *)ghp;
				}

				/*
				 * Audit the filter/filtee established.  A
				 * return of 0 indicates the auditor wishes to
				 * ignore this filtee.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)) {
					if (audit_objfilter(ilmp, filtees,
					    nlmp, 0) == 0) {
						audit = 1;
						nlmp = 0;
					}
				}

				/*
				 * Finish processing the objects associated
				 * with this request.  Create an association
				 * between this object and the originating
				 * filter to provide sufficient information to
				 * tear down this filtee if necessary.
				 */
				if (nlmp && ghp &&
				    ((analyze_lmc(lml, lmco, nlmp) == 0) ||
				    (relocate_lmc(lml, lmco, ilmp, nlmp) == 0)))
					nlmp = 0;

				/*
				 * If the filtee has been successfully
				 * processed, then create an association
				 * between the filter and filtee.  This
				 * association provides sufficient information
				 * to tear down the filter and filtee if
				 * necessary.
				 */
				DBG_CALL(Dbg_file_hdl_title(DBG_HDL_ADD));
				if (nlmp && ghp &&
				    (hdl_add(ghp, ilmp, GPD_FILTER) == 0))
					nlmp = 0;

				/*
				 * If this filtee loading has failed, and we've
				 * created a new link-map control list to which
				 * this request has added objects, then remove
				 * all the objects that have been associated to
				 * this request.
				 */
				if ((nlmp == 0) && lmc && lmc->lc_head)
					remove_lmc(lml, clmp, lmc, lmco, name);

				/*
				 * Remove any link-map control list that was
				 * created.
				 */
				if (lmc)
					remove_cntl(lml, lmco);
			}

			/*
			 * Generate a diagnostic if the filtee couldn't be
			 * loaded, null out the pnode entry, and continue
			 * the search.  Otherwise, retain this group handle
			 * for future symbol searches.
			 */
			if (nlmp == 0) {
				DBG_CALL(Dbg_file_filtee(lml, 0, filtee,
				    audit));

				pnp->p_info = 0;
				pnp->p_len = 0;
				continue;
			}
		}

		ghp = (Grp_hdl *)pnp->p_info;

		/*
		 * If we're just here to trigger filtee loading skip the symbol
		 * lookup so we'll continue looking for additional filtees.
		 */
		if (name) {
			Grp_desc	*gdp;
			Sym		*sym = 0;
			Aliste		off;
			Slookup		sl = *slp;

			sl.sl_flags |= LKUP_FIRST;
			any++;

			/*
			 * Look for the symbol in the handle's dependencies.
			 */
			for (ALIST_TRAVERSE(ghp->gh_depends, off, gdp)) {
				if ((gdp->gd_flags & GPD_DLSYM) == 0)
					continue;

				/*
				 * If our parent is a dependency don't look at
				 * it (otherwise we are in a recursive loop).
				 * This situation can occur with auxiliary
				 * filters if the filtee has a dependency on
				 * the filter.  This dependency isn't necessary
				 * as auxiliary filters are opened RTLD_PARENT,
				 * but users may still unknowingly add an
				 * explicit dependency to the parent.
				 */
				if ((sl.sl_imap = gdp->gd_depend) == ilmp)
					continue;

				if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp,
				    binfo)) != 0) ||
				    (ghp->gh_flags & GPH_FIRST))
					break;
			}

			/*
			 * If a symbol has been found, indicate the binding
			 * and return the symbol.
			 */
			if (sym) {
				*binfo |= DBG_BINFO_FILTEE;
				return (sym);
			}
		}

		/*
		 * If this object is tagged to terminate filtee processing
		 * we're done.
		 */
		if (FLAGS1(ghp->gh_ownlmp) & FL1_RT_ENDFILTE)
			break;
	}

	/*
	 * If we're just here to trigger filtee loading then we're done.
	 */
	if (name == 0)
		return ((Sym *)0);

	/*
	 * If no filtees have been found for a filter, clean up any Pnode
	 * structures and disable their search completely.  For auxiliary
	 * filters we can reselect the symbol search function so that we never
	 * enter this routine again for this object.  For standard filters we
	 * use the null symbol routine.
	 */
	if (any == 0) {
		remove_pnode((Pnode *)dip->di_info);
		elf_disable_filtee(ilmp, dip);
		return ((Sym *)0);
	}

	return ((Sym *)0);
}

/*
 * Focal point for disabling error messages for auxiliary filters.  As an
 * auxiliary filter allows for filtee use, but provides a fallback should a
 * filtee not exist (or fail to load), any errors generated as a consequence
 * of trying to load the filtees are typically suppressed.  Setting
 * RT_FL_SILENCERR suppresses errors generated by eprintf(), but insures a
 * debug diagnostic is produced.  ldd(1) employs printf(), and here, the
 * selection of whether to print a diagnostic in regards to auxiliary filters
 * is a little more complex.
 *
 *  .	The determination of whether to produce an ldd message, or a fatal
 *	error message is driven by LML_FLG_TRC_ENABLE.
 *  .	More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN,
 *	(ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s),
 *	and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u).
 *
 *  .	If the calling object is lddstub, then several classes of message are
 *	suppressed.  The user isn't trying to diagnose lddstub, this is simply
 *	a stub executable employed to preload a user specified library against.
 *
 *  .	If RT_FL_SILENCERR is in effect then any generic ldd() messages should
 *	be suppressed.  All detailed ldd messages should still be produced.
 */
Sym *
elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	Sym	*sym;
	Dyninfo	*dip = &DYNINFO(slp->sl_imap)[ndx];
	int	silent = 0;

	/*
	 * Make sure this entry is still acting as a filter.  We may have tried
	 * to process this previously, and disabled it if the filtee couldn't
	 * be processed.  However, other entries may provide different filtees
	 * that are yet to be completed.
	 */
	if (dip->di_flags == 0)
		return ((Sym *)0);

	/*
	 * Indicate whether an error message is required should this filtee not
	 * be found, based on the type of filter.
	 */
	if ((dip->di_flags & FLG_DI_AUXFLTR) &&
	    ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) {
		rtld_flags |= RT_FL_SILENCERR;
		silent = 1;
	}

	sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx);

	if (silent)
		rtld_flags &= ~RT_FL_SILENCERR;

	return (sym);
}

/*
 * Compute the elf hash value (as defined in the ELF access library).
 * The form of the hash table is:
 *
 *	|--------------|
 *	| # of buckets |
 *	|--------------|
 *	| # of chains  |
 *	|--------------|
 *	|   bucket[]   |
 *	|--------------|
 *	|   chain[]    |
 *	|--------------|
 */
ulong_t
elf_hash(const char *name)
{
	uint_t	hval = 0;

	while (*name) {
		uint_t	g;
		hval = (hval << 4) + *name++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return ((ulong_t)hval);
}
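
/*
 * An illustrative trace of the loop above for the (hypothetical) name "ab":
 *
 *	hval = (0 << 4) + 'a'		= 0x61;  g = 0
 *	hval = (0x61 << 4) + 'b'	= 0x672; g = 0
 *
 * giving elf_hash("ab") == 0x672.  The g term only fires once hval has
 * accumulated bits in the top nibble (names longer than roughly seven
 * characters), folding those bits back in and masking them off to keep the
 * hash bounded.
 */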

/*
 * If flag argument has LKUP_SPEC set, we treat undefined symbols of type
 * function specially in the executable - if they have a value, even though
 * undefined, we use that value.  This allows us to associate all references
 * to a function's address to a single place in the process: the plt entry
 * for that function in the executable.  Calls to lookup from plt binding
 * routines do NOT set LKUP_SPEC in the flag.
 */
Sym *
elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	const char	*name = slp->sl_name;
	Rt_map		*ilmp = slp->sl_imap;
	ulong_t		hash = slp->sl_hash;
	uint_t		ndx, htmp, buckets, *chainptr;
	Sym		*sym, *symtabptr;
	char		*strtabptr, *strtabname;
	uint_t		flags1;
	Syminfo		*sip;

	/*
	 * If we're only here to establish a symbol's index, skip the
	 * diagnostic used to trace a symbol search.
	 */
	if ((slp->sl_flags & LKUP_SYMNDX) == 0)
		DBG_CALL(Dbg_syms_lookup(ilmp, name, MSG_ORIG(MSG_STR_ELF)));

	if (HASH(ilmp) == 0)
		return ((Sym *)0);

	buckets = HASH(ilmp)[0];
	/* LINTED */
	htmp = (uint_t)hash % buckets;

	/*
	 * Get the first symbol on hash chain and initialize the string
	 * and symbol table pointers.
	 */
	if ((ndx = HASH(ilmp)[htmp + 2]) == 0)
		return ((Sym *)0);

	chainptr = HASH(ilmp) + 2 + buckets;
	strtabptr = STRTAB(ilmp);
	symtabptr = SYMTAB(ilmp);

	while (ndx) {
		sym = symtabptr + ndx;
		strtabname = strtabptr + sym->st_name;

		/*
		 * Compare the symbol found with the name required.  If the
		 * names don't match continue with the next hash entry.
		 */
		if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) {
			if ((ndx = chainptr[ndx]) != 0)
				continue;
			return ((Sym *)0);
		}

		/*
		 * The Solaris ld does not put DT_VERSYM in the dynamic
		 * section, but the GNU ld does.  The GNU runtime linker
		 * interprets the top bit of the 16-bit Versym value
		 * (0x8000) as the "hidden" bit.  If this bit is set,
		 * the linker is supposed to act as if that symbol does
		 * not exist.  The hidden bit supports their versioning
		 * scheme, which allows multiple incompatible functions
		 * with the same name to exist at different versions
		 * within an object.  The Solaris linker does not support this
		 * mechanism, or the model of interface evolution that
		 * it allows, but we honor the hidden bit in GNU ld
		 * produced objects in order to interoperate with them.
		 */
		if ((VERSYM(ilmp) != NULL) &&
		    ((VERSYM(ilmp)[ndx] & 0x8000) != 0)) {
			DBG_CALL(Dbg_syms_ignore_gnuver(ilmp, name,
			    ndx, VERSYM(ilmp)[ndx]));
			if ((ndx = chainptr[ndx]) != 0)
				continue;
			return ((Sym *)0);
		}

		/*
		 * If we're only here to establish a symbol's index, we're
		 * done.
		 */
		if (slp->sl_flags & LKUP_SYMNDX)
			return (sym);

		/*
		 * If we find a match and the symbol is defined, return the
		 * symbol pointer and the link map in which it was found.
		 */
		if (sym->st_shndx != SHN_UNDEF) {
			*dlmp = ilmp;
			*binfo |= DBG_BINFO_FOUND;
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			break;

		/*
		 * If we find a match and the symbol is undefined, the
		 * symbol type is a function, and the value of the symbol
		 * is non zero, then this is a special case.  This allows
		 * the resolution of a function address to the plt[] entry.
		 * See SPARC ABI, Dynamic Linking, Function Addresses for
		 * more details.
		 */
		} else if ((slp->sl_flags & LKUP_SPEC) &&
		    (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) &&
		    (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) {
			*dlmp = ilmp;
			*binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR);
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			return (sym);
		}

		/*
		 * Undefined symbol.
		 */
		return ((Sym *)0);
	}

	/*
	 * We've found a match.  Determine if the defining object contains
	 * symbol binding information.
	 */
	if ((sip = SYMINFO(ilmp)) != 0)
		sip += ndx;

	/*
	 * If this definition is a singleton, and we haven't followed a default
	 * symbol search knowing that we're looking for a singleton (presumably
	 * because the symbol definition has been changed since the referring
	 * object was built), then reject this binding so that the caller can
	 * fall back to a standard symbol search.
	 */
	if ((ELF_ST_VISIBILITY(sym->st_other) == STV_SINGLETON) &&
	    (((slp->sl_flags & LKUP_STANDARD) == 0) ||
	    (((slp->sl_flags & LKUP_SINGLETON) == 0) &&
	    (LIST(ilmp)->lm_flags & LML_FLG_GROUPSEXIST)))) {
		DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name,
		    DBG_BNDREJ_SINGLE));
		*binfo |= BINFO_REJSINGLE;
		*binfo &= ~DBG_BINFO_MSK;
		return ((Sym *)0);
	}

	/*
	 * If this is a direct binding request, but the symbol definition has
	 * disabled directly binding to it (presumably because the symbol
	 * definition has been changed since the referring object was built),
	 * indicate this failure so that the caller can fall back to a standard
	 * symbol search.
	 */
	if (sip && (slp->sl_flags & LKUP_DIRECT) &&
	    (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) {
		DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name,
		    DBG_BNDREJ_NODIR));
		*binfo |= BINFO_REJDIRECT;
		*binfo &= ~DBG_BINFO_MSK;
		return ((Sym *)0);
	}

	/*
	 * Determine whether this object is acting as a filter.
	 */
	if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0)
		return (sym);

	/*
	 * Determine if this object offers per-symbol filtering, and if so,
	 * whether this symbol references a filtee.
	 */
	if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) {
		/*
		 * If this is a standard filter reference, and no standard
		 * filtees remain to be inspected, we're done.  If this is an
		 * auxiliary filter reference, and no auxiliary filtees remain,
		 * we'll fall through in case any object filtering is
		 * available.
		 */
		if ((sip->si_flags & SYMINFO_FLG_FILTER) &&
		    (SYMSFLTRCNT(ilmp) == 0))
			return ((Sym *)0);

		if ((sip->si_flags & SYMINFO_FLG_FILTER) ||
		    ((sip->si_flags & SYMINFO_FLG_AUXILIARY) &&
		    SYMAFLTRCNT(ilmp))) {
			Sym	*fsym;

			/*
			 * This symbol has an associated filtee.  Lookup the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return an error, otherwise fall through to
			 * catch any object filtering that may be available.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    sip->si_boundto)) != 0)
				return (fsym);
			if (sip->si_flags & SYMINFO_FLG_FILTER)
				return ((Sym *)0);
		}
	}
	 */
	if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) {
		Sym	*fsym;

		if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) {
			/*
			 * This object has an associated filtee.  Lookup the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return an error, otherwise return the symbol
			 * within the filter itself.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    OBJFLTRNDX(ilmp))) != 0)
				return (fsym);
		}

		if (flags1 & FL1_RT_OBJSFLTR)
			return ((Sym *)0);
	}
	return (sym);
}
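
/*
 * Illustrative sketch (not part of ld.so.1): the hash arithmetic used by
 * elf_find_sym() above follows the standard SysV ELF .hash layout, where
 * word 0 is the bucket count, word 1 the chain count, buckets occupy words
 * 2 .. nbucket+1, and the chain array follows.  A minimal, self-contained
 * rendition of that lookup, using the well-known ELF hash function and
 * hypothetical helper names, looks roughly like this.
 */
#if 0
static unsigned long
elf_hash_demo(const char *name)
{
	unsigned long	h = 0, g;

	while (*name) {
		h = (h << 4) + (unsigned char)*name++;
		if ((g = (h & 0xf0000000)) != 0)
			h ^= g >> 24;
		h &= ~g;
	}
	return (h);
}

static Sym *
hash_lookup_demo(uint_t *hashtab, Sym *symtab, const char *strtab,
    const char *name)
{
	uint_t	nbucket = hashtab[0];
	uint_t	*bucket = &hashtab[2];
	uint_t	*chain = &hashtab[2 + nbucket];
	uint_t	ndx;

	/* walk the chain selected by the name's hash bucket */
	for (ndx = bucket[elf_hash_demo(name) % nbucket]; ndx != 0;
	    ndx = chain[ndx]) {
		if (strcmp(strtab + symtab[ndx].st_name, name) == 0)
			return (&symtab[ndx]);
	}
	return (NULL);
}
#endif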

/*
 * Create a new Rt_map structure for an ELF object and initialize
 * all values.
 */
Rt_map *
elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld,
    ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry,
    ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt)
{
	Rt_map	*lmp;
	ulong_t	base, fltr = 0, audit = 0, cfile = 0, crle = 0;
	Xword	rpath = 0;
	Ehdr	*ehdr = (Ehdr *)addr;

	DBG_CALL(Dbg_file_elf(lml, pname, (ulong_t)ld, addr, msize, entry,
	    lml->lm_lmidstr, lmco));

	/*
	 * Allocate space for the link-map and private elf information.  Once
	 * these are allocated and initialized, we can use remove_so(0, lmp) to
	 * tear down the link-map should any failures occur.
	 */
	if ((lmp = calloc(sizeof (Rt_map), 1)) == 0)
		return (0);
	if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) {
		free(lmp);
		return (0);
	}

	/*
	 * All fields not filled in were set to 0 by calloc.
	 */
	ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname;
	DYN(lmp) = ld;
	ADDR(lmp) = addr;
	MSIZE(lmp) = msize;
	ENTRY(lmp) = (Addr)entry;
	SYMINTP(lmp) = elf_find_sym;
	ETEXT(lmp) = etext;
	FCT(lmp) = &elf_fct;
	LIST(lmp) = lml;
	PADSTART(lmp) = paddr;
	PADIMLEN(lmp) = padimsize;
	THREADID(lmp) = rt_thr_self();
	OBJFLTRNDX(lmp) = FLTR_DISABLED;
	SORTVAL(lmp) = -1;

	MMAPS(lmp) = mmaps;
	MMAPCNT(lmp) = mmapcnt;
	ASSERT(mmapcnt != 0);

	/*
	 * If this is a shared object, add the base address to each address.
	 * If this is an executable, use each address as is.
	 */
	if (ehdr->e_type == ET_EXEC) {
		base = 0;
		FLAGS(lmp) |= FLG_RT_FIXED;
	} else
		base = addr;

	/*
	 * Fill in the rest of the link-map entries with information from the
	 * file's dynamic structure.
	 */
	if (ld) {
		uint_t		dyncnt = 0;
		Xword		pltpadsz = 0;
		Rti_desc	*rti;

		/* CSTYLED */
		for ( ; ld->d_tag != DT_NULL; ++ld, dyncnt++) {
			switch ((Xword)ld->d_tag) {
			case DT_SYMTAB:
				SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMTAB:
				SUNWSYMTAB(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSZ:
				SUNWSYMSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_STRTAB:
				STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SYMENT:
				SYMENT(lmp) = ld->d_un.d_val;
				break;
			case DT_FEATURE_1:
				ld->d_un.d_val |= DTF_1_PARINIT;
				if (ld->d_un.d_val & DTF_1_CONFEXP)
					crle = 1;
				break;
			case DT_MOVESZ:
				MOVESZ(lmp) = ld->d_un.d_val;
				FLAGS(lmp) |= FLG_RT_MOVE;
				break;
			case DT_MOVEENT:
				MOVEENT(lmp) = ld->d_un.d_val;
				break;
			case DT_MOVETAB:
				MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_REL:
			case DT_RELA:
				/*
				 * At this time we can only handle 1 type of
				 * relocation per object.
				 */
				REL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_RELSZ:
			case DT_RELASZ:
				RELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_RELENT:
			case DT_RELAENT:
				RELENT(lmp) = ld->d_un.d_val;
				break;
			case DT_RELCOUNT:
			case DT_RELACOUNT:
				RELACOUNT(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_TEXTREL:
				FLAGS1(lmp) |= FL1_RT_TEXTREL;
				break;
			case DT_HASH:
				HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTGOT:
				PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTRELSZ:
				PLTRELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_JMPREL:
				JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_INIT:
				if (ld->d_un.d_ptr != NULL)
					INIT(lmp) =
					    (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_FINI:
				if (ld->d_un.d_ptr != NULL)
					FINI(lmp) =
					    (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_INIT_ARRAY:
				INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_INIT_ARRAYSZ:
				INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_FINI_ARRAY:
				FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_FINI_ARRAYSZ:
				FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_PREINIT_ARRAY:
				PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_PREINIT_ARRAYSZ:
				PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_RPATH:
			case DT_RUNPATH:
				rpath = ld->d_un.d_val;
				break;
			case DT_FILTER:
				fltr = ld->d_un.d_val;
				OBJFLTRNDX(lmp) = dyncnt;
				FLAGS1(lmp) |= FL1_RT_OBJSFLTR;
				break;
			case DT_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					fltr = ld->d_un.d_val;
					OBJFLTRNDX(lmp) = dyncnt;
				}
				FLAGS1(lmp) |= FL1_RT_OBJAFLTR;
				break;
			case DT_SUNW_FILTER:
				SYMSFLTRCNT(lmp)++;
				FLAGS1(lmp) |= FL1_RT_SYMSFLTR;
				break;
			case DT_SUNW_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					SYMAFLTRCNT(lmp)++;
				}
				FLAGS1(lmp) |= FL1_RT_SYMAFLTR;
				break;
			case DT_DEPAUDIT:
				if (!(rtld_flags & RT_FL_NOAUDIT))
					audit = ld->d_un.d_val;
				break;
			case DT_CONFIG:
				cfile = ld->d_un.d_val;
				break;
			case DT_DEBUG:
				/*
				 * DT_DEBUG entries are only created in
				 * dynamic objects that require an interpreter
				 * (ie. all dynamic executables and some shared
				 * objects), and provide for a hand-shake with
				 * debuggers.  This entry is initialized to
				 * zero by the link-editor.  If a debugger is
				 * monitoring us, and has updated this entry,
				 * set the debugger flag, and finish
				 * initializing the debugging structure (see
				 * setup() also).  Switch off any configuration
				 * object use, as most debuggers can't handle
				 * fixed dynamic executables as dependencies,
				 * and we can't handle requests like object
				 * padding for alternative objects.
				 */
				if (ld->d_un.d_ptr)
					rtld_flags |=
					    (RT_FL_DEBUGGER | RT_FL_NOOBJALT);
				ld->d_un.d_ptr = (Addr)&r_debug;
				break;
			case DT_VERNEED:
				VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_VERNEEDNUM:
				/* LINTED */
				VERNEEDNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_VERDEF:
				VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base);
				break;
			case DT_VERDEFNUM:
				/* LINTED */
				VERDEFNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_VERSYM:
				/*
				 * The Solaris ld does not produce DT_VERSYM,
				 * but the GNU ld does, in order to support
				 * their style of versioning, which differs
				 * from ours in some ways, while using the
				 * same data structures.  The presence of
				 * DT_VERSYM therefore means that GNU
				 * versioning rules apply to the given file.
				 * If DT_VERSYM is not present, then Solaris
				 * versioning rules apply.
				 */
				VERSYM(lmp) = (Versym *)(ld->d_un.d_ptr + base);
				break;
			case DT_BIND_NOW:
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				break;
			case DT_FLAGS:
				if (ld->d_un.d_val & DF_SYMBOLIC)
					FLAGS1(lmp) |= FL1_RT_SYMBOLIC;
				if (ld->d_un.d_val & DF_TEXTREL)
					FLAGS1(lmp) |= FL1_RT_TEXTREL;
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				/*
				 * Capture any static TLS use, and enforce that
				 * this object be non-deletable.
				 */
				if (ld->d_un.d_val & DF_STATIC_TLS) {
					FLAGS1(lmp) |= FL1_RT_TLSSTAT;
					MODE(lmp) |= RTLD_NODELETE;
				}
				break;
			case DT_FLAGS_1:
				if (ld->d_un.d_val & DF_1_DISPRELPND)
					FLAGS1(lmp) |= FL1_RT_DISPREL;
				if (ld->d_un.d_val & DF_1_GROUP)
					FLAGS(lmp) |=
					    (FLG_RT_SETGROUP | FLG_RT_HANDLE);
				if ((ld->d_un.d_val & DF_1_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				if (ld->d_un.d_val & DF_1_NODELETE)
					MODE(lmp) |= RTLD_NODELETE;
				if (ld->d_un.d_val & DF_1_INITFIRST)
					FLAGS(lmp) |= FLG_RT_INITFRST;
				if (ld->d_un.d_val & DF_1_NOOPEN)
					FLAGS(lmp) |= FLG_RT_NOOPEN;
				if (ld->d_un.d_val & DF_1_LOADFLTR)
					FLAGS(lmp) |= FLG_RT_LOADFLTR;
				if (ld->d_un.d_val & DF_1_NODUMP)
					FLAGS(lmp) |= FLG_RT_NODUMP;
				if (ld->d_un.d_val & DF_1_CONFALT)
					crle = 1;
				if (ld->d_un.d_val & DF_1_DIRECT)
					FLAGS1(lmp) |= FL1_RT_DIRECT;
				if (ld->d_un.d_val & DF_1_NODEFLIB)
					FLAGS1(lmp) |= FL1_RT_NODEFLIB;
				if (ld->d_un.d_val & DF_1_ENDFILTEE)
					FLAGS1(lmp) |= FL1_RT_ENDFILTE;
				if (ld->d_un.d_val & DF_1_TRANS)
					FLAGS(lmp) |= FLG_RT_TRANS;
#ifndef	EXPAND_RELATIVE
				if (ld->d_un.d_val & DF_1_ORIGIN)
					FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
				/*
				 * Global auditing is only meaningful when
				 * specified by the initiating object of the
				 * process - typically the dynamic executable.
				 * If this is the initiating object, its link-
				 * map will not yet have been added to the
				 * link-map list, and consequently the link-map
				 * list is empty.  (see setup()).
				 */
				if (ld->d_un.d_val & DF_1_GLOBAUDIT) {
					if (lml_main.lm_head == 0)
						FLAGS1(lmp) |= FL1_RT_GLOBAUD;
					else
						DBG_CALL(Dbg_audit_ignore(lmp));
				}

				/*
				 * If this object identifies itself as an
				 * interposer, but relocation processing has
				 * already started, then demote it.  It's too
				 * late to guarantee complete interposition.
				 */
				/* BEGIN CSTYLED */
				if (ld->d_un.d_val &
				    (DF_1_INTERPOSE | DF_1_SYMINTPOSE)) {
				    if (lml->lm_flags & LML_FLG_STARTREL) {
					DBG_CALL(Dbg_util_intoolate(lmp));
					if (lml->lm_flags & LML_FLG_TRC_ENABLE)
					    (void) printf(
						MSG_INTL(MSG_LDD_REL_ERR2),
						NAME(lmp));
				    } else if (ld->d_un.d_val & DF_1_INTERPOSE)
					FLAGS(lmp) |= FLG_RT_OBJINTPO;
				    else
					FLAGS(lmp) |= FLG_RT_SYMINTPO;
				}
				/* END CSTYLED */
				break;
			case DT_SYMINFO:
				SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_SYMINENT:
				SYMINENT(lmp) = ld->d_un.d_val;
				break;
			case DT_PLTPAD:
				PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTPADSZ:
				pltpadsz = ld->d_un.d_val;
				break;
			case DT_SUNW_RTLDINF:
				/*
				 * Maintain a list of RTLDINFO structures.
				 * Typically, libc is the only supplier, and
				 * only one structure is provided.  However,
				 * multiple suppliers and multiple structures
				 * are supported.  For example, one structure
				 * may provide thread_init, and another
				 * structure may provide atexit reservations.
				 */
				if ((rti = alist_append(&lml->lm_rti, 0,
				    sizeof (Rti_desc), AL_CNT_RTLDINFO)) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				rti->rti_lmp = lmp;
				rti->rti_info = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SORTENT:
				SUNWSORTENT(lmp) = ld->d_un.d_val;
				break;
			case DT_SUNW_SYMSORT:
				SUNWSYMSORT(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSORTSZ:
				SUNWSYMSORTSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_DEPRECATED_SPARC_REGISTER:
			case M_DT_REGISTER:
				FLAGS(lmp) |= FLG_RT_REGSYMS;
				break;
			case M_DT_PLTRESERVE:
				PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr +
				    base);
				break;
			}
		}

		if (PLTPAD(lmp)) {
			if (pltpadsz == (Xword)0)
				PLTPAD(lmp) = 0;
			else
				PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) +
				    pltpadsz);
		}

		/*
		 * Allocate the Dynamic Info structure.
		 */
		if ((DYNINFO(lmp) = calloc((size_t)dyncnt,
		    sizeof (Dyninfo))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
		DYNINFOCNT(lmp) = dyncnt;
	}

	/*
	 * A dynsym contains only global functions.  We want to have
	 * a version of it that also includes local functions, so that
	 * dladdr() will be able to report names for local functions
	 * when used to generate a stack trace for a stripped file.
	 * This version of the dynsym is provided via DT_SUNW_SYMTAB.
	 *
	 * In producing DT_SUNW_SYMTAB, ld uses a non-obvious trick
	 * in order to avoid having to have two copies of the global
	 * symbols held in DT_SYMTAB:  The local symbols are placed in
	 * a separate section from the globals in the dynsym, but the
	 * linker conspires to put the data for these two sections adjacent
	 * to each other.  DT_SUNW_SYMTAB points at the top of the local
	 * symbols, and DT_SUNW_SYMSZ is the combined length of both tables.
	 *
	 * If the two sections are not adjacent, then something went wrong
	 * at link time.  We use ASSERT to kill the process if this is
	 * a debug build.  In a production build, we will silently ignore
	 * the presence of the .ldynsym and proceed.  We can detect this
	 * situation by checking to see that DT_SYMTAB lies in
	 * the range given by DT_SUNW_SYMTAB/DT_SUNW_SYMSZ.
	 */
	if ((SUNWSYMTAB(lmp) != NULL) &&
	    (((char *)SYMTAB(lmp) <= (char *)SUNWSYMTAB(lmp)) ||
	    (((char *)SYMTAB(lmp) >=
	    (SUNWSYMSZ(lmp) + (char *)SUNWSYMTAB(lmp)))))) {
		ASSERT(0);
		SUNWSYMTAB(lmp) = NULL;
		SUNWSYMSZ(lmp) = 0;
	}

	/*
	 * If configuration file use hasn't been disabled, and a configuration
	 * file hasn't already been set via an environment variable, see if any
	 * application specific configuration file is specified.  An LD_CONFIG
	 * setting is used first, but if this image was generated via crle(1)
	 * then a default configuration file is a fall-back.
	 */
	if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) {
		if (cfile)
			config->c_name = (const char *)(cfile +
			    (char *)STRTAB(lmp));
		else if (crle) {
			rtld_flags |= RT_FL_CONFAPP;
#ifndef	EXPAND_RELATIVE
			FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
		}
	}

	if (rpath)
		RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp));
	if (fltr) {
		/*
		 * If this object is a global filter, duplicate the filtee
		 * string name(s) so that REFNAME() is available in core files.
		 * This kludge was useful for debuggers at one point, but only
		 * when the filtee name was an individual full path.
		 */
		if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
	}

	if (rtld_flags & RT_FL_RELATIVE)
		FLAGS1(lmp) |= FL1_RT_RELATIVE;

	/*
	 * For Intel ABI compatibility.  It's possible that a JMPREL can be
	 * specified without any other relocations (e.g. a dynamic executable
	 * normally only contains .plt relocations).  If this is the case then
	 * no REL, RELSZ or RELENT will have been created.  For us to be able
	 * to traverse the .plt relocations under LD_BIND_NOW we need to know
	 * the RELENT for these relocations.  Refer to elf_reloc() for more
	 * details.
	 */
	if (!RELENT(lmp) && JMPREL(lmp))
		RELENT(lmp) = sizeof (Rel);

	/*
	 * Establish any per-object auditing.  If we're establishing `main's
	 * link-map it's too early to go searching for audit objects, so just
	 * hold the object name for later (see setup()).
	 */
	if (audit) {
		char	*cp = audit + (char *)STRTAB(lmp);

		if (*cp) {
			if (((AUDITORS(lmp) =
			    calloc(1, sizeof (Audit_desc))) == 0) ||
			    ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) {
				remove_so(0, lmp);
				return (0);
			}
			if (lml_main.lm_head) {
				if (audit_setup(lmp, AUDITORS(lmp), 0) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags;
				lml->lm_flags |= LML_FLG_LOCAUDIT;
			}
		}
	}

	if ((CONDVAR(lmp) = rt_cond_create()) == 0) {
		remove_so(0, lmp);
		return (0);
	}
	if (oname && ((append_alias(lmp, oname, 0)) == 0)) {
		remove_so(0, lmp);
		return (0);
	}

	/*
	 * Add the mapped object to the end of the link map list.
	 */
	lm_append(lml, lmco, lmp);
	return (lmp);
}

/*
 * Assign hardware/software capabilities.
 */
void
cap_assign(Cap *cap, Rt_map *lmp)
{
	while (cap->c_tag != CA_SUNW_NULL) {
		switch (cap->c_tag) {
		case CA_SUNW_HW_1:
			HWCAP(lmp) = cap->c_un.c_val;
			break;
		case CA_SUNW_SF_1:
			SFCAP(lmp) = cap->c_un.c_val;
		}
		cap++;
	}
}
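
/*
 * Illustrative sketch (not part of ld.so.1): the dynamic section scan in
 * elf_new_lm() above follows the generic pattern for consuming a
 * DT_NULL-terminated array of Dyn entries.  Address-valued entries (d_ptr)
 * are rebased for ET_DYN objects, while scalar entries (d_val) are used as
 * is.  A minimal rendition, using hypothetical locals, looks like this.
 */
#if 0
static void
dyn_scan_demo(Dyn *dyn, ulong_t base)
{
	void	*symtab = 0, *strtab = 0;
	ulong_t	syment = 0;

	for (; dyn->d_tag != DT_NULL; dyn++) {
		switch (dyn->d_tag) {
		case DT_SYMTAB:
			/* address-valued: rebase for shared objects */
			symtab = (void *)(dyn->d_un.d_ptr + base);
			break;
		case DT_STRTAB:
			strtab = (void *)(dyn->d_un.d_ptr + base);
			break;
		case DT_SYMENT:
			/* scalar-valued: use directly */
			syment = dyn->d_un.d_val;
			break;
		}
	}
}
#endif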

/*
 * Map in an ELF object.
 * Takes an open file descriptor for the object to map and its pathname;
 * returns a pointer to a Rt_map structure for this object, or 0 on error.
 */
static Rt_map *
elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname,
    int fd)
{
	int		i;		/* general temporary */
	Off		memsize = 0;	/* total memory size of pathname */
	Off		mentry;		/* entry point */
	Ehdr		*ehdr;		/* ELF header of ld.so */
	Phdr		*phdr;		/* first Phdr in file */
	Phdr		*phdr0;		/* Saved first Phdr in file */
	Phdr		*pptr;		/* working Phdr */
	Phdr		*fph = 0;	/* first loadable Phdr */
	Phdr		*lph;		/* last loadable Phdr */
	Phdr		*lfph = 0;	/* last loadable (filesz != 0) Phdr */
	Phdr		*lmph = 0;	/* last loadable (memsz != 0) Phdr */
	Phdr		*swph = 0;	/* program header for SUNWBSS */
	Phdr		*tlph = 0;	/* program header for PT_TLS */
	Phdr		*unwindph = 0;	/* program header for PT_SUNW_UNWIND */
	Cap		*cap = 0;	/* program header for SUNWCAP */
	Dyn		*mld = 0;	/* DYNAMIC structure for pathname */
	size_t		size;		/* size of elf and program headers */
	caddr_t		faddr = 0;	/* mapping address of pathname */
	Rt_map		*lmp;		/* link map created */
	caddr_t		paddr;		/* start of padded image */
	Off		plen;		/* size of image including padding */
	Half		etype;
	int		fixed;
	Mmap		*mmaps;
	uint_t		mmapcnt = 0;
	Xword		align = 0;

	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;

	/*
	 * If this is a relocatable object then special processing is required.
	 */
	if ((etype = ehdr->e_type) == ET_REL)
		return (elf_obj_file(lml, lmco, pname, fd));

	/*
	 * If this isn't a dynamic executable or shared object we can't process
	 * it.  If this is a dynamic executable then all addresses are fixed.
	 */
	if (etype == ET_EXEC) {
		fixed = 1;
	} else if (etype == ET_DYN) {
		fixed = 0;
	} else {
		Conv_inv_buf_t inv_buf;

		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname,
		    conv_ehdr_type(etype, 0, &inv_buf));
		return (0);
	}

	/*
	 * If our original mapped page was not large enough to hold all the
	 * program headers, remap them.
	 */
	size = (size_t)((char *)ehdr->e_phoff +
	    (ehdr->e_phnum * ehdr->e_phentsize));
	if (size > fmap->fm_fsize) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}
	if (size > fmap->fm_msize) {
		fmap_setup();
		if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ,
		    fmap->fm_mflags, fd, 0)) == MAP_FAILED) {
			int	err = errno;

			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname,
			    strerror(err));
			return (0);
		}
		fmap->fm_msize = size;
		/* LINTED */
		ehdr = (Ehdr *)fmap->fm_maddr;
	}
	/* LINTED */
	phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize);

	/*
	 * Get the entry point.
	 */
	mentry = ehdr->e_entry;

	/*
	 * Point at program headers and perform some basic validation.
	 */
	for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
	    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
		if ((pptr->p_type == PT_LOAD) ||
		    (pptr->p_type == PT_SUNWBSS)) {

			if (fph == 0) {
				fph = pptr;
			/* LINTED argument lph is initialized in first pass */
			} else if (pptr->p_vaddr <= lph->p_vaddr) {
				eprintf(lml, ERR_ELF,
				    MSG_INTL(MSG_GEN_INVPRGHDR), pname);
				return (0);
			}

			lph = pptr;

			if (pptr->p_memsz)
				lmph = pptr;
			if (pptr->p_filesz)
				lfph = pptr;
			if (pptr->p_type == PT_SUNWBSS)
				swph = pptr;
			if (pptr->p_align > align)
				align = pptr->p_align;

		} else if (pptr->p_type == PT_DYNAMIC) {
			mld = (Dyn *)(pptr->p_vaddr);
		} else if ((pptr->p_type == PT_TLS) && pptr->p_memsz) {
			tlph = pptr;
		} else if (pptr->p_type == PT_SUNWCAP) {
			cap = (Cap *)(pptr->p_vaddr);
		} else if (pptr->p_type == PT_SUNW_UNWIND) {
			unwindph = pptr;
		}
	}

#if defined(MAP_ALIGN)
	/*
	 * Make sure the maximum page alignment is a power of 2 >= the default
	 * segment alignment, for use with MAP_ALIGN.
	 */
	align = S_ROUND(align, M_SEGM_ALIGN);
#endif

	/*
	 * We'd better have at least one loadable segment, together with some
	 * specified file and memory size.
	 */
	if ((fph == 0) || (lmph == 0) || (lfph == 0)) {
		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname);
		return (0);
	}

	/*
	 * Check that the file's size accounts for the loadable sections
	 * we're going to map in (failure to do this may cause spurious
	 * bus errors if we're given a truncated file).
	 */
	if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}

	/*
	 * Memsize must be page rounded so that if we add object padding
	 * at the end it will start at the beginning of a page.
	 */
	plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) -
	    M_PTRUNC((ulong_t)fph->p_vaddr));

	/*
	 * Determine if an existing mapping is acceptable.
	 */
	if (interp && (lml->lm_flags & LML_FLG_BASELM) &&
	    (strcmp(pname, interp->i_name) == 0)) {
		/*
		 * If this is the interpreter then it has already been mapped
		 * and we have the address so don't map it again.  Note that
		 * the common occurrence of a reference to the interpreter
		 * (libdl -> ld.so.1) will have been caught during filter
		 * initialization (see elf_lookup_filtee()).  However, some
		 * ELF implementations are known to record libc.so.1 as the
		 * interpreter, and thus this test catches this behavior.
		 */
		paddr = faddr = interp->i_faddr;

	} else if ((fixed == 0) && (r_debug.rtd_objpad == 0) &&
	    (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) &&
	    (fph == lph) && (fph->p_filesz == fph->p_memsz) &&
	    (((Xword)fmap->fm_maddr % align) == 0)) {
		size_t	rsize;

		/*
		 * If the file contains a single segment, and the mapping
		 * required has already been established from the initial fmap
		 * mapping, then we don't need to do anything more.  Reset the
		 * fmap address so that any later files start a new fmap.  This
		 * is really an optimization for filters, such as libdl.so,
		 * libthread, etc. that are constructed to be a single text
		 * segment.
		 */
		paddr = faddr = fmap->fm_maddr;

		/*
		 * Free any unused mapping by assigning the fmap buffer to the
		 * unused region.  fmap_setup() will unmap this area and
		 * establish defaults for future mappings.
		 */
		rsize = M_PROUND(fph->p_filesz);
		fmap->fm_maddr += rsize;
		fmap->fm_msize -= rsize;
		fmap_setup();
	}

	/*
	 * Allocate a mapping array to retain mapped segment information.
	 */
	if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0)
		return (0);

	/*
	 * If we're reusing an existing mapping, determine the object's etext
	 * address.  Otherwise map the file (which will calculate the etext
	 * address as part of the mapping process).
	 */
	if (faddr) {
		caddr_t	base;

		if (fixed)
			base = 0;
		else
			base = faddr;

		/* LINTED */
		phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize);

		for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
		    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
			if (pptr->p_type != PT_LOAD)
				continue;

			mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base);
			mmaps[mmapcnt].m_msize = pptr->p_memsz;
			mmaps[mmapcnt].m_fsize = pptr->p_filesz;
			mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC);
			mmapcnt++;

			if (!(pptr->p_flags & PF_W)) {
				fmap->fm_etext = (ulong_t)pptr->p_vaddr +
				    (ulong_t)pptr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
			}
		}
	} else {
		/*
		 * Map the file.
		 */
		if (!(faddr = elf_map_it(lml, pname, memsize, ehdr, fph, lph,
		    &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt)))
			return (0);
	}

	/*
	 * Calculate absolute base addresses and entry points.
	 */
	if (!fixed) {
		if (mld)
			/* LINTED */
			mld = (Dyn *)((Off)mld + faddr);
		if (cap)
			/* LINTED */
			cap = (Cap *)((Off)cap + faddr);
		mentry += (Off)faddr;
	}

	/*
	 * Create a new link map structure for the newly mapped shared object.
	 */
	if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr,
	    fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps,
	    mmapcnt))) {
		(void) munmap((caddr_t)faddr, memsize);
		return (0);
	}

	/*
	 * Start the system loading in the ELF information we'll be processing.
	 */
	if (REL(lmp)) {
		(void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) +
		    (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp),
		    MADV_WILLNEED);
	}

	/*
	 * If this shared object contains any special segments, record them.
	 */
	if (swph) {
		FLAGS(lmp) |= FLG_RT_SUNWBSS;
		SUNWBSS(lmp) = phdr + (swph - phdr0);
	}
	if (tlph && (tls_assign(lml, lmp, (phdr + (tlph - phdr0))) == 0)) {
		remove_so(lml, lmp);
		return (0);
	}

	if (unwindph)
		PTUNWIND(lmp) = phdr + (unwindph - phdr0);

	if (cap)
		cap_assign(cap, lmp);

	return (lmp);
}
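
/*
 * Illustrative sketch (not part of ld.so.1): the image size computed in
 * elf_map_so() above page-rounds the span from the page-truncated start of
 * the first loadable segment to the end of the last one, so that any object
 * padding begins on a page boundary.  With a hypothetical page size, the
 * arithmetic behind M_PROUND()/M_PTRUNC() reduces to the following.
 */
#if 0
#define	DEMO_PAGESIZE	0x1000ul
#define	DEMO_PTRUNC(a)	((a) & ~(DEMO_PAGESIZE - 1))
#define	DEMO_PROUND(a)	(((a) + DEMO_PAGESIZE - 1) & ~(DEMO_PAGESIZE - 1))

static unsigned long
image_size_demo(unsigned long first_vaddr, unsigned long last_vaddr,
    unsigned long last_memsz)
{
	/* round the whole load image up to a page multiple */
	return (DEMO_PROUND((last_vaddr + last_memsz) -
	    DEMO_PTRUNC(first_vaddr)));
}
#endif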

/*
 * Function to correct protection settings.  Segments are all mapped initially
 * with permissions as given in the segment header.  We need to turn on write
 * permissions on a text segment if there are any relocations against that
 * segment, and then turn write permission back off again before returning
 * control to the user.  This function turns the permission on or off depending
 * on the value of the argument.
 */
int
elf_set_prot(Rt_map *lmp, int permission)
{
	Mmap	*mmaps;

	/*
	 * If this is an allocated image (ie. a relocatable object) we can't
	 * mprotect() anything.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC)
		return (1);

	DBG_CALL(Dbg_file_prot(lmp, permission));

	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) {
		if (mmaps->m_perm & PROT_WRITE)
			continue;

		if (mprotect(mmaps->m_vaddr, mmaps->m_msize,
		    (mmaps->m_perm | permission)) == -1) {
			int	err = errno;

			eprintf(LIST(lmp), ERR_FATAL, MSG_INTL(MSG_SYS_MPROT),
			    NAME(lmp), strerror(err));
			return (0);
		}
	}
	return (1);
}

/*
 * Build the full pathname of a shared object from the given directory name
 * and filename.
 */
static char *
elf_get_so(const char *dir, const char *file)
{
	static char	pname[PATH_MAX];

	(void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file);
	return (pname);
}
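
/*
 * Illustrative sketch (not part of ld.so.1): a caller performing text
 * relocations would bracket the relocation pass with elf_set_prot() style
 * calls, first adding PROT_WRITE and then removing it.  A minimal,
 * self-contained rendition over a single hypothetical segment follows
 * (mprotect() is available via the <sys/mman.h> include at the top of this
 * file).
 */
#if 0
static int
text_reloc_demo(caddr_t seg, size_t len)
{
	/* allow the relocation engine to patch read-only text */
	if (mprotect(seg, len, PROT_READ | PROT_WRITE | PROT_EXEC) == -1)
		return (0);

	/* ... apply relocations to the segment here ... */

	/* restore the original protections before returning to the user */
	if (mprotect(seg, len, PROT_READ | PROT_EXEC) == -1)
		return (0);
	return (1);
}
#endif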

/*
 * The copy relocation is recorded in a copy structure which will be applied
 * after all other relocations are carried out.  This provides for copying data
 * that must be relocated itself (ie. pointers in shared objects).  This
 * structure also provides a means of binding RTLD_GROUP dependencies to any
 * copy relocations that have been taken from any group members.
 *
 * If the size of the .bss area available for the copy information is not the
 * same as the source of the data, inform the user if we're under ldd(1)
 * control (this checking was only established in 5.3, so by only issuing an
 * error via ldd(1) we maintain the standard set by previous releases).
 */
int
elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym,
    Rt_map *dlmp, const void *dadd)
{
	Rel_copy	rc;
	Lm_list		*lml = LIST(rlmp);

	rc.r_name = name;
	rc.r_rsym = rsym;		/* the new reference symbol and its */
	rc.r_rlmp = rlmp;		/*	associated link-map */
	rc.r_dlmp = dlmp;		/* the defining link-map */
	rc.r_dsym = dsym;		/* the original definition */
	rc.r_radd = radd;
	rc.r_dadd = dadd;

	if (rsym->st_size > dsym->st_size)
		rc.r_size = (size_t)dsym->st_size;
	else
		rc.r_size = (size_t)rsym->st_size;

	if (alist_append(&COPY(dlmp), &rc, sizeof (Rel_copy),
	    AL_CNT_COPYREL) == 0) {
		if (!(lml->lm_flags & LML_FLG_TRC_WARN))
			return (0);
		else
			return (1);
	}
	if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) {
		if (alist_append(&COPY(rlmp), &dlmp,
		    sizeof (Rt_map *), AL_CNT_COPYREL) == 0) {
			if (!(lml->lm_flags & LML_FLG_TRC_WARN))
				return (0);
			else
				return (1);
		}
		FLAGS1(dlmp) |= FL1_RT_COPYTOOK;
	}

	/*
	 * If we are tracing (ldd), warn the user if
	 *	1) the size from the reference symbol differs from the
	 *	   copy definition.  We can only copy as much data as the
	 *	   reference (dynamic executable's) entry allows.
	 *	2) the copy definition has STV_PROTECTED visibility.
	 */
	if (lml->lm_flags & LML_FLG_TRC_WARN) {
		if (rsym->st_size != dsym->st_size) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(rlmp), EC_XWORD(rsym->st_size),
			    NAME(dlmp), EC_XWORD(dsym->st_size));
			if (rsym->st_size > dsym->st_size)
				(void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA),
				    NAME(dlmp));
			else
				(void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC),
				    NAME(rlmp));
		}

		if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_PROT),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(dlmp));
		}
	}

	DBG_CALL(Dbg_reloc_apply_val(lml, ELF_DBG_RTLD, (Xword)radd,
	    (Xword)rc.r_size));
	return (1);
}
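
/*
 * Illustrative sketch (not part of ld.so.1): once recorded, a copy
 * relocation is ultimately satisfied by copying the defining object's data
 * into the executable's .bss reservation, using the smaller of the two
 * symbol sizes as computed in elf_copy_reloc() above.  In isolation the
 * final application amounts to the following (memcpy() is available via the
 * <string.h> include at the top of this file).
 */
#if 0
static void
copy_reloc_apply_demo(void *radd, const void *dadd, size_t rsize,
    size_t dsize)
{
	/* never copy more than either symbol provides space for */
	size_t	size = (rsize < dsize) ? rsize : dsize;

	(void) memcpy(radd, dadd, size);
}
#endif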

/*
 * Determine the symbol location of an address within a link-map.  Look for
 * the nearest symbol (whose value is less than or equal to the required
 * address).  This is the object specific part of dladdr().
 */
static void
elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags)
{
	ulong_t		ndx, cnt, base, _value;
	Sym		*sym, *_sym = NULL;
	const char	*str;
	int		_flags;
	uint_t		*dynaddr_ndx;
	uint_t		dynaddr_n = 0;
	ulong_t		value;

	/*
	 * If SUNWSYMTAB() is non-NULL, then it references a special version of
	 * the dynsym that starts with any local function symbols that exist in
	 * the library and then moves to the data held in SYMTAB().  In this
	 * case, SUNWSYMSZ tells us how long the symbol table is.  The
	 * availability of local function symbols will enhance the results
	 * we can provide.
	 *
	 * If SUNWSYMTAB() is non-NULL, then there might also be a
	 * SUNWSYMSORT() vector associated with it.  SUNWSYMSORT() contains
	 * an array of indices into SUNWSYMTAB, sorted by increasing
	 * address.  We can use this to do an O(log N) search instead of a
	 * brute force search.
	 *
	 * If SUNWSYMTAB() is NULL, then SYMTAB() references a dynsym that
	 * contains only global symbols.  In that case, the length of
	 * the symbol table comes from the nchain field of the related
	 * symbol lookup hash table.
	 */
	str = STRTAB(lmp);
	if (SUNWSYMSZ(lmp) == NULL) {
		sym = SYMTAB(lmp);
		/*
		 * If we don't have a .hash table there are no symbols
		 * to look at.
		 */
		if (HASH(lmp) == 0)
			return;
		cnt = HASH(lmp)[1];
	} else {
		sym = SUNWSYMTAB(lmp);
		cnt = SUNWSYMSZ(lmp) / SYMENT(lmp);
		dynaddr_ndx = SUNWSYMSORT(lmp);
		if (dynaddr_ndx != NULL)
			dynaddr_n = SUNWSYMSORTSZ(lmp) / SUNWSORTENT(lmp);
	}

	if (FLAGS(lmp) & FLG_RT_FIXED)
		base = 0;
	else
		base = ADDR(lmp);

	if (dynaddr_n > 0) {		/* Binary search */
		long	low = 0, low_bnd;
		long	high = dynaddr_n - 1, high_bnd;
		long	mid;
		Sym	*mid_sym;

		/*
		 * Note that SUNWSYMSORT only contains symbol types that
		 * supply memory addresses, so there's no need to check and
		 * filter out any other types.
		 */
		low_bnd = low;
		high_bnd = high;
		while (low <= high) {
			mid = (low + high) / 2;
			mid_sym = &sym[dynaddr_ndx[mid]];
			value = mid_sym->st_value + base;
			if (addr < value) {
				if ((sym[dynaddr_ndx[high]].st_value + base) >=
				    addr)
					high_bnd = high;
				high = mid - 1;
			} else if (addr > value) {
				if ((sym[dynaddr_ndx[low]].st_value + base) <=
				    addr)
					low_bnd = low;
				low = mid + 1;
			} else {
				_sym = mid_sym;
				_value = value;
				break;
			}
		}
		/*
		 * If the above didn't find it exactly, then we must
		 * return the closest symbol with a value that doesn't
		 * exceed the one we are looking for.  If that symbol exists,
		 * it will lie in the range bounded by low_bnd and
		 * high_bnd.  This is a linear search, but a short one.
		 */
		if (_sym == NULL) {
			for (mid = low_bnd; mid <= high_bnd; mid++) {
				mid_sym = &sym[dynaddr_ndx[mid]];
				value = mid_sym->st_value + base;
				if (addr >= value) {
					_sym = mid_sym;
					_value = value;
				} else {
					break;
				}
			}
		}
	} else {			/* Linear search */
		for (_value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) {
			/*
			 * Skip expected symbol types that are not functions
			 * or data:
			 *	- A symbol table starts with an undefined
			 *	  symbol in slot 0.  If we are using
			 *	  SUNWSYMTAB(), there will be a second
			 *	  undefined symbol right before the globals.
			 *	- The local part of SUNWSYMTAB() contains a
			 *	  series of function symbols.  Each section
			 *	  starts with an initial STT_FILE symbol.
			 */
			if ((sym->st_shndx == SHN_UNDEF) ||
			    (ELF_ST_TYPE(sym->st_info) == STT_FILE))
				continue;

			value = sym->st_value + base;
			if (value > addr)
				continue;
			if (value < _value)
				continue;

			_sym = sym;
			_value = value;

			/*
			 * Note, because we accept local and global symbols
			 * we could find a section symbol that matches the
			 * associated address, which means that the symbol
			 * name will be null.  In this case continue the
			 * search in case we can find a global symbol of
			 * the same value.
			 */
			if ((value == addr) &&
			    (ELF_ST_TYPE(sym->st_info) != STT_SECTION))
				break;
		}
	}

	_flags = flags & RTLD_DL_MASK;
	if (_sym) {
		if (_flags == RTLD_DL_SYMENT)
			*info = (void *)_sym;
		else if (_flags == RTLD_DL_LINKMAP)
			*info = (void *)lmp;

		dlip->dli_sname = str + _sym->st_name;
		dlip->dli_saddr = (void *)_value;
	} else {
		/*
		 * addr lies between the beginning of the mapped segment and
		 * the first global symbol.  We have no symbol to return
		 * and the caller requires one.  We use _START_, the base
		 * address of the mapping.
		 */

		if (_flags == RTLD_DL_SYMENT) {
			/*
			 * An actual symbol struct is needed, so we
			 * construct one for _START_.  To do this in a
			 * fully accurate way requires a different symbol
			 * for each mapped segment.  This requires the
			 * use of dynamic memory and a mutex.  That's too much
			 * plumbing for a fringe case of limited importance.
			 *
			 * Fortunately, we can simplify:
			 *	- Only the st_size and st_info fields are
			 *	  useful outside of the linker internals.  The
			 *	  others reference things that outside code
			 *	  cannot see, and can be set to 0.
			 *	- It's just a label and there is no size
			 *	  to report.  So, the size should be 0.
			 * This means that only st_info needs a non-zero
			 * (constant) value.  A static struct will suffice.
			 * It must be const (readonly) so the caller can't
			 * change its meaning for subsequent callers.
			 */
			static const Sym fsym = { 0, 0, 0,
			    ELF_ST_INFO(STB_LOCAL, STT_OBJECT) };

			*info = (void *)&fsym;
		}

		dlip->dli_sname = MSG_ORIG(MSG_SYM_START);
		dlip->dli_saddr = (void *)ADDR(lmp);
	}
}
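
/*
 * Illustrative sketch (not part of ld.so.1): the sorted-index search in
 * elf_dladdr() above is a variant of the classic "greatest value <= key"
 * binary search over an index vector sorted by address.  Stripped of the
 * link-map plumbing, the core algorithm is as follows.
 */
#if 0
static long
nearest_le_demo(const ulong_t *addrs, long n, ulong_t key)
{
	long	low = 0, high = n - 1, best = -1;

	while (low <= high) {
		long	mid = (low + high) / 2;

		if (addrs[mid] <= key) {
			best = mid;	/* candidate; look for a closer one */
			low = mid + 1;
		} else {
			high = mid - 1;
		}
	}
	return (best);		/* -1 if every entry exceeds key */
}
#endif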

static void
elf_lazy_cleanup(Alist *alp)
{
	Rt_map	**lmpp;
	Aliste	off;

	/*
	 * Cleanup any link-maps added to this dynamic list and free it.
	 */
	for (ALIST_TRAVERSE(alp, off, lmpp))
		FLAGS(*lmpp) &= ~FLG_RT_DLSYM;
	free(alp);
}
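
/*
 * Illustrative sketch (not part of ld.so.1): elf_lazy_find_sym() below is
 * in essence a breadth-first traversal over DT_NEEDED dependencies, using
 * an append-only worklist and a visited mark (FLG_RT_DLSYM) to avoid
 * re-inspecting an object.  The shape of that traversal, over a
 * hypothetical node type, is as follows.
 */
#if 0
typedef struct demo_node {
	struct demo_node	**deps;		/* NULL-terminated deps */
	int			visited;
} demo_node_t;

static demo_node_t *
bfs_demo(demo_node_t **work, int nwork, int maxwork,
    int (*found)(demo_node_t *))
{
	int	i;

	for (i = 0; i < nwork; i++) {
		demo_node_t	**dpp;

		for (dpp = work[i]->deps; *dpp != NULL; dpp++) {
			demo_node_t	*dep = *dpp;

			if (dep->visited)
				continue;
			if (found(dep))
				return (dep);
			if (nwork < maxwork) {
				dep->visited = 1;
				work[nwork++] = dep;	/* widen the search */
			}
		}
	}
	return (NULL);
}
#endif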

/*
 * This routine is called upon to search for a symbol from the dependencies of
 * the initial link-map.  To maintain lazy loading's goal of reducing the
 * number of objects mapped, any symbol search is first carried out using the
 * objects that already exist in the process (either on a link-map list or
 * handle).  If a symbol can't be found, and lazy dependencies are still
 * pending, this routine loads the dependencies in an attempt to locate the
 * symbol.
 *
 * Only new objects are inspected as we will have already inspected presently
 * loaded objects before calling this routine.  However, a new object may not
 * be new - although the di_lmp might be zero, the object may have been mapped
 * as someone else's dependency.  Thus there's a possibility of some symbol
 * search duplication.
 */
Sym *
elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo)
{
	Sym		*sym = 0;
	Alist		*alist = 0;
	Aliste		off;
	Rt_map		**lmpp, *lmp = slp->sl_imap;
	const char	*name = slp->sl_name;

	if (alist_append(&alist, &lmp, sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0)
		return (0);
	FLAGS(lmp) |= FLG_RT_DLSYM;

	for (ALIST_TRAVERSE(alist, off, lmpp)) {
		uint_t	cnt = 0;
		Slookup	sl = *slp;
		Dyninfo	*dip;

		/*
		 * Loop through the DT_NEEDED entries examining each object for
		 * the symbol.  If the symbol is not found the object is in
		 * turn added to the alist, so that its DT_NEEDED entries may
		 * be examined.
		 */
		lmp = *lmpp;
		for (dip = DYNINFO(lmp); cnt < DYNINFOCNT(lmp); cnt++, dip++) {
			Rt_map	*nlmp;

			if (((dip->di_flags & FLG_DI_NEEDED) == 0) ||
			    dip->di_info)
				continue;

			/*
			 * If this entry defines a lazy dependency try loading
			 * it.  If the file can't be loaded, consider this
			 * non-fatal and continue the search (lazy loaded
			 * dependencies need not exist and their loading should
			 * only be fatal if called from a relocation).
			 *
			 * If the file is already loaded and relocated we must
			 * still inspect it for symbols, even though it might
			 * have already been searched.  This lazy load
			 * operation might have promoted the permissions of the
			 * object, and thus made the object applicable for this
			 * symbol search, whereas before the object might have
			 * been skipped.
			 */
			if ((nlmp = elf_lazy_load(lmp, cnt, name)) == 0)
				continue;

			/*
			 * If this object isn't yet a part of the dynamic list
			 * then inspect it for the symbol.  If the symbol isn't
			 * found add the object to the dynamic list so that we
			 * can inspect its dependencies.
			 */
			if (FLAGS(nlmp) & FLG_RT_DLSYM)
				continue;

			sl.sl_imap = nlmp;
			if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, binfo))
				break;

			/*
			 * Some dlsym() operations are already traversing a
			 * link-map (dlopen(0)), and thus there's no need to
			 * build our own dynamic dependency list.
			 */
			if ((sl.sl_flags & LKUP_NODESCENT) == 0) {
				if (alist_append(&alist, &nlmp,
				    sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) {
					elf_lazy_cleanup(alist);
					return (0);
				}
				FLAGS(nlmp) |= FLG_RT_DLSYM;
			}
		}
		if (sym)
			break;
	}

	elf_lazy_cleanup(alist);
	return (sym);
}

/*
 * Warning message for bad r_offset.
 */
void
elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset,
    ulong_t rsymndx)
{
	const char	*name = (char *)0;
	Lm_list		*lml = LIST(lmp);
	int		trace;

	if ((lml->lm_flags & LML_FLG_TRC_ENABLE) &&
	    (((rtld_flags & RT_FL_SILENCERR) == 0) ||
	    (lml->lm_flags & LML_FLG_TRC_VERBOSE)))
		trace = 1;
	else
		trace = 0;

	if ((trace == 0) && (DBG_ENABLED == 0))
		return;

	if (rsymndx) {
		Sym	*symref = (Sym *)((ulong_t)SYMTAB(lmp) +
		    (rsymndx * SYMENT(lmp)));

		if (ELF_ST_BIND(symref->st_info) != STB_LOCAL)
			name = (char *)(STRTAB(lmp) + symref->st_name);
	}

	if (name == 0)
		name = MSG_ORIG(MSG_STR_EMPTY);

	if (trace) {
		const char	*rstr;

		rstr = _conv_reloc_type((uint_t)rtype);
		(void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name,
		    EC_ADDR(roffset));
		return;
	}

	Dbg_reloc_error(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, name);
}
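
/*
 * Illustrative sketch (not part of ld.so.1): the diagnostic above recovers
 * a symbol name from a relocation's symbol index by scaling the index by
 * the symbol entry size and then indexing the string table - the same
 * arithmetic any relocation engine uses.  In isolation:
 */
#if 0
static const char *
reloc_sym_name_demo(const Sym *symtab, const char *strtab, size_t syment,
    ulong_t symndx)
{
	const Sym	*sym = (const Sym *)((const char *)symtab +
	    (symndx * syment));

	return (strtab + sym->st_name);
}
#endif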

/*
 * Resolve a static TLS relocation.
 */
long
elf_static_tls(Rt_map *lmp, Sym *sym, void *rel, uchar_t rtype, char *name,
    ulong_t roffset, long value)
{
	Lm_list	*lml = LIST(lmp);

	/*
	 * Relocations against a static TLS block have limited support once
	 * process initialization has completed.  Any error condition should
	 * be discovered by testing for DF_STATIC_TLS as part of loading an
	 * object, however individual relocations are tested in case the
	 * dynamic flag had not been set when this object was built.
	 */
	if (PTTLS(lmp) == 0) {
		DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
		    _conv_reloc_type((uint_t)rtype), NAME(lmp),
		    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
		return (0);
	}

	/*
	 * If no static TLS has been set aside for this object, determine if
	 * any can be obtained.  Enforce that any object using static TLS is
	 * non-deletable.
	 */
	if (TLSSTATOFF(lmp) == 0) {
		FLAGS1(lmp) |= FL1_RT_TLSSTAT;
		MODE(lmp) |= RTLD_NODELETE;

		if (tls_assign(lml, lmp, PTTLS(lmp)) == 0) {
			DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
			    M_REL_SHT_TYPE, rel, NULL, name));
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
			    _conv_reloc_type((uint_t)rtype), NAME(lmp),
			    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
			return (0);
		}
	}

	/*
	 * Typically, a static TLS offset is maintained as a symbol's value.
	 * For local symbols that are not a part of the dynamic symbol table,
	 * the TLS relocation points to a section symbol, and the static TLS
	 * offset was deposited in the associated GOT table.  Make sure the GOT
	 * is cleared, so that the value isn't reused in do_reloc().
	 */
	if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) {
		if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) {
			value = *(long *)roffset;
			*(long *)roffset = 0;
		} else {
			value = sym->st_value;
		}
	}
	return (-(TLSSTATOFF(lmp) - value));
}
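
/*
 * Illustrative sketch (not part of ld.so.1): the value computed by
 * elf_static_tls() above, -(TLSSTATOFF - value), is a thread-pointer
 * relative offset.  Assuming a variant-2 TLS layout (static TLS placed
 * below the thread pointer, as on x86 and SPARC), a thread would reach its
 * copy of the variable roughly as follows.
 */
#if 0
static void *
static_tls_addr_demo(char *thread_pointer, long tprel_offset)
{
	/* tprel_offset is negative for variant-2 static TLS */
	return (thread_pointer + tprel_offset);
}
#endif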