1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 23 /* 24 * Copyright (c) 1988 AT&T 25 * All Rights Reserved 26 * 27 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 28 * Use is subject to license terms. 29 */ 30 #pragma ident "%Z%%M% %I% %E% SMI" 31 32 /* 33 * Object file dependent support for ELF objects. 34 */ 35 #include "_synonyms.h" 36 37 #include <stdio.h> 38 #include <sys/procfs.h> 39 #include <sys/mman.h> 40 #include <sys/debug.h> 41 #include <string.h> 42 #include <limits.h> 43 #include <dlfcn.h> 44 #include "conv.h" 45 #include "_rtld.h" 46 #include "_audit.h" 47 #include "_elf.h" 48 #include "msg.h" 49 #include "debug.h" 50 51 /* 52 * Default and secure dependency search paths. 53 */ 54 static Pnode elf_dflt_dirs[] = { 55 #if defined(_ELF64) 56 #ifndef SGS_PRE_UNIFIED_PROCESS 57 { MSG_ORIG(MSG_PTH_LIB_64), 0, MSG_PTH_LIB_64_SIZE, 58 LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] }, 59 #endif 60 { MSG_ORIG(MSG_PTH_USRLIB_64), 0, MSG_PTH_USRLIB_64_SIZE, 61 LA_SER_DEFAULT, 0, 0 } 62 #else 63 #ifndef SGS_PRE_UNIFIED_PROCESS 64 { MSG_ORIG(MSG_PTH_LIB), 0, MSG_PTH_LIB_SIZE, 65 LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] }, 66 #endif 67 { MSG_ORIG(MSG_PTH_USRLIB), 0, MSG_PTH_USRLIB_SIZE, 68 LA_SER_DEFAULT, 0, 0 } 69 #endif 70 }; 71 72 static Pnode elf_secure_dirs[] = { 73 #if defined(_ELF64) 74 #ifndef SGS_PRE_UNIFIED_PROCESS 75 { MSG_ORIG(MSG_PTH_LIBSE_64), 0, MSG_PTH_LIBSE_64_SIZE, 76 LA_SER_SECURE, 0, &elf_secure_dirs[1] }, 77 #endif 78 { MSG_ORIG(MSG_PTH_USRLIBSE_64), 0, 79 MSG_PTH_USRLIBSE_64_SIZE, 80 LA_SER_SECURE, 0, 0 } 81 #else 82 #ifndef SGS_PRE_UNIFIED_PROCESS 83 { MSG_ORIG(MSG_PTH_LIBSE), 0, MSG_PTH_LIBSE_SIZE, 84 LA_SER_SECURE, 0, &elf_secure_dirs[1] }, 85 #endif 86 { MSG_ORIG(MSG_PTH_USRLIBSE), 0, MSG_PTH_USRLIBSE_SIZE, 87 LA_SER_SECURE, 0, 0 } 88 #endif 89 }; 90 91 /* 92 * Defines for local functions. 93 */ 94 static Pnode *elf_fix_name(const char *, Rt_map *, uint_t); 95 static int elf_are_u(Rej_desc *); 96 static void elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int); 97 static ulong_t elf_entry_pt(void); 98 static char *elf_get_so(const char *, const char *); 99 static Rt_map *elf_map_so(Lm_list *, Aliste, const char *, const char *, int); 100 static int elf_needed(Lm_list *, Aliste, Rt_map *); 101 static void elf_unmap_so(Rt_map *); 102 static int elf_verify_vers(const char *, Rt_map *, Rt_map *); 103 104 /* 105 * Functions and data accessed through indirect pointers. 106 */ 107 Fct elf_fct = { 108 elf_are_u, 109 elf_entry_pt, 110 elf_map_so, 111 elf_unmap_so, 112 elf_needed, 113 lookup_sym, 114 elf_reloc, 115 elf_dflt_dirs, 116 elf_secure_dirs, 117 elf_fix_name, 118 elf_get_so, 119 elf_dladdr, 120 dlsym_handle, 121 elf_verify_vers, 122 elf_set_prot 123 }; 124 125 126 /* 127 * Redefine NEEDED name if necessary. 128 */ 129 static Pnode * 130 elf_fix_name(const char *name, Rt_map *clmp, uint_t orig) 131 { 132 /* 133 * For ABI compliance, if we are asked for ld.so.1, then really give 134 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1). 135 */ 136 if (((*name == '/') && 137 #if defined(_ELF64) 138 (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) || 139 #else 140 (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) || 141 #endif 142 (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) { 143 Pnode *pnp; 144 145 DBG_CALL(Dbg_file_fixname(name, MSG_ORIG(MSG_PTH_LIBSYS))); 146 if (((pnp = calloc(sizeof (Pnode), 1)) == 0) || 147 ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) { 148 if (pnp) 149 free(pnp); 150 return (0); 151 } 152 pnp->p_len = MSG_PTH_LIBSYS_SIZE; 153 pnp->p_orig = (orig & PN_SER_MASK); 154 return (pnp); 155 } 156 157 return (expand_paths(clmp, name, orig, 0)); 158 } 159 160 /* 161 * Determine if we have been given an ELF file and if so determine if the file 162 * is compatible. Returns 1 if true, else 0 and sets the reject descriptor 163 * with associated error information. 164 */ 165 static int 166 elf_are_u(Rej_desc *rej) 167 { 168 Ehdr *ehdr; 169 170 /* 171 * Determine if we're an elf file. If not simply return, we don't set 172 * any rejection information as this test allows use to scroll through 173 * the objects we support (ELF, AOUT). 174 */ 175 if (fmap->fm_fsize < sizeof (Ehdr) || 176 fmap->fm_maddr[EI_MAG0] != ELFMAG0 || 177 fmap->fm_maddr[EI_MAG1] != ELFMAG1 || 178 fmap->fm_maddr[EI_MAG2] != ELFMAG2 || 179 fmap->fm_maddr[EI_MAG3] != ELFMAG3) { 180 return (0); 181 } 182 183 /* 184 * Check class and encoding. 185 */ 186 /* LINTED */ 187 ehdr = (Ehdr *)fmap->fm_maddr; 188 if (ehdr->e_ident[EI_CLASS] != M_CLASS) { 189 rej->rej_type = SGS_REJ_CLASS; 190 rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS]; 191 return (0); 192 } 193 if (ehdr->e_ident[EI_DATA] != M_DATA) { 194 rej->rej_type = SGS_REJ_DATA; 195 rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA]; 196 return (0); 197 } 198 if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) && 199 (ehdr->e_type != ET_DYN)) { 200 rej->rej_type = SGS_REJ_TYPE; 201 rej->rej_info = (uint_t)ehdr->e_type; 202 return (0); 203 } 204 205 /* 206 * Verify machine specific flags, and hardware capability requirements. 207 */ 208 if ((elf_mach_flags_check(rej, ehdr) == 0) || 209 ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0))) 210 return (0); 211 212 /* 213 * Verify ELF version. ??? is this too restrictive ??? 214 */ 215 if (ehdr->e_version > EV_CURRENT) { 216 rej->rej_type = SGS_REJ_VERSION; 217 rej->rej_info = (uint_t)ehdr->e_version; 218 return (0); 219 } 220 return (1); 221 } 222 223 /* 224 * The runtime linker employs lazy loading to provide the libraries needed for 225 * debugging, preloading .o's and dldump(). As these are seldom used, the 226 * standard startup of ld.so.1 doesn't initialize all the information necessary 227 * to perform plt relocation on ld.so.1's link-map. The first time lazy loading 228 * is called we get here to perform these initializations: 229 * 230 * o elf_needed() is called to set up the DYNINFO() indexes for each lazy 231 * dependency. Typically, for all other objects, this is called during 232 * analyze_so(), but as ld.so.1 is set-contained we skip this processing. 233 * 234 * o For intel, ld.so.1's JMPSLOT relocations need relative updates. These 235 * are by default skipped thus delaying all relative relocation processing 236 * on every invocation of ld.so.1. 237 */ 238 int 239 elf_rtld_load() 240 { 241 Lm_list *lml = &lml_rtld; 242 Rt_map *lmp = lml->lm_head; 243 244 if (lml->lm_flags & LML_FLG_PLTREL) 245 return (1); 246 247 /* 248 * As we need to refer to the DYNINFO() information, insure that it has 249 * been initialized. 250 */ 251 if (elf_needed(lml, ALO_DATA, lmp) == 0) 252 return (0); 253 254 #if defined(i386) 255 /* 256 * This is a kludge to give ld.so.1 a performance benefit on i386. 257 * It's based around two factors. 258 * 259 * o JMPSLOT relocations (PLT's) actually need a relative relocation 260 * applied to the GOT entry so that they can find PLT0. 261 * 262 * o ld.so.1 does not exercise *any* PLT's before it has made a call 263 * to elf_lazy_load(). This is because all dynamic dependencies 264 * are recorded as lazy dependencies. 265 */ 266 (void) elf_reloc_relacount((ulong_t)JMPREL(lmp), 267 (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp), 268 (ulong_t)ADDR(lmp)); 269 #endif 270 271 lml->lm_flags |= LML_FLG_PLTREL; 272 return (1); 273 } 274 275 /* 276 * Lazy load an object. 277 */ 278 Rt_map * 279 elf_lazy_load(Rt_map *clmp, uint_t ndx, const char *sym) 280 { 281 Rt_map *nlmp, *hlmp; 282 Dyninfo *dip = &DYNINFO(clmp)[ndx]; 283 uint_t flags = 0; 284 Pnode *pnp; 285 const char *name; 286 Lm_list *lml = LIST(clmp); 287 Lm_cntl *lmc; 288 Aliste lmco; 289 290 /* 291 * If this dependency has already been processed, we're done. 292 */ 293 if (((nlmp = (Rt_map *)dip->di_info) != 0) || 294 (dip->di_flags & FLG_DI_PROCESSD)) 295 return (nlmp); 296 297 /* 298 * Determine the initial dependency name, and indicate that this 299 * dependencies processing has initiated. 300 */ 301 name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val; 302 DBG_CALL(Dbg_file_lazyload(name, NAME(clmp), sym)); 303 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 304 dip->di_flags |= FLG_DI_PROCESSD; 305 306 if (dip->di_flags & FLG_DI_GROUP) 307 flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE); 308 309 /* 310 * Expand the requested name if necessary. 311 */ 312 if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) 313 return (0); 314 315 /* 316 * Provided the object on the head of the link-map has completed its 317 * relocation, create a new link-map control list for this request. 318 */ 319 hlmp = lml->lm_head; 320 if (FLAGS(hlmp) & FLG_RT_RELOCED) { 321 if ((lmc = alist_append(&(lml->lm_lists), 0, sizeof (Lm_cntl), 322 AL_CNT_LMLISTS)) == 0) { 323 remove_pnode(pnp); 324 return (0); 325 } 326 lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists); 327 } else { 328 lmc = 0; 329 lmco = ALO_DATA; 330 } 331 332 /* 333 * Load the associated object. 334 */ 335 dip->di_info = nlmp = 336 load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0); 337 338 /* 339 * Remove any expanded pathname infrastructure. Reduce the pending lazy 340 * dependency count of the caller, together with the link-map lists 341 * count of objects that still have lazy dependencies pending. 342 */ 343 remove_pnode(pnp); 344 if (--LAZY(clmp) == 0) 345 LIST(clmp)->lm_lazy--; 346 347 /* 348 * Finish processing the objects associated with this request. 349 */ 350 if (nlmp && ((analyze_lmc(lml, lmco, nlmp) == 0) || 351 (relocate_lmc(lml, lmco, nlmp) == 0))) 352 dip->di_info = nlmp = 0; 353 354 /* 355 * If the dependency has been successfully processed, and it is part of 356 * a link-map control list that is equivalent, or less, that the callers 357 * control list, create an association between the caller and this 358 * dependency. If this dependency isn't yet apart of the callers 359 * link-map control list, then it is still apart of a list that is being 360 * relocated. As the relocation of an object on this list might still 361 * fail, we can't yet bind the caller to this object. To do so, would 362 * be locking the object so that it couldn't be deleted. Mark this 363 * object as free, and it will be reprocessed when this dependency is 364 * next referenced. 365 */ 366 if (nlmp) { 367 if (CNTL(nlmp) <= CNTL(clmp)) { 368 if (bind_one(clmp, nlmp, BND_NEEDED) == 0) 369 dip->di_info = nlmp = 0; 370 } else { 371 dip->di_info = 0; 372 dip->di_flags &= ~FLG_DI_PROCESSD; 373 if (LAZY(clmp)++ == 0) 374 LIST(clmp)->lm_lazy++; 375 } 376 } 377 378 /* 379 * After a successful load, any objects collected on the new link-map 380 * control list will have been moved to the callers link-map control 381 * list. This control list can now be deleted. 382 */ 383 if (lmc) { 384 if (nlmp == 0) 385 remove_incomplete(lml, lmco); 386 remove_cntl(lml, lmco); 387 } 388 389 return (nlmp); 390 } 391 392 393 /* 394 * Return the entry point of the ELF executable. 395 */ 396 static ulong_t 397 elf_entry_pt(void) 398 { 399 return (ENTRY(lml_main.lm_head)); 400 } 401 402 /* 403 * Unmap a given ELF shared object from the address space. 404 */ 405 static void 406 elf_unmap_so(Rt_map *lmp) 407 { 408 caddr_t addr; 409 size_t size; 410 Mmap *mmaps; 411 412 /* 413 * If this link map represents a relocatable object concatenation, then 414 * the image was simply generated in allocated memory. Free the memory. 415 * 416 * Note: the memory was originally allocated in the libelf:_elf_outmap 417 * routine and would normally have been free'd in elf_outsync(), but 418 * because we 'interpose' on that routine the memory wasn't free'd at 419 * that time. 420 */ 421 if (FLAGS(lmp) & FLG_RT_IMGALLOC) { 422 free((void *)ADDR(lmp)); 423 return; 424 } 425 426 /* 427 * If padding was enabled via rtld_db, then we have at least one page 428 * in front of the image - and possibly a trailing page. 429 * Unmap the front page first: 430 */ 431 if (PADSTART(lmp) != ADDR(lmp)) { 432 addr = (caddr_t)M_PTRUNC(PADSTART(lmp)); 433 size = ADDR(lmp) - (ulong_t)addr; 434 (void) munmap(addr, size); 435 } 436 437 /* 438 * Unmap any trailing padding. 439 */ 440 if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) > 441 M_PROUND(ADDR(lmp) + MSIZE(lmp))) { 442 addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp)); 443 size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr; 444 (void) munmap(addr, size); 445 } 446 447 /* 448 * Unmmap all mapped segments. 449 */ 450 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) 451 (void) munmap(mmaps->m_vaddr, mmaps->m_msize); 452 } 453 454 /* 455 * Determine if a dependency requires a particular version and if so verify 456 * that the version exists in the dependency. 457 */ 458 static int 459 elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp) 460 { 461 Verneed *vnd = VERNEED(clmp); 462 int _num, num = VERNEEDNUM(clmp); 463 char *cstrs = (char *)STRTAB(clmp); 464 Lm_list *lml = LIST(clmp); 465 466 /* 467 * Traverse the callers version needed information and determine if any 468 * specific versions are required from the dependency. 469 */ 470 for (_num = 1; _num <= num; _num++, 471 vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) { 472 Half cnt = vnd->vn_cnt; 473 Vernaux *vnap; 474 char *nstrs, *need; 475 476 /* 477 * Determine if a needed entry matches this dependency. 478 */ 479 need = (char *)(cstrs + vnd->vn_file); 480 if (strcmp(name, need) != 0) 481 continue; 482 483 DBG_CALL(Dbg_ver_need_title(NAME(clmp))); 484 if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) && 485 ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0)) 486 (void) printf(MSG_INTL(MSG_LDD_VER_FIND), name); 487 488 /* 489 * Validate that each version required actually exists in the 490 * dependency. 491 */ 492 nstrs = (char *)STRTAB(nlmp); 493 494 for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt; 495 cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) { 496 char *version, *define; 497 Verdef *vdf = VERDEF(nlmp); 498 ulong_t _num, num = VERDEFNUM(nlmp); 499 int found = 0; 500 501 version = (char *)(cstrs + vnap->vna_name); 502 DBG_CALL(Dbg_ver_need_entry(0, need, version)); 503 504 for (_num = 1; _num <= num; _num++, 505 vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) { 506 Verdaux *vdap; 507 508 if (vnap->vna_hash != vdf->vd_hash) 509 continue; 510 511 vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux); 512 define = (char *)(nstrs + vdap->vda_name); 513 if (strcmp(version, define) != 0) 514 continue; 515 516 found++; 517 break; 518 } 519 520 /* 521 * If we're being traced print out any matched version 522 * when the verbose (-v) option is in effect. Always 523 * print any unmatched versions. 524 */ 525 if (lml->lm_flags & LML_FLG_TRC_ENABLE) { 526 if (found) { 527 if (!(lml->lm_flags & LML_FLG_TRC_VERBOSE)) 528 continue; 529 530 (void) printf(MSG_ORIG(MSG_LDD_VER_FOUND), 531 need, version, NAME(nlmp)); 532 } else { 533 if (rtld_flags & RT_FL_SILENCERR) 534 continue; 535 536 (void) printf(MSG_INTL(MSG_LDD_VER_NFOUND), 537 need, version); 538 } 539 continue; 540 } 541 542 /* 543 * If the version hasn't been found then this is a 544 * candidate for a fatal error condition. Weak 545 * version definition requirements are silently 546 * ignored. Also, if the image inspected for a version 547 * definition has no versioning recorded at all then 548 * silently ignore this (this provides better backward 549 * compatibility to old images created prior to 550 * versioning being available). Both of these skipped 551 * diagnostics are available under tracing (see above). 552 */ 553 if ((found == 0) && (num != 0) && 554 (!(vnap->vna_flags & VER_FLG_WEAK))) { 555 eprintf(ERR_FATAL, MSG_INTL(MSG_VER_NFOUND), 556 need, version, NAME(clmp)); 557 return (0); 558 } 559 } 560 return (1); 561 } 562 return (1); 563 } 564 565 /* 566 * Search through the dynamic section for DT_NEEDED entries and perform one 567 * of two functions. If only the first argument is specified then load the 568 * defined shared object, otherwise add the link map representing the defined 569 * link map the the dlopen list. 570 */ 571 static int 572 elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp) 573 { 574 Dyn *dyn; 575 ulong_t ndx = 0; 576 uint_t lazy = 0, flags = 0; 577 Word lmflags = lml->lm_flags; 578 Word lmtflags = lml->lm_tflags; 579 580 /* 581 * Process each shared object on needed list. 582 */ 583 if (DYN(clmp) == 0) 584 return (1); 585 586 for (dyn = (Dyn *)DYN(clmp); dyn->d_tag != DT_NULL; dyn++, ndx++) { 587 Dyninfo *dip = &DYNINFO(clmp)[ndx]; 588 Rt_map *nlmp = 0; 589 char *name; 590 int silent = 0; 591 Pnode *pnp; 592 593 switch (dyn->d_tag) { 594 case DT_POSFLAG_1: 595 if ((dyn->d_un.d_val & DF_P1_LAZYLOAD) && 596 !(lmtflags & LML_TFLG_NOLAZYLD)) 597 lazy = 1; 598 if (dyn->d_un.d_val & DF_P1_GROUPPERM) 599 flags = (FLG_RT_SETGROUP | FLG_RT_HANDLE); 600 continue; 601 case DT_NEEDED: 602 case DT_USED: 603 dip->di_flags |= FLG_DI_NEEDED; 604 if (flags) 605 dip->di_flags |= FLG_DI_GROUP; 606 607 name = (char *)STRTAB(clmp) + dyn->d_un.d_val; 608 609 /* 610 * NOTE, libc.so.1 can't be lazy loaded. Although a 611 * lazy position flag won't be produced when a RTLDINFO 612 * .dynamic entry is found (introduced with the UPM in 613 * Solaris 10), it was possible to mark libc for lazy 614 * loading on previous releases. To reduce the overhead 615 * of testing for this occurrence, only carry out this 616 * check for the first object on the link-map list 617 * (there aren't many applications built without libc). 618 */ 619 if (lazy && (lml->lm_head == clmp) && 620 (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0)) 621 lazy = 0; 622 623 /* 624 * Don't bring in lazy loaded objects yet unless we've 625 * been asked to attempt to load all available objects 626 * (crle(1) sets LD_FLAGS=loadavail). Even under 627 * RTLD_NOW we don't process this - RTLD_NOW will cause 628 * relocation processing which in turn might trigger 629 * lazy loading, but its possible that the object has a 630 * lazy loaded file with no bindings (i.e., it should 631 * never have been a dependency in the first place). 632 */ 633 if (lazy) { 634 if ((lmflags & LML_FLG_LOADAVAIL) == 0) { 635 LAZY(clmp)++; 636 lazy = flags = 0; 637 continue; 638 } 639 640 /* 641 * Silence any error messages - see description 642 * under elf_lookup_filtee(). 643 */ 644 if ((rtld_flags & RT_FL_SILENCERR) == 0) { 645 rtld_flags |= RT_FL_SILENCERR; 646 silent = 1; 647 } 648 } 649 break; 650 case DT_AUXILIARY: 651 dip->di_flags |= FLG_DI_AUXFLTR; 652 lazy = flags = 0; 653 continue; 654 case DT_SUNW_AUXILIARY: 655 dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR); 656 lazy = flags = 0; 657 continue; 658 case DT_FILTER: 659 dip->di_flags |= FLG_DI_STDFLTR; 660 lazy = flags = 0; 661 continue; 662 case DT_SUNW_FILTER: 663 dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR); 664 lazy = flags = 0; 665 continue; 666 default: 667 lazy = flags = 0; 668 continue; 669 } 670 671 DBG_CALL(Dbg_file_needed(name, NAME(clmp))); 672 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 673 dip->di_flags |= FLG_DI_PROCESSD; 674 675 /* 676 * Establish the objects name, load it and establish a binding 677 * with the caller. 678 */ 679 if (((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) || 680 ((nlmp = load_one(lml, lmco, pnp, clmp, MODE(clmp), 681 flags, 0)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0)) 682 nlmp = 0; 683 684 /* 685 * Clean up any infrastructure, including the removal of the 686 * error suppression state, if it had been previously set in 687 * this routine. 688 */ 689 if (pnp) 690 remove_pnode(pnp); 691 if (silent) 692 rtld_flags &= ~RT_FL_SILENCERR; 693 lazy = flags = 0; 694 if ((dip->di_info = (void *)nlmp) == 0) { 695 /* 696 * If the object could not be mapped, continue if error 697 * suppression is established or we're here with ldd(1). 698 */ 699 if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags & 700 (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE))) 701 continue; 702 else 703 return (0); 704 } 705 } 706 707 if (LAZY(clmp)) 708 lml->lm_lazy++; 709 710 return (1); 711 } 712 713 static int 714 elf_map_check(const char *name, caddr_t vaddr, Off size) 715 { 716 prmap_t *maps, *_maps; 717 int pfd, num, _num; 718 caddr_t eaddr = vaddr + size; 719 int err; 720 721 /* 722 * If memory reservations have been established for alternative objects 723 * determine if this object falls within the reservation, if it does no 724 * further checking is required. 725 */ 726 if (rtld_flags & RT_FL_MEMRESV) { 727 Rtc_head *head = (Rtc_head *)config->c_bgn; 728 729 if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) && 730 (eaddr <= (caddr_t)(uintptr_t)head->ch_resend)) 731 return (0); 732 } 733 734 /* 735 * Determine the mappings presently in use by this process. 736 */ 737 if ((pfd = pr_open()) == FD_UNAVAIL) 738 return (1); 739 740 if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) { 741 err = errno; 742 eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name, strerror(err)); 743 return (1); 744 } 745 746 if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0) 747 return (1); 748 749 if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) { 750 err = errno; 751 eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name, strerror(err)); 752 free(maps); 753 return (1); 754 } 755 756 /* 757 * Determine if the supplied address clashes with any of the present 758 * process mappings. 759 */ 760 for (_num = 0, _maps = maps; _num < num; _num++, _maps++) { 761 caddr_t _eaddr = _maps->pr_vaddr + _maps->pr_size; 762 Rt_map *lmp; 763 const char *str; 764 765 if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr)) 766 continue; 767 768 /* 769 * We have a memory clash. See if one of the known dynamic 770 * dependency mappings represents this space so as to provide 771 * the user a more meaningful message. 772 */ 773 if ((lmp = _caller(vaddr, 0)) != 0) 774 str = NAME(lmp); 775 else 776 str = MSG_INTL(MSG_STR_UNKNOWN); 777 778 eprintf(ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name, 779 EC_ADDR(vaddr), EC_OFF(size), str); 780 return (1); 781 } 782 free(maps); 783 return (0); 784 } 785 786 /* 787 * Obtain a memory reservation. On newer systems, both MAP_ANON and MAP_ALIGN 788 * are used to obtained an aligned reservation from anonymous memory. If 789 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard 790 * reservation using the file as backing. 791 */ 792 static Am_ret 793 elf_map_reserve(const char *name, caddr_t *maddr, Off msize, int mperm, 794 int fd, Xword align) 795 { 796 Am_ret amret; 797 int mflag = MAP_PRIVATE | MAP_NORESERVE; 798 799 #if defined(MAP_ALIGN) 800 if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) { 801 mflag |= MAP_ALIGN; 802 *maddr = (caddr_t)align; 803 } 804 #endif 805 if ((amret = anon_map(maddr, msize, PROT_NONE, mflag)) == AM_ERROR) 806 return (amret); 807 808 if (amret == AM_OK) 809 return (AM_OK); 810 811 /* 812 * If an anonymous memory request failed (which should only be the 813 * case if it is unsupported on the system we're running on), establish 814 * the initial mapping directly from the file. 815 */ 816 *maddr = 0; 817 if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE, 818 fd, 0)) == MAP_FAILED) { 819 int err = errno; 820 eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name, strerror(err)); 821 return (AM_ERROR); 822 } 823 return (AM_NOSUP); 824 } 825 826 static void * 827 elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag, 828 int fd, Off foff) 829 { 830 #if defined(MAP_TEXT) && defined(MAP_INITDATA) 831 static int notd = 0; 832 833 /* 834 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate 835 * flag. 836 */ 837 if (notd == 0) { 838 if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) 839 mflag |= MAP_TEXT; 840 else 841 mflag |= MAP_INITDATA; 842 } 843 #endif 844 if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED) 845 return (0); 846 847 #if defined(MAP_TEXT) && defined(MAP_INITDATA) 848 if ((notd == 0) && (errno == EINVAL)) { 849 /* 850 * MAP_TEXT and MAP_INITDATA may not be supported on this 851 * platform, try again without. 852 */ 853 notd = 1; 854 mflag &= ~(MAP_TEXT | MAP_INITDATA); 855 856 return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff)); 857 } 858 #endif 859 return (MAP_FAILED); 860 } 861 862 /* 863 * Map in a file. 864 */ 865 static caddr_t 866 elf_map_it( 867 const char *name, /* actual name stored for pathname */ 868 Off fsize, /* total mapping claim of the file */ 869 Ehdr *ehdr, /* ELF header of file */ 870 Phdr *fphdr, /* first loadable Phdr */ 871 Phdr *lphdr, /* last loadable Phdr */ 872 Phdr **rrphdr, /* return first Phdr in reservation */ 873 caddr_t *rraddr, /* return start of reservation */ 874 Off *rrsize, /* return total size of reservation */ 875 int fixed, /* image is resolved to a fixed addr */ 876 int fd, /* images file descriptor */ 877 Xword align, /* image segments maximum alignment */ 878 Mmap *mmaps, /* mmap information array and */ 879 uint_t *mmapcnt) /* mapping count */ 880 { 881 caddr_t raddr; /* reservation address */ 882 Off rsize; /* reservation size */ 883 Phdr *phdr; /* working program header poiner */ 884 caddr_t maddr; /* working mmap address */ 885 caddr_t faddr; /* working file address */ 886 size_t padsize; /* object padding requirement */ 887 size_t padpsize = 0; /* padding size rounded to next page */ 888 size_t padmsize = 0; /* padding size rounded for alignment */ 889 int skipfseg; /* skip mapping first segment */ 890 int mperm; /* segment permissions */ 891 Am_ret amret = AM_NOSUP; 892 893 /* 894 * If padding is required extend both the front and rear of the image. 895 * To insure the image itself is mapped at the correct alignment the 896 * initial padding is rounded up to the nearest page. Once the image is 897 * mapped the excess can be pruned to the nearest page required for the 898 * actual padding itself. 899 */ 900 if ((padsize = r_debug.rtd_objpad) != 0) { 901 padpsize = M_PROUND(padsize); 902 if (fixed) 903 padmsize = padpsize; 904 else 905 padmsize = S_ROUND(padsize, align); 906 } 907 908 /* 909 * Determine the initial permissions used to map in the first segment. 910 * If this segments memsz is greater that its filesz then the difference 911 * must be zeroed. Make sure this segment is writable. 912 */ 913 mperm = 0; 914 if (fphdr->p_flags & PF_R) 915 mperm |= PROT_READ; 916 if (fphdr->p_flags & PF_X) 917 mperm |= PROT_EXEC; 918 if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz)) 919 mperm |= PROT_WRITE; 920 921 /* 922 * Determine whether or not to let system reserve address space based on 923 * whether this is a dynamic executable (addresses in object are fixed) 924 * or a shared object (addresses in object are relative to the objects' 925 * base). 926 */ 927 if (fixed) { 928 /* 929 * Determine the reservation address and size, and insure that 930 * this reservation isn't already in use. 931 */ 932 faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr); 933 raddr = maddr - padpsize; 934 rsize = fsize + padpsize + padsize; 935 936 if (lml_main.lm_head) { 937 if (elf_map_check(name, raddr, rsize) != 0) 938 return (0); 939 } 940 941 /* 942 * As this is a fixed image, all segments must be individually 943 * mapped. 944 */ 945 skipfseg = 0; 946 947 } else { 948 size_t esize; 949 950 /* 951 * If this isn't a fixed image, reserve enough address space for 952 * the entire image to be mapped. The amount of reservation is 953 * the range between the beginning of the first, and end of the 954 * last loadable segment, together with any padding, plus the 955 * alignment of the first segment. 956 * 957 * The optimal reservation is made as a no-reserve mapping from 958 * anonymous memory. Each segment is then mapped into this 959 * reservation. If the anonymous mapping capability isn't 960 * available, the reservation is obtained from the file itself. 961 * In this case the first segment of the image is mapped as part 962 * of the reservation, thus only the following segments need to 963 * be remapped. 964 */ 965 rsize = fsize + padmsize + padsize; 966 if ((amret = elf_map_reserve(name, &raddr, rsize, mperm, 967 fd, align)) == AM_ERROR) 968 return (0); 969 maddr = raddr + padmsize; 970 faddr = (caddr_t)S_ROUND((Off)maddr, align); 971 972 /* 973 * If this reservation has been obtained from anonymous memory, 974 * then all segments must be individually mapped. Otherwise, 975 * the first segment heads the reservation. 976 */ 977 if (amret == AM_OK) 978 skipfseg = 0; 979 else 980 skipfseg = 1; 981 982 /* 983 * For backward compatibility (where MAP_ALIGN isn't available), 984 * insure the alignment of the reservation is adequate for this 985 * object, and if not remap the object to obtain the correct 986 * alignment. 987 */ 988 if (faddr != maddr) { 989 (void) munmap(raddr, rsize); 990 991 rsize += align; 992 if ((amret = elf_map_reserve(name, &raddr, rsize, mperm, 993 fd, align)) == AM_ERROR) 994 return (0); 995 996 maddr = faddr = (caddr_t)S_ROUND((Off)(raddr + 997 padpsize), align); 998 999 esize = maddr - raddr + padpsize; 1000 1001 /* 1002 * As ths image has been realigned, the first segment 1003 * of the file needs to be remapped to its correct 1004 * location. 1005 */ 1006 skipfseg = 0; 1007 } else 1008 esize = padmsize - padpsize; 1009 1010 /* 1011 * If this reservation included padding, remove any excess for 1012 * the start of the image (the padding was adjusted to insure 1013 * the image was aligned appropriately). 1014 */ 1015 if (esize) { 1016 (void) munmap(raddr, esize); 1017 raddr += esize; 1018 rsize -= esize; 1019 } 1020 } 1021 1022 /* 1023 * At this point we know the initial location of the image, and its 1024 * size. Pass these back to the caller for inclusion in the link-map 1025 * that will eventually be created. 1026 */ 1027 *rraddr = raddr; 1028 *rrsize = rsize; 1029 1030 /* 1031 * The first loadable segment is now pointed to by maddr. This segment 1032 * will eventually contain the elf header and program headers, so reset 1033 * the program header. Pass this back to the caller for inclusion in 1034 * the link-map so it can be used for later unmapping operations. 1035 */ 1036 /* LINTED */ 1037 *rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff); 1038 1039 /* 1040 * If padding is required at the front of the image, obtain that now. 1041 * Note, if we've already obtained a reservation from anonymous memory 1042 * then this reservation will already include suitable padding. 1043 * Otherwise this reservation is backed by the file, or in the case of 1044 * a fixed image, doesn't yet exist. Map the padding so that it is 1045 * suitably protected (PROT_NONE), and insure the first segment of the 1046 * file is mapped to its correct location. 1047 */ 1048 if (padsize) { 1049 if (amret == AM_NOSUP) { 1050 if (dz_map(raddr, padpsize, PROT_NONE, (MAP_PRIVATE | 1051 MAP_FIXED | MAP_NORESERVE)) == MAP_FAILED) 1052 return (0); 1053 1054 skipfseg = 0; 1055 } 1056 rsize -= padpsize; 1057 } 1058 1059 /* 1060 * Map individual segments. For a fixed image, these will each be 1061 * unique mappings. For a reservation these will fill in the 1062 * reservation. 1063 */ 1064 for (phdr = fphdr; phdr <= lphdr; 1065 phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) { 1066 caddr_t addr; 1067 Off mlen, flen; 1068 size_t size; 1069 1070 /* 1071 * Skip non-loadable segments or segments that don't occupy 1072 * any memory. 1073 */ 1074 if (((phdr->p_type != PT_LOAD) && 1075 (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0)) 1076 continue; 1077 1078 /* 1079 * Establish this segments address relative to our base. 1080 */ 1081 addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr + 1082 (fixed ? 0 : faddr))); 1083 1084 /* 1085 * Determine the mapping protection from the segment attributes. 1086 * Also determine the etext address from the last loadable 1087 * segment which has permissions but no write access. 1088 */ 1089 mperm = 0; 1090 if (phdr->p_flags) { 1091 if (phdr->p_flags & PF_R) 1092 mperm |= PROT_READ; 1093 if (phdr->p_flags & PF_X) 1094 mperm |= PROT_EXEC; 1095 if (phdr->p_flags & PF_W) 1096 mperm |= PROT_WRITE; 1097 else 1098 fmap->fm_etext = phdr->p_vaddr + phdr->p_memsz + 1099 (ulong_t)(fixed ? 0 : faddr); 1100 } 1101 1102 /* 1103 * Determine the type of mapping required. 1104 */ 1105 if (phdr->p_type == PT_SUNWBSS) { 1106 /* 1107 * Potentially, we can defer the loading of any SUNWBSS 1108 * segment, depending on whether the symbols it provides 1109 * have been bound to. In this manner, large segments 1110 * that are interposed upon between shared libraries 1111 * may not require mapping. Note, that the mapping 1112 * information is recorded in our mapping descriptor at 1113 * this time. 1114 */ 1115 mlen = phdr->p_memsz; 1116 flen = 0; 1117 1118 } else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) { 1119 /* 1120 * If this segment has no backing file and no flags 1121 * specified, then it defines a reservation. At this 1122 * point all standard loadable segments will have been 1123 * processed. The segment reservation is mapped 1124 * directly from /dev/null. 1125 */ 1126 if (nu_map((caddr_t)addr, phdr->p_memsz, PROT_NONE, 1127 MAP_FIXED | MAP_PRIVATE) == MAP_FAILED) 1128 return (0); 1129 1130 mlen = phdr->p_memsz; 1131 flen = 0; 1132 1133 } else if (phdr->p_filesz == 0) { 1134 /* 1135 * If this segment has no backing file then it defines a 1136 * nobits segment and is mapped directly from /dev/zero. 1137 */ 1138 if (dz_map((caddr_t)addr, phdr->p_memsz, mperm, 1139 MAP_FIXED | MAP_PRIVATE) == MAP_FAILED) 1140 return (0); 1141 1142 mlen = phdr->p_memsz; 1143 flen = 0; 1144 1145 } else { 1146 Off foff; 1147 1148 /* 1149 * This mapping originates from the file. Determine the 1150 * file offset to which the mapping will be directed 1151 * (must be aligned) and how much to map (might be more 1152 * than the file in the case of .bss). 1153 */ 1154 foff = M_PTRUNC((ulong_t)phdr->p_offset); 1155 mlen = phdr->p_memsz + (phdr->p_offset - foff); 1156 flen = phdr->p_filesz + (phdr->p_offset - foff); 1157 1158 /* 1159 * If this is a non-fixed, non-anonymous mapping, and no 1160 * padding is involved, then the first loadable segment 1161 * is already part of the initial reservation. In this 1162 * case there is no need to remap this segment. 1163 */ 1164 if ((skipfseg == 0) || (phdr != fphdr)) { 1165 int phdr_mperm = mperm; 1166 /* 1167 * If this segments memsz is greater that its 1168 * filesz then the difference must be zeroed. 1169 * Make sure this segment is writable. 1170 */ 1171 if (phdr->p_memsz > phdr->p_filesz) 1172 mperm |= PROT_WRITE; 1173 1174 if (elf_map_textdata((caddr_t)addr, flen, 1175 mperm, phdr_mperm, 1176 (MAP_FIXED | MAP_PRIVATE), fd, foff) == 1177 MAP_FAILED) { 1178 int err = errno; 1179 eprintf(ERR_FATAL, 1180 MSG_INTL(MSG_SYS_MMAP), name, 1181 strerror(err)); 1182 return (0); 1183 } 1184 } 1185 1186 /* 1187 * If the memory occupancy of the segment overflows the 1188 * definition in the file, we need to "zero out" the end 1189 * of the mapping we've established, and if necessary, 1190 * map some more space from /dev/zero. Note, zero'ed 1191 * memory must end on a double word boundary to satisfy 1192 * zero(). 1193 */ 1194 if (phdr->p_memsz > phdr->p_filesz) { 1195 caddr_t zaddr; 1196 size_t zlen, zplen; 1197 Off fend; 1198 1199 foff = (Off)(phdr->p_vaddr + phdr->p_filesz + 1200 (fixed ? 0 : faddr)); 1201 zaddr = (caddr_t)M_PROUND(foff); 1202 zplen = (size_t)(zaddr - foff); 1203 1204 fend = (Off)S_DROUND((size_t)(phdr->p_vaddr + 1205 phdr->p_memsz + (fixed ? 0 : faddr))); 1206 zlen = (size_t)(fend - foff); 1207 1208 /* 1209 * Determine whether the number of bytes that 1210 * must be zero'ed overflow to the next page. 1211 * If not, simply clear the exact bytes 1212 * (filesz to memsz) from this page. Otherwise, 1213 * clear the remaining bytes of this page, and 1214 * map an following pages from /dev/zero. 1215 */ 1216 if (zlen < zplen) 1217 zero((caddr_t)foff, (long)zlen); 1218 else { 1219 zero((caddr_t)foff, (long)zplen); 1220 1221 if ((zlen = (fend - (Off)zaddr)) > 0) { 1222 if (dz_map(zaddr, zlen, mperm, 1223 MAP_FIXED | MAP_PRIVATE) == 1224 MAP_FAILED) 1225 return (0); 1226 } 1227 } 1228 } 1229 } 1230 1231 /* 1232 * Unmap anything from the last mapping address to this one and 1233 * update the mapping claim pointer. 1234 */ 1235 if ((fixed == 0) && ((size = addr - maddr) != 0)) { 1236 (void) munmap(maddr, size); 1237 rsize -= size; 1238 } 1239 1240 /* 1241 * Retain this segments mapping information. 1242 */ 1243 mmaps[*mmapcnt].m_vaddr = addr; 1244 mmaps[*mmapcnt].m_msize = mlen; 1245 mmaps[*mmapcnt].m_fsize = flen; 1246 mmaps[*mmapcnt].m_perm = mperm; 1247 (*mmapcnt)++; 1248 1249 maddr = addr + M_PROUND(mlen); 1250 rsize -= M_PROUND(mlen); 1251 } 1252 1253 /* 1254 * If padding is required at the end of the image, obtain that now. 1255 * Note, if we've already obtained a reservation from anonymous memory 1256 * then this reservation will already include suitable padding. 1257 */ 1258 if (padsize) { 1259 if (amret == AM_NOSUP) { 1260 /* 1261 * maddr is currently page aligned from the last segment 1262 * mapping. 1263 */ 1264 if (dz_map(maddr, padsize, PROT_NONE, (MAP_PRIVATE | 1265 MAP_FIXED | MAP_NORESERVE)) == MAP_FAILED) 1266 return (0); 1267 } 1268 maddr += padsize; 1269 rsize -= padsize; 1270 } 1271 1272 /* 1273 * Unmap any final reservation. 1274 */ 1275 if ((fixed == 0) && (rsize != 0)) 1276 (void) munmap(maddr, rsize); 1277 1278 return (faddr); 1279 } 1280 1281 /* 1282 * A null symbol interpretor. Used if a filter has no associated filtees. 1283 */ 1284 /* ARGSUSED0 */ 1285 static Sym * 1286 elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo) 1287 { 1288 return ((Sym *)0); 1289 } 1290 1291 /* 1292 * Disable filtee use. 1293 */ 1294 static void 1295 elf_disable_filtee(Rt_map * lmp, Dyninfo * dip) 1296 { 1297 dip->di_info = 0; 1298 1299 if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) { 1300 /* 1301 * If this is an object filter, free the filtee's duplication. 1302 */ 1303 if (OBJFLTRNDX(lmp) != FLTR_DISABLED) { 1304 free(REFNAME(lmp)); 1305 REFNAME(lmp) = (char *)0; 1306 OBJFLTRNDX(lmp) = FLTR_DISABLED; 1307 1308 /* 1309 * Indicate that this filtee is no longer available. 1310 */ 1311 if (dip->di_flags & FLG_DI_STDFLTR) 1312 SYMINTP(lmp) = elf_null_find_sym; 1313 1314 } 1315 } else if (dip->di_flags & FLG_DI_STDFLTR) { 1316 /* 1317 * Indicate that this standard filtee is no longer available. 1318 */ 1319 if (SYMSFLTRCNT(lmp)) 1320 SYMSFLTRCNT(lmp)--; 1321 } else { 1322 /* 1323 * Indicate that this auxiliary filtee is no longer available. 1324 */ 1325 if (SYMAFLTRCNT(lmp)) 1326 SYMAFLTRCNT(lmp)--; 1327 } 1328 dip->di_flags &= ~MSK_DI_FILTER; 1329 } 1330 1331 /* 1332 * Find symbol interpreter - filters. 1333 * This function is called when the symbols from a shared object should 1334 * be resolved from the shared objects filtees instead of from within itself. 1335 * 1336 * A symbol name of 0 is used to trigger filtee loading. 1337 */ 1338 static Sym * 1339 _elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx) 1340 { 1341 const char *name = slp->sl_name, *filtees; 1342 Rt_map *clmp = slp->sl_cmap; 1343 Rt_map *ilmp = slp->sl_imap; 1344 Pnode *pnp, **pnpp; 1345 int any; 1346 Dyninfo *dip = &DYNINFO(ilmp)[ndx]; 1347 Lm_list *lml = LIST(ilmp); 1348 1349 /* 1350 * Indicate that the filter has been used. If a binding already exists 1351 * to the caller, indicate that this object is referenced. This insures 1352 * we don't generate false unreferenced diagnostics from ldd -u/U or 1353 * debugging. Don't create a binding regardless, as this filter may 1354 * have been dlopen()'ed. 1355 */ 1356 if (name && (ilmp != clmp)) { 1357 Word tracing = (LIST(clmp)->lm_flags & 1358 (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED)); 1359 1360 if (tracing || dbg_mask) { 1361 Bnd_desc ** bdpp; 1362 Aliste off; 1363 1364 FLAGS1(ilmp) |= FL1_RT_USED; 1365 1366 if ((tracing & LML_FLG_TRC_UNREF) || dbg_mask) { 1367 for (ALIST_TRAVERSE(CALLERS(ilmp), off, bdpp)) { 1368 Bnd_desc * bdp = *bdpp; 1369 1370 if (bdp->b_caller == clmp) { 1371 bdp->b_flags |= BND_REFER; 1372 break; 1373 } 1374 } 1375 } 1376 } 1377 } 1378 1379 /* 1380 * If this is the first call to process this filter, establish the 1381 * filtee list. If a configuration file exists, determine if any 1382 * filtee associations for this filter, and its filtee reference, are 1383 * defined. Otherwise, process the filtee reference. Any token 1384 * expansion is also completed at this point (i.e., $PLATFORM). 1385 */ 1386 filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val; 1387 if (dip->di_info == 0) { 1388 if (rtld_flags2 & RT_FL2_FLTCFG) 1389 dip->di_info = elf_config_flt(PATHNAME(ilmp), filtees); 1390 1391 if (dip->di_info == 0) { 1392 DBG_CALL(Dbg_file_filter(NAME(ilmp), filtees, 0)); 1393 if ((lml->lm_flags & 1394 (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) && 1395 ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0)) 1396 (void) printf(MSG_INTL(MSG_LDD_FIL_FILTER), 1397 NAME(ilmp), filtees); 1398 1399 if ((dip->di_info = (void *)expand_paths(ilmp, 1400 filtees, PN_SER_FILTEE, 0)) == 0) { 1401 elf_disable_filtee(ilmp, dip); 1402 return ((Sym *)0); 1403 } 1404 } 1405 } 1406 1407 /* 1408 * Traverse the filtee list, dlopen()'ing any objects specified and 1409 * using their group handle to lookup the symbol. 1410 */ 1411 for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp; 1412 pnpp = &pnp->p_next, pnp = * pnpp) { 1413 int mode; 1414 Grp_hdl *ghp; 1415 Rt_map *nlmp = 0; 1416 1417 if (pnp->p_len == 0) 1418 continue; 1419 1420 /* 1421 * Establish the mode of the filtee from the filter. As filtees 1422 * are loaded via a dlopen(), make sure that RTLD_GROUP is set 1423 * and the filtees aren't global. It would be nice to have 1424 * RTLD_FIRST used here also, but as filters got out long before 1425 * RTLD_FIRST was introduced it's a little too late now. 1426 */ 1427 mode = MODE(ilmp) | RTLD_GROUP; 1428 mode &= ~RTLD_GLOBAL; 1429 1430 /* 1431 * Insure that any auxiliary filter can locate symbols from its 1432 * caller. 1433 */ 1434 if (dip->di_flags & FLG_DI_AUXFLTR) 1435 mode |= RTLD_PARENT; 1436 1437 /* 1438 * Process any hardware capability directory. Establish a new 1439 * link-map control list from which to analyze any newly added 1440 * objects. 1441 */ 1442 if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) { 1443 Lm_cntl *lmc; 1444 Aliste lmco; 1445 1446 if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) { 1447 if ((lmc = alist_append(&(lml->lm_lists), 0, 1448 sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0) 1449 return ((Sym *)0); 1450 lmco = (Aliste)((char *)lmc - 1451 (char *)lml->lm_lists); 1452 } else { 1453 lmc = 0; 1454 lmco = ALO_DATA; 1455 } 1456 1457 pnp = hwcap_filtees(pnpp, lmco, dip, ilmp, filtees, 1458 mode, (FLG_RT_HANDLE | FLG_RT_HWCAP)); 1459 1460 /* 1461 * Now that any hardware capability objects have been 1462 * processed, remove any link-map control list. 1463 */ 1464 if (lmc) { 1465 if (pnp->p_len == 0) 1466 (void) lm_salvage(lml, 0, lmco); 1467 remove_cntl(lml, lmco); 1468 } 1469 } 1470 1471 if (pnp->p_len == 0) 1472 continue; 1473 1474 /* 1475 * Process an individual filtee. 1476 */ 1477 if (pnp->p_info == 0) { 1478 const char *filtee = pnp->p_name; 1479 int audit = 0; 1480 1481 DBG_CALL(Dbg_file_filtee(NAME(ilmp), filtee, 0)); 1482 1483 ghp = 0; 1484 1485 /* 1486 * Determine if the reference link map is already 1487 * loaded. As an optimization compare the filtee with 1488 * our interpretor. The most common filter is 1489 * libdl.so.1, which is a filter on ld.so.1. 1490 */ 1491 #if defined(_ELF64) 1492 if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) { 1493 #else 1494 if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) { 1495 #endif 1496 /* 1497 * Create an association between ld.so.1 and 1498 * the filter. 1499 */ 1500 nlmp = lml_rtld.lm_head; 1501 if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp, 1502 (GPH_LDSO | GPH_FIRST | GPH_FILTEE))) == 0) 1503 nlmp = 0; 1504 1505 /* 1506 * Establish the filter handle to prevent any 1507 * recursion. 1508 */ 1509 if (nlmp && ghp) 1510 pnp->p_info = (void *)ghp; 1511 1512 /* 1513 * Audit the filter/filtee established. Ignore 1514 * any return from the auditor, as we can't 1515 * allow ignore filtering to ld.so.1, otherwise 1516 * nothing is going to work. 1517 */ 1518 if ((lml->lm_tflags | FLAGS1(ilmp)) & 1519 LML_TFLG_AUD_OBJFILTER) 1520 (void) audit_objfilter(ilmp, filtees, 1521 nlmp, 0); 1522 1523 } else { 1524 Rej_desc rej = { 0 }; 1525 Lm_cntl *lmc; 1526 Aliste lmco; 1527 1528 /* 1529 * Establish a new link-map control list from 1530 * which to analyze any newly added objects. 1531 */ 1532 if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) { 1533 if ((lmc = 1534 alist_append(&(lml->lm_lists), 0, 1535 sizeof (Lm_cntl), 1536 AL_CNT_LMLISTS)) == 0) 1537 return ((Sym *)0); 1538 lmco = (Aliste)((char *)lmc - 1539 (char *)lml->lm_lists); 1540 } else { 1541 lmc = 0; 1542 lmco = ALO_DATA; 1543 } 1544 1545 /* 1546 * Load the filtee. 1547 */ 1548 if ((nlmp = load_path(lml, lmco, filtee, ilmp, 1549 mode, FLG_RT_HANDLE, &ghp, 0, &rej)) == 0) { 1550 file_notfound(LIST(ilmp), filtee, ilmp, 1551 FLG_RT_HANDLE, &rej); 1552 remove_rej(&rej); 1553 } 1554 1555 /* 1556 * Establish the filter handle to prevent any 1557 * recursion. 1558 */ 1559 if (nlmp && ghp) { 1560 ghp->gh_flags |= GPH_FILTEE; 1561 pnp->p_info = (void *)ghp; 1562 } 1563 1564 /* 1565 * Audit the filter/filtee established. A 1566 * return of 0 indicates the auditor wishes to 1567 * ignore this filtee. 1568 */ 1569 if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) & 1570 LML_TFLG_AUD_OBJFILTER)) { 1571 if (audit_objfilter(ilmp, filtees, 1572 nlmp, 0) == 0) { 1573 audit = 1; 1574 nlmp = 0; 1575 } 1576 } 1577 1578 /* 1579 * Finish processing the objects associated with 1580 * this request. Create an association between 1581 * this object and the originating filter to 1582 * provide sufficient information to tear down 1583 * this filtee if necessary. 1584 */ 1585 if (nlmp && ghp && 1586 ((analyze_lmc(lml, lmco, nlmp) == 0) || 1587 (relocate_lmc(lml, lmco, nlmp) == 0))) 1588 nlmp = 0; 1589 1590 /* 1591 * If the filtee has been successfully 1592 * processed, and it is part of a link-map 1593 * control list that is equivalent, or less, 1594 * than the filter control list, create an 1595 * association between the filter and filtee. 1596 * This association provides sufficient 1597 * information to tear down the filter and 1598 * filtee if necessary. 1599 */ 1600 if (nlmp && ghp && (CNTL(nlmp) <= CNTL(ilmp)) && 1601 (hdl_add(ghp, ilmp, GPD_FILTER) == 0)) 1602 nlmp = 0; 1603 1604 /* 1605 * Now that this object has been processed, 1606 * remove any link-map control list. 1607 */ 1608 if (lmc) { 1609 if (nlmp == 0) 1610 (void) lm_salvage(lml, 0, lmco); 1611 remove_cntl(lml, lmco); 1612 } 1613 } 1614 1615 /* 1616 * Generate a diagnostic if the filtee couldn't be 1617 * loaded, null out the pnode entry, and continue 1618 * the search. Otherwise, retain this group handle 1619 * for future symbol searches. 1620 */ 1621 if (nlmp == 0) { 1622 pnp->p_info = 0; 1623 DBG_CALL(Dbg_file_filtee(0, filtee, audit)); 1624 1625 if (ghp) 1626 (void) dlclose_core(ghp, ilmp); 1627 1628 pnp->p_len = 0; 1629 continue; 1630 } 1631 } 1632 1633 ghp = (Grp_hdl *)pnp->p_info; 1634 1635 /* 1636 * If we're just here to trigger filtee loading skip the symbol 1637 * lookup so we'll continue looking for additional filtees. 1638 */ 1639 if (name) { 1640 Grp_desc *gdp; 1641 Sym *sym = 0; 1642 Aliste off; 1643 Slookup sl = *slp; 1644 1645 sl.sl_flags |= LKUP_FIRST; 1646 any++; 1647 1648 /* 1649 * Look for the symbol in the handles dependencies. 1650 */ 1651 for (ALIST_TRAVERSE(ghp->gh_depends, off, gdp)) { 1652 if ((gdp->gd_flags & GPD_AVAIL) == 0) 1653 continue; 1654 1655 /* 1656 * If our parent is a dependency don't look at 1657 * it (otherwise we are in a recursive loop). 1658 * This situation can occur with auxiliary 1659 * filters if the filtee has a dependency on the 1660 * filter. This dependency isn't necessary as 1661 * auxiliary filters are opened RTLD_PARENT, but 1662 * users may still unknowingly add an explicit 1663 * dependency to the parent. 1664 */ 1665 if ((sl.sl_imap = gdp->gd_depend) == ilmp) 1666 continue; 1667 1668 if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp, 1669 binfo)) != 0) || 1670 (ghp->gh_flags & GPH_FIRST)) 1671 break; 1672 } 1673 1674 /* 1675 * If this filtee has just been loaded (nlmp != 0), 1676 * determine whether the filtee was triggered by a 1677 * relocation from an object that is still being 1678 * relocated on a leaf link-map control list. As the 1679 * relocation of an object on this list might still 1680 * fail, we can't yet bind the filter to the filtee. 1681 * To do so, would be locking the filtee so that it 1682 * couldn't be deleted, and the filtee itself could have 1683 * bound to an object that must be torn down. Insure 1684 * the caller isn't bound to the handle at this time. 1685 * Any association will be reestablished when the filter 1686 * is later referenced and the filtee has propagated to 1687 * the same link-map control list. 1688 */ 1689 if (nlmp && (CNTL(nlmp) > CNTL(ilmp))) { 1690 remove_caller(ghp, ilmp); 1691 pnp->p_info = 0; 1692 } 1693 if (sym) { 1694 *binfo |= DBG_BINFO_FILTEE; 1695 return (sym); 1696 } 1697 } 1698 1699 /* 1700 * If this object is tagged to terminate filtee processing we're 1701 * done. 1702 */ 1703 if (FLAGS1(ghp->gh_owner) & FL1_RT_ENDFILTE) 1704 break; 1705 } 1706 1707 /* 1708 * If we're just here to trigger filtee loading then we're done. 1709 */ 1710 if (name == 0) 1711 return ((Sym *)0); 1712 1713 /* 1714 * If no filtees have been found for a filter, clean up any Pnode 1715 * structures and disable their search completely. For auxiliary 1716 * filters we can reselect the symbol search function so that we never 1717 * enter this routine again for this object. For standard filters we 1718 * use the null symbol routine. 1719 */ 1720 if (any == 0) { 1721 remove_pnode((Pnode *)dip->di_info); 1722 elf_disable_filtee(ilmp, dip); 1723 return ((Sym *)0); 1724 } 1725 1726 return ((Sym *)0); 1727 } 1728 1729 /* 1730 * Focal point for disabling error messages for auxiliary filters. As an 1731 * auxiliary filter allows for filtee use, but provides a fallback should a 1732 * filtee not exist (or fail to load), any errors generated as a consequence of 1733 * trying to load the filtees are typically suppressed. Setting RT_FL_SILENCERR 1734 * suppresses errors generated by eprint(), but insures a debug diagnostic is 1735 * produced. ldd(1) employs printf(), and here, the selection of whether to 1736 * print a diagnostic in regards to auxiliary filters is a little more complex. 1737 * 1738 * . The determination of whether to produce an ldd message, or a fatal 1739 * error message is driven by LML_FLG_TRC_ENABLE. 1740 * . More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN, 1741 * (ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s), 1742 * and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u). 1743 * 1744 * . If the calling object is lddstub, then several classes of message are 1745 * suppressed. The user isn't trying to diagnose lddstub, this is simply 1746 * a stub executable employed to preload a user specified library against. 1747 * 1748 * . If RT_FL_SILENCERR is in effect then any generic ldd() messages should 1749 * be suppressed. All detailed ldd messages should still be produced. 1750 */ 1751 Sym * 1752 elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx) 1753 { 1754 Sym *sym; 1755 Dyninfo *dip = &DYNINFO(slp->sl_imap)[ndx]; 1756 int silent = 0; 1757 1758 /* 1759 * Make sure this entry is still acting as a filter. We may have tried 1760 * to process this previously, and disabled it if the filtee couldn't 1761 * be processed. However, other entries may provide different filtees 1762 * that are yet to be completed. 1763 */ 1764 if (dip->di_flags == 0) 1765 return ((Sym *)0); 1766 1767 /* 1768 * Indicate whether an error message is required should this filtee not 1769 * be found, based on the type of filter. 1770 */ 1771 if ((dip->di_flags & FLG_DI_AUXFLTR) && 1772 ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) { 1773 rtld_flags |= RT_FL_SILENCERR; 1774 silent = 1; 1775 } 1776 1777 sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx); 1778 1779 if (silent) 1780 rtld_flags &= ~RT_FL_SILENCERR; 1781 1782 return (sym); 1783 } 1784 1785 /* 1786 * Compute the elf hash value (as defined in the ELF access library). 1787 * The form of the hash table is: 1788 * 1789 * |--------------| 1790 * | # of buckets | 1791 * |--------------| 1792 * | # of chains | 1793 * |--------------| 1794 * | bucket[] | 1795 * |--------------| 1796 * | chain[] | 1797 * |--------------| 1798 */ 1799 ulong_t 1800 elf_hash(const char *name) 1801 { 1802 uint_t hval = 0; 1803 1804 while (*name) { 1805 uint_t g; 1806 hval = (hval << 4) + *name++; 1807 if ((g = (hval & 0xf0000000)) != 0) 1808 hval ^= g >> 24; 1809 hval &= ~g; 1810 } 1811 return ((ulong_t)hval); 1812 } 1813 1814 /* 1815 * If flag argument has LKUP_SPEC set, we treat undefined symbols of type 1816 * function specially in the executable - if they have a value, even though 1817 * undefined, we use that value. This allows us to associate all references 1818 * to a function's address to a single place in the process: the plt entry 1819 * for that function in the executable. Calls to lookup from plt binding 1820 * routines do NOT set LKUP_SPEC in the flag. 1821 */ 1822 Sym * 1823 elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo) 1824 { 1825 const char *name = slp->sl_name; 1826 Rt_map *ilmp = slp->sl_imap; 1827 ulong_t hash = slp->sl_hash; 1828 uint_t ndx, htmp, buckets, *chainptr; 1829 Sym *sym, *symtabptr; 1830 char *strtabptr, *strtabname; 1831 uint_t flags1; 1832 Syminfo *sip; 1833 1834 /* 1835 * If we're only here to establish a symbols index, skip the diagnostic 1836 * used to trace a symbol search. 1837 */ 1838 if ((slp->sl_flags & LKUP_SYMNDX) == 0) { 1839 DBG_CALL(Dbg_syms_lookup(name, NAME(ilmp), 1840 MSG_ORIG(MSG_STR_ELF))); 1841 } 1842 1843 if (HASH(ilmp) == 0) 1844 return ((Sym *)0); 1845 1846 buckets = HASH(ilmp)[0]; 1847 /* LINTED */ 1848 htmp = (uint_t)hash % buckets; 1849 1850 /* 1851 * Get the first symbol on hash chain and initialize the string 1852 * and symbol table pointers. 1853 */ 1854 if ((ndx = HASH(ilmp)[htmp + 2]) == 0) 1855 return ((Sym *)0); 1856 1857 chainptr = HASH(ilmp) + 2 + buckets; 1858 strtabptr = STRTAB(ilmp); 1859 symtabptr = SYMTAB(ilmp); 1860 1861 while (ndx) { 1862 sym = symtabptr + ndx; 1863 strtabname = strtabptr + sym->st_name; 1864 1865 /* 1866 * Compare the symbol found with the name required. If the 1867 * names don't match continue with the next hash entry. 1868 */ 1869 if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) { 1870 if ((ndx = chainptr[ndx]) != 0) 1871 continue; 1872 return ((Sym *)0); 1873 } 1874 1875 /* 1876 * If we're only here to establish a symbols index, we're done. 1877 */ 1878 if (slp->sl_flags & LKUP_SYMNDX) 1879 return (sym); 1880 1881 /* 1882 * If we find a match and the symbol is defined, return the 1883 * symbol pointer and the link map in which it was found. 1884 */ 1885 if (sym->st_shndx != SHN_UNDEF) { 1886 *dlmp = ilmp; 1887 *binfo |= DBG_BINFO_FOUND; 1888 if (FLAGS(ilmp) & FLG_RT_INTRPOSE) 1889 *binfo |= DBG_BINFO_INTERPOSE; 1890 break; 1891 1892 /* 1893 * If we find a match and the symbol is undefined, the 1894 * symbol type is a function, and the value of the symbol 1895 * is non zero, then this is a special case. This allows 1896 * the resolution of a function address to the plt[] entry. 1897 * See SPARC ABI, Dynamic Linking, Function Addresses for 1898 * more details. 1899 */ 1900 } else if ((slp->sl_flags & LKUP_SPEC) && 1901 (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) && 1902 (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) { 1903 *dlmp = ilmp; 1904 *binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR); 1905 if (FLAGS(ilmp) & FLG_RT_INTRPOSE) 1906 *binfo |= DBG_BINFO_INTERPOSE; 1907 return (sym); 1908 } 1909 1910 /* 1911 * Undefined symbol. 1912 */ 1913 return ((Sym *)0); 1914 } 1915 1916 /* 1917 * We've found a match. Determine if the defining object contains 1918 * symbol binding information. 1919 */ 1920 if ((sip = SYMINFO(ilmp)) != 0) 1921 /* LINTED */ 1922 sip = (Syminfo *)((char *)sip + (ndx * SYMINENT(ilmp))); 1923 1924 /* 1925 * If this is a direct binding request, but the symbol definition has 1926 * disabled directly binding to it (presumably because the symbol 1927 * definition has been changed since the referring object was built), 1928 * indicate this failure so that the caller can fall back to a standard 1929 * symbol search. Clear any debug binding information for cleanliness. 1930 */ 1931 if (sip && (slp->sl_flags & LKUP_DIRECT) && 1932 (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) { 1933 *binfo |= BINFO_DIRECTDIS; 1934 *binfo &= ~DBG_BINFO_MSK; 1935 return ((Sym *)0); 1936 } 1937 1938 /* 1939 * Determine whether this object is acting as a filter. 1940 */ 1941 if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0) 1942 return (sym); 1943 1944 /* 1945 * Determine if this object offers per-symbol filtering, and if so, 1946 * whether this symbol references a filtee. 1947 */ 1948 if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) { 1949 /* 1950 * If this is a standard filter reference, and no standard 1951 * filtees remain to be inspected, we're done. If this is an 1952 * auxiliary filter reference, and no auxiliary filtees remain, 1953 * we'll fall through in case any object filtering is available. 1954 */ 1955 if ((sip->si_flags & SYMINFO_FLG_FILTER) && 1956 (SYMSFLTRCNT(ilmp) == 0)) 1957 return ((Sym *)0); 1958 1959 if ((sip->si_flags & SYMINFO_FLG_FILTER) || 1960 ((sip->si_flags & SYMINFO_FLG_AUXILIARY) && 1961 SYMAFLTRCNT(ilmp))) { 1962 Sym * fsym; 1963 1964 /* 1965 * This symbol has an associated filtee. Lookup the 1966 * symbol in the filtee, and if it is found return it. 1967 * If the symbol doesn't exist, and this is a standard 1968 * filter, return an error, otherwise fall through to 1969 * catch any object filtering that may be available. 1970 */ 1971 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 1972 sip->si_boundto)) != 0) 1973 return (fsym); 1974 if (sip->si_flags & SYMINFO_FLG_FILTER) 1975 return ((Sym *)0); 1976 } 1977 } 1978 1979 /* 1980 * Determine if this object provides global filtering. 1981 */ 1982 if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) { 1983 Sym * fsym; 1984 1985 if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) { 1986 /* 1987 * This object has an associated filtee. Lookup the 1988 * symbol in the filtee, and if it is found return it. 1989 * If the symbol doesn't exist, and this is a standard 1990 * filter, return and error, otherwise return the symbol 1991 * within the filter itself. 1992 */ 1993 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo, 1994 OBJFLTRNDX(ilmp))) != 0) 1995 return (fsym); 1996 } 1997 1998 if (flags1 & FL1_RT_OBJSFLTR) 1999 return ((Sym *)0); 2000 } 2001 return (sym); 2002 } 2003 2004 /* 2005 * Create a new Rt_map structure for an ELF object and initialize 2006 * all values. 2007 */ 2008 Rt_map * 2009 elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld, 2010 ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry, 2011 ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt) 2012 { 2013 Rt_map *lmp; 2014 ulong_t base, fltr = 0, audit = 0, cfile = 0, crle = 0; 2015 Xword rpath = 0; 2016 Ehdr *ehdr = (Ehdr *)addr; 2017 2018 DBG_CALL(Dbg_file_elf(pname, (ulong_t)ld, addr, msize, entry, 2019 get_linkmap_id(lml), lmco)); 2020 2021 /* 2022 * Allocate space for the link-map and private elf information. Once 2023 * these are allocated and initialized, we can use remove_so(0, lmp) to 2024 * tear down the link-map should any failures occur. 2025 */ 2026 if ((lmp = calloc(sizeof (Rt_map), 1)) == 0) 2027 return (0); 2028 if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) { 2029 free(lmp); 2030 return (0); 2031 } 2032 2033 /* 2034 * All fields not filled in were set to 0 by calloc. 2035 */ 2036 ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname; 2037 DYN(lmp) = ld; 2038 ADDR(lmp) = addr; 2039 MSIZE(lmp) = msize; 2040 ENTRY(lmp) = (Addr)entry; 2041 SYMINTP(lmp) = elf_find_sym; 2042 ETEXT(lmp) = etext; 2043 FCT(lmp) = &elf_fct; 2044 LIST(lmp) = lml; 2045 PADSTART(lmp) = paddr; 2046 PADIMLEN(lmp) = padimsize; 2047 THREADID(lmp) = rt_thr_self(); 2048 OBJFLTRNDX(lmp) = FLTR_DISABLED; 2049 SORTVAL(lmp) = -1; 2050 2051 MMAPS(lmp) = mmaps; 2052 MMAPCNT(lmp) = mmapcnt; 2053 ASSERT(mmapcnt != 0); 2054 2055 /* 2056 * If this is a shared object, add the base address to each address. 2057 * if this is an executable, use address as is. 2058 */ 2059 if (ehdr->e_type == ET_EXEC) { 2060 base = 0; 2061 FLAGS(lmp) |= FLG_RT_FIXED; 2062 } else 2063 base = addr; 2064 2065 /* 2066 * Fill in rest of the link map entries with information from the file's 2067 * dynamic structure. 2068 */ 2069 if (ld) { 2070 uint_t dyncnt = 0; 2071 Xword pltpadsz = 0; 2072 void *rtldinfo; 2073 2074 /* CSTYLED */ 2075 for ( ; ld->d_tag != DT_NULL; ++ld, dyncnt++) { 2076 switch ((Xword)ld->d_tag) { 2077 case DT_SYMTAB: 2078 SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2079 break; 2080 case DT_STRTAB: 2081 STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2082 break; 2083 case DT_SYMENT: 2084 SYMENT(lmp) = ld->d_un.d_val; 2085 break; 2086 case DT_FEATURE_1: 2087 ld->d_un.d_val |= DTF_1_PARINIT; 2088 if (ld->d_un.d_val & DTF_1_CONFEXP) 2089 crle = 1; 2090 break; 2091 case DT_MOVESZ: 2092 MOVESZ(lmp) = ld->d_un.d_val; 2093 FLAGS(lmp) |= FLG_RT_MOVE; 2094 break; 2095 case DT_MOVEENT: 2096 MOVEENT(lmp) = ld->d_un.d_val; 2097 break; 2098 case DT_MOVETAB: 2099 MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base); 2100 break; 2101 case DT_REL: 2102 case DT_RELA: 2103 /* 2104 * At this time we can only handle 1 type of 2105 * relocation per object. 2106 */ 2107 REL(lmp) = (void *)(ld->d_un.d_ptr + base); 2108 break; 2109 case DT_RELSZ: 2110 case DT_RELASZ: 2111 RELSZ(lmp) = ld->d_un.d_val; 2112 break; 2113 case DT_RELENT: 2114 case DT_RELAENT: 2115 RELENT(lmp) = ld->d_un.d_val; 2116 break; 2117 case DT_RELCOUNT: 2118 case DT_RELACOUNT: 2119 RELACOUNT(lmp) = (uint_t)ld->d_un.d_val; 2120 break; 2121 case DT_TEXTREL: 2122 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2123 break; 2124 case DT_HASH: 2125 HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2126 break; 2127 case DT_PLTGOT: 2128 PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base); 2129 break; 2130 case DT_PLTRELSZ: 2131 PLTRELSZ(lmp) = ld->d_un.d_val; 2132 break; 2133 case DT_JMPREL: 2134 JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base); 2135 break; 2136 case DT_INIT: 2137 INIT(lmp) = (void (*)())(ld->d_un.d_ptr + base); 2138 break; 2139 case DT_FINI: 2140 FINI(lmp) = (void (*)())(ld->d_un.d_ptr + base); 2141 break; 2142 case DT_INIT_ARRAY: 2143 INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2144 base); 2145 break; 2146 case DT_INIT_ARRAYSZ: 2147 INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2148 break; 2149 case DT_FINI_ARRAY: 2150 FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2151 base); 2152 break; 2153 case DT_FINI_ARRAYSZ: 2154 FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2155 break; 2156 case DT_PREINIT_ARRAY: 2157 PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr + 2158 base); 2159 break; 2160 case DT_PREINIT_ARRAYSZ: 2161 PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val; 2162 break; 2163 case DT_RPATH: 2164 case DT_RUNPATH: 2165 rpath = ld->d_un.d_val; 2166 break; 2167 case DT_FILTER: 2168 fltr = ld->d_un.d_val; 2169 OBJFLTRNDX(lmp) = dyncnt; 2170 FLAGS1(lmp) |= FL1_RT_OBJSFLTR; 2171 break; 2172 case DT_AUXILIARY: 2173 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2174 fltr = ld->d_un.d_val; 2175 OBJFLTRNDX(lmp) = dyncnt; 2176 } 2177 FLAGS1(lmp) |= FL1_RT_OBJAFLTR; 2178 break; 2179 case DT_SUNW_FILTER: 2180 SYMSFLTRCNT(lmp)++; 2181 FLAGS1(lmp) |= FL1_RT_SYMSFLTR; 2182 break; 2183 case DT_SUNW_AUXILIARY: 2184 if (!(rtld_flags & RT_FL_NOAUXFLTR)) { 2185 SYMAFLTRCNT(lmp)++; 2186 } 2187 FLAGS1(lmp) |= FL1_RT_SYMAFLTR; 2188 break; 2189 case DT_DEPAUDIT: 2190 if (!(rtld_flags & RT_FL_NOAUDIT)) 2191 audit = ld->d_un.d_val; 2192 break; 2193 case DT_CONFIG: 2194 cfile = ld->d_un.d_val; 2195 break; 2196 case DT_DEBUG: 2197 /* 2198 * DT_DEBUG entries are only created in 2199 * dynamic objects that require an interpretor 2200 * (ie. all dynamic executables and some shared 2201 * objects), and provide for a hand-shake with 2202 * debuggers. This entry is initialized to 2203 * zero by the link-editor. If a debugger has 2204 * us and updated this entry set the debugger 2205 * flag, and finish initializing the debugging 2206 * structure (see setup() also). Switch off any 2207 * configuration object use as most debuggers 2208 * can't handle fixed dynamic executables as 2209 * dependencies, and we can't handle requests 2210 * like object padding for alternative objects. 2211 */ 2212 if (ld->d_un.d_ptr) 2213 rtld_flags |= 2214 (RT_FL_DEBUGGER | RT_FL_NOOBJALT); 2215 ld->d_un.d_ptr = (Addr)&r_debug; 2216 break; 2217 case DT_VERNEED: 2218 VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr + 2219 base); 2220 break; 2221 case DT_VERNEEDNUM: 2222 /* LINTED */ 2223 VERNEEDNUM(lmp) = (int)ld->d_un.d_val; 2224 break; 2225 case DT_VERDEF: 2226 VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base); 2227 break; 2228 case DT_VERDEFNUM: 2229 /* LINTED */ 2230 VERDEFNUM(lmp) = (int)ld->d_un.d_val; 2231 break; 2232 case DT_BIND_NOW: 2233 if ((ld->d_un.d_val & DF_BIND_NOW) && 2234 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2235 MODE(lmp) |= RTLD_NOW; 2236 MODE(lmp) &= ~RTLD_LAZY; 2237 } 2238 break; 2239 case DT_FLAGS: 2240 if (ld->d_un.d_val & DF_SYMBOLIC) 2241 FLAGS1(lmp) |= FL1_RT_SYMBOLIC; 2242 if (ld->d_un.d_val & DF_TEXTREL) 2243 FLAGS1(lmp) |= FL1_RT_TEXTREL; 2244 if ((ld->d_un.d_val & DF_BIND_NOW) && 2245 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2246 MODE(lmp) |= RTLD_NOW; 2247 MODE(lmp) &= ~RTLD_LAZY; 2248 } 2249 break; 2250 case DT_FLAGS_1: 2251 if (ld->d_un.d_val & DF_1_DISPRELPND) 2252 FLAGS1(lmp) |= FL1_RT_DISPREL; 2253 if (ld->d_un.d_val & DF_1_GROUP) 2254 FLAGS(lmp) |= 2255 (FLG_RT_SETGROUP | FLG_RT_HANDLE); 2256 if ((ld->d_un.d_val & DF_1_NOW) && 2257 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) { 2258 MODE(lmp) |= RTLD_NOW; 2259 MODE(lmp) &= ~RTLD_LAZY; 2260 } 2261 if (ld->d_un.d_val & DF_1_NODELETE) 2262 MODE(lmp) |= RTLD_NODELETE; 2263 if (ld->d_un.d_val & DF_1_INITFIRST) 2264 FLAGS(lmp) |= FLG_RT_INITFRST; 2265 if (ld->d_un.d_val & DF_1_NOOPEN) 2266 FLAGS(lmp) |= FLG_RT_NOOPEN; 2267 if (ld->d_un.d_val & DF_1_LOADFLTR) 2268 FLAGS(lmp) |= FLG_RT_LOADFLTR; 2269 if (ld->d_un.d_val & DF_1_NODUMP) 2270 FLAGS(lmp) |= FLG_RT_NODUMP; 2271 if (ld->d_un.d_val & DF_1_CONFALT) 2272 crle = 1; 2273 if (ld->d_un.d_val & DF_1_DIRECT) 2274 FLAGS(lmp) |= FLG_RT_DIRECT; 2275 if (ld->d_un.d_val & DF_1_NODEFLIB) 2276 FLAGS1(lmp) |= FL1_RT_NODEFLIB; 2277 if (ld->d_un.d_val & DF_1_ENDFILTEE) 2278 FLAGS1(lmp) |= FL1_RT_ENDFILTE; 2279 if (ld->d_un.d_val & DF_1_TRANS) 2280 FLAGS(lmp) |= FLG_RT_TRANS; 2281 #ifndef EXPAND_RELATIVE 2282 if (ld->d_un.d_val & DF_1_ORIGIN) 2283 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2284 #endif 2285 /* 2286 * If this object identifies itself as an 2287 * interposer, but relocation processing has 2288 * already started, then demote it. It's too 2289 * late to guarantee complete interposition. 2290 */ 2291 if (ld->d_un.d_val & DF_1_INTERPOSE) { 2292 if ((lml->lm_flags & LML_FLG_STARTREL) == 0) 2293 FLAGS(lmp) |= FLG_RT_INTRPOSE; 2294 else { 2295 DBG_CALL(Dbg_util_intoolate(NAME(lmp))); 2296 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 2297 (void) printf( 2298 MSG_INTL(MSG_LDD_REL_ERR2), 2299 NAME(lmp)); 2300 } 2301 } 2302 break; 2303 case DT_SYMINFO: 2304 SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr + 2305 base); 2306 break; 2307 case DT_SYMINENT: 2308 SYMINENT(lmp) = ld->d_un.d_val; 2309 break; 2310 case DT_PLTPAD: 2311 PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base); 2312 break; 2313 case DT_PLTPADSZ: 2314 pltpadsz = ld->d_un.d_val; 2315 break; 2316 case DT_SUNW_RTLDINF: 2317 if ((lml->lm_info_lmp != 0) && 2318 (lml->lm_info_lmp != lmp)) { 2319 DBG_CALL(Dbg_unused_rtldinfo( 2320 NAME(lmp), 2321 NAME(lml->lm_info_lmp))); 2322 break; 2323 } 2324 lml->lm_info_lmp = lmp; 2325 rtldinfo = (void *)(ld->d_un.d_ptr + base); 2326 2327 /* 2328 * We maintain a list of DT_SUNW_RTLDINFO 2329 * structures for a given object. This permits 2330 * the RTLDINFO structures to be grouped 2331 * functionly inside of a shared object. 2332 * 2333 * For example, we could have one for 2334 * thread_init, and another for atexit 2335 * reservations. 2336 */ 2337 if (alist_append(&lml->lm_rtldinfo, &rtldinfo, 2338 sizeof (void *), AL_CNT_RTLDINFO) == 0) { 2339 remove_so(0, lmp); 2340 return (0); 2341 } 2342 break; 2343 case DT_DEPRECATED_SPARC_REGISTER: 2344 case M_DT_REGISTER: 2345 FLAGS(lmp) |= FLG_RT_REGSYMS; 2346 break; 2347 case M_DT_PLTRESERVE: 2348 PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr + 2349 base); 2350 break; 2351 } 2352 } 2353 2354 2355 if (PLTPAD(lmp)) { 2356 if (pltpadsz == (Xword)0) 2357 PLTPAD(lmp) = 0; 2358 else 2359 PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) + 2360 pltpadsz); 2361 } 2362 2363 /* 2364 * Allocate Dynamic Info structure 2365 */ 2366 if ((DYNINFO(lmp) = calloc((size_t)dyncnt, 2367 sizeof (Dyninfo))) == 0) { 2368 remove_so(0, lmp); 2369 return (0); 2370 } 2371 DYNINFOCNT(lmp) = dyncnt; 2372 } 2373 2374 /* 2375 * If configuration file use hasn't been disabled, and a configuration 2376 * file hasn't already been set via an environment variable, see if any 2377 * application specific configuration file is specified. An LD_CONFIG 2378 * setting is used first, but if this image was generated via crle(1) 2379 * then a default configuration file is a fall-back. 2380 */ 2381 if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) { 2382 if (cfile) 2383 config->c_name = (const char *)(cfile + 2384 (char *)STRTAB(lmp)); 2385 else if (crle) { 2386 rtld_flags |= RT_FL_CONFAPP; 2387 #ifndef EXPAND_RELATIVE 2388 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2389 #endif 2390 } 2391 } 2392 2393 if (rpath) 2394 RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp)); 2395 if (fltr) { 2396 /* 2397 * If this object is a global filter, duplicate the filtee 2398 * string name(s) so that REFNAME() is available in core files. 2399 * This cludge was useful for debuggers at one point, but only 2400 * when the filtee name was an individual full path. 2401 */ 2402 if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) { 2403 remove_so(0, lmp); 2404 return (0); 2405 } 2406 } 2407 2408 if (rtld_flags & RT_FL_RELATIVE) 2409 FLAGS1(lmp) |= FL1_RT_RELATIVE; 2410 2411 /* 2412 * For Intel ABI compatibility. It's possible that a JMPREL can be 2413 * specified without any other relocations (e.g. a dynamic executable 2414 * normally only contains .plt relocations). If this is the case then 2415 * no REL, RELSZ or RELENT will have been created. For us to be able 2416 * to traverse the .plt relocations under LD_BIND_NOW we need to know 2417 * the RELENT for these relocations. Refer to elf_reloc() for more 2418 * details. 2419 */ 2420 if (!RELENT(lmp) && JMPREL(lmp)) 2421 RELENT(lmp) = sizeof (Rel); 2422 2423 /* 2424 * Establish any per-object auditing. If we're establishing `main's 2425 * link-map its too early to go searching for audit objects so just 2426 * hold the object name for later (see setup()). 2427 */ 2428 if (audit) { 2429 char *cp = audit + (char *)STRTAB(lmp); 2430 2431 if (*cp) { 2432 if (((AUDITORS(lmp) = 2433 calloc(1, sizeof (Audit_desc))) == 0) || 2434 ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) { 2435 remove_so(0, lmp); 2436 return (0); 2437 } 2438 if (lml_main.lm_head) { 2439 if (audit_setup(lmp, AUDITORS(lmp), 0) == 0) { 2440 remove_so(0, lmp); 2441 return (0); 2442 } 2443 FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags; 2444 lml->lm_flags |= LML_FLG_LOCAUDIT; 2445 } 2446 } 2447 } 2448 2449 if ((CONDVAR(lmp) = rt_cond_create()) == 0) { 2450 remove_so(0, lmp); 2451 return (0); 2452 } 2453 if (oname && ((append_alias(lmp, oname, 0)) == 0)) { 2454 remove_so(0, lmp); 2455 return (0); 2456 } 2457 2458 /* 2459 * Add the mapped object to the end of the link map list. 2460 */ 2461 lm_append(lml, lmco, lmp); 2462 return (lmp); 2463 } 2464 2465 /* 2466 * Assign hardware/software capabilities. 2467 */ 2468 void 2469 cap_assign(Cap *cap, Rt_map *lmp) 2470 { 2471 while (cap->c_tag != CA_SUNW_NULL) { 2472 switch (cap->c_tag) { 2473 case CA_SUNW_HW_1: 2474 HWCAP(lmp) = cap->c_un.c_val; 2475 break; 2476 case CA_SUNW_SF_1: 2477 SFCAP(lmp) = cap->c_un.c_val; 2478 } 2479 cap++; 2480 } 2481 } 2482 2483 /* 2484 * Map in an ELF object. 2485 * Takes an open file descriptor for the object to map and its pathname; returns 2486 * a pointer to a Rt_map structure for this object, or 0 on error. 2487 */ 2488 static Rt_map * 2489 elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname, 2490 int fd) 2491 { 2492 int i; /* general temporary */ 2493 Off memsize = 0; /* total memory size of pathname */ 2494 Off mentry; /* entry point */ 2495 Ehdr *ehdr; /* ELF header of ld.so */ 2496 Phdr *phdr; /* first Phdr in file */ 2497 Phdr *phdr0; /* Saved first Phdr in file */ 2498 Phdr *pptr; /* working Phdr */ 2499 Phdr *fph = 0; /* first loadable Phdr */ 2500 Phdr *lph; /* last loadable Phdr */ 2501 Phdr *lfph = 0; /* last loadable (filesz != 0) Phdr */ 2502 Phdr *lmph = 0; /* last loadable (memsz != 0) Phdr */ 2503 Phdr *swph = 0; /* program header for SUNWBSS */ 2504 Phdr *tlph = 0; /* program header for PT_TLS */ 2505 Phdr *unwindph = 0; /* program header for PT_SUNW_UNWIND */ 2506 Cap *cap = 0; /* program header for SUNWCAP */ 2507 Dyn *mld = 0; /* DYNAMIC structure for pathname */ 2508 size_t size; /* size of elf and program headers */ 2509 caddr_t faddr = 0; /* mapping address of pathname */ 2510 Rt_map *lmp; /* link map created */ 2511 caddr_t paddr; /* start of padded image */ 2512 Off plen; /* size of image including padding */ 2513 Half etype; 2514 int fixed; 2515 Mmap *mmaps; 2516 uint_t mmapcnt = 0; 2517 Xword align = 0; 2518 2519 /* LINTED */ 2520 ehdr = (Ehdr *)fmap->fm_maddr; 2521 2522 /* 2523 * If this a relocatable object then special processing is required. 2524 */ 2525 if ((etype = ehdr->e_type) == ET_REL) 2526 return (elf_obj_file(lml, lmco, pname, fd)); 2527 2528 /* 2529 * If this isn't a dynamic executable or shared object we can't process 2530 * it. If this is a dynamic executable then all addresses are fixed. 2531 */ 2532 if (etype == ET_EXEC) 2533 fixed = 1; 2534 else if (etype == ET_DYN) 2535 fixed = 0; 2536 else { 2537 eprintf(ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname, 2538 conv_etype_str(etype)); 2539 return (0); 2540 } 2541 2542 /* 2543 * If our original mapped page was not large enough to hold all the 2544 * program headers remap them. 2545 */ 2546 size = (size_t)((char *)ehdr->e_phoff + 2547 (ehdr->e_phnum * ehdr->e_phentsize)); 2548 if (size > fmap->fm_fsize) { 2549 eprintf(ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2550 return (0); 2551 } 2552 if (size > fmap->fm_msize) { 2553 fmap_setup(); 2554 if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ, 2555 fmap->fm_mflags, fd, 0)) == MAP_FAILED) { 2556 int err = errno; 2557 eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname, 2558 strerror(err)); 2559 return (0); 2560 } 2561 fmap->fm_msize = size; 2562 /* LINTED */ 2563 ehdr = (Ehdr *)fmap->fm_maddr; 2564 } 2565 /* LINTED */ 2566 phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize); 2567 2568 /* 2569 * Get entry point. 2570 */ 2571 mentry = ehdr->e_entry; 2572 2573 /* 2574 * Point at program headers and perform some basic validation. 2575 */ 2576 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2577 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2578 if ((pptr->p_type == PT_LOAD) || 2579 (pptr->p_type == PT_SUNWBSS)) { 2580 2581 if (fph == 0) { 2582 fph = pptr; 2583 /* LINTED argument lph is initialized in first pass */ 2584 } else if (pptr->p_vaddr <= lph->p_vaddr) { 2585 eprintf(ERR_ELF, MSG_INTL(MSG_GEN_INVPRGHDR), 2586 pname); 2587 return (0); 2588 } 2589 2590 lph = pptr; 2591 2592 if (pptr->p_memsz) 2593 lmph = pptr; 2594 if (pptr->p_filesz) 2595 lfph = pptr; 2596 if (pptr->p_type == PT_SUNWBSS) 2597 swph = pptr; 2598 if (pptr->p_align > align) 2599 align = pptr->p_align; 2600 2601 } else if (pptr->p_type == PT_DYNAMIC) 2602 mld = (Dyn *)(pptr->p_vaddr); 2603 else if (pptr->p_type == PT_TLS) 2604 tlph = pptr; 2605 else if (pptr->p_type == PT_SUNWCAP) 2606 cap = (Cap *)(pptr->p_vaddr); 2607 else if (pptr->p_type == PT_SUNW_UNWIND) 2608 unwindph = pptr; 2609 } 2610 2611 #if defined(MAP_ALIGN) 2612 /* 2613 * Make sure the maximum page alignment is a power of 2 >= the system 2614 * page size, for use with MAP_ALIGN. 2615 */ 2616 align = M_PROUND(align); 2617 #endif 2618 2619 /* 2620 * We'd better have at least one loadable segment, together with some 2621 * specified file and memory size. 2622 */ 2623 if ((fph == 0) || (lmph == 0) || (lfph == 0)) { 2624 eprintf(ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname); 2625 return (0); 2626 } 2627 2628 /* 2629 * Check that the files size accounts for the loadable sections 2630 * we're going to map in (failure to do this may cause spurious 2631 * bus errors if we're given a truncated file). 2632 */ 2633 if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) { 2634 eprintf(ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname); 2635 return (0); 2636 } 2637 2638 /* 2639 * Memsize must be page rounded so that if we add object padding 2640 * at the end it will start at the beginning of a page. 2641 */ 2642 plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) - 2643 M_PTRUNC((ulong_t)fph->p_vaddr)); 2644 2645 /* 2646 * Determine if an existing mapping is acceptable. 2647 */ 2648 if (interp && (lml->lm_flags & LML_FLG_BASELM) && 2649 (strcmp(pname, interp->i_name) == 0)) { 2650 /* 2651 * If this is the interpreter then it has already been mapped 2652 * and we have the address so don't map it again. Note that 2653 * the common occurrence of a reference to the interpretor 2654 * (libdl -> ld.so.1) will have been caught during filter 2655 * initialization (see elf_lookup_filtee()). However, some 2656 * ELF implementations are known to record libc.so.1 as the 2657 * interpretor, and thus this test catches this behavior. 2658 */ 2659 paddr = faddr = interp->i_faddr; 2660 2661 } else if ((fixed == 0) && (r_debug.rtd_objpad == 0) && 2662 (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) && 2663 (fph->p_filesz == fph->p_memsz) && 2664 (((Xword)fmap->fm_maddr % align) == 0)) { 2665 /* 2666 * If the mapping required has already been established from 2667 * the initial page we don't need to do anything more. Reset 2668 * the fmap address so then any later files start a new fmap. 2669 * This is really an optimization for filters, such as libdl.so, 2670 * which should only require one page. 2671 */ 2672 paddr = faddr = fmap->fm_maddr; 2673 fmap->fm_maddr = 0; 2674 fmap_setup(); 2675 } 2676 2677 /* 2678 * Allocate a mapping array to retain mapped segment information. 2679 */ 2680 if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0) 2681 return (0); 2682 2683 /* 2684 * If we're reusing an existing mapping determine the objects etext 2685 * address. Otherwise map the file (which will calculate the etext 2686 * address as part of the mapping process). 2687 */ 2688 if (faddr) { 2689 caddr_t base; 2690 2691 if (fixed) 2692 base = 0; 2693 else 2694 base = faddr; 2695 2696 /* LINTED */ 2697 phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize); 2698 2699 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++, 2700 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) { 2701 if (pptr->p_type != PT_LOAD) 2702 continue; 2703 2704 mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base); 2705 mmaps[mmapcnt].m_msize = pptr->p_memsz; 2706 mmaps[mmapcnt].m_fsize = pptr->p_filesz; 2707 mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC); 2708 mmapcnt++; 2709 2710 if (!(pptr->p_flags & PF_W)) { 2711 fmap->fm_etext = (ulong_t)pptr->p_vaddr + 2712 (ulong_t)pptr->p_memsz + 2713 (ulong_t)(fixed ? 0 : faddr); 2714 } 2715 } 2716 } else { 2717 /* 2718 * Map the file. 2719 */ 2720 if (!(faddr = elf_map_it(pname, memsize, ehdr, fph, lph, 2721 &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt))) 2722 return (0); 2723 } 2724 2725 /* 2726 * Calculate absolute base addresses and entry points. 2727 */ 2728 if (!fixed) { 2729 if (mld) 2730 /* LINTED */ 2731 mld = (Dyn *)((Off)mld + faddr); 2732 if (cap) 2733 /* LINTED */ 2734 cap = (Cap *)((Off)cap + faddr); 2735 mentry += (Off)faddr; 2736 } 2737 2738 /* 2739 * Create new link map structure for newly mapped shared object. 2740 */ 2741 if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr, 2742 fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps, 2743 mmapcnt))) { 2744 (void) munmap((caddr_t)faddr, memsize); 2745 return (0); 2746 } 2747 2748 /* 2749 * Start the system loading in the ELF information we'll be processing. 2750 */ 2751 if (REL(lmp)) { 2752 (void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) + 2753 (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp), 2754 MADV_WILLNEED); 2755 } 2756 2757 /* 2758 * If this shared object contains a any special segments, record them. 2759 */ 2760 if (swph) { 2761 FLAGS(lmp) |= FLG_RT_SUNWBSS; 2762 SUNWBSS(lmp) = phdr + (swph - phdr0); 2763 } 2764 if (tlph) { 2765 PTTLS(lmp) = phdr + (tlph - phdr0); 2766 tls_assign_soffset(lmp); 2767 } 2768 2769 if (unwindph) 2770 PTUNWIND(lmp) = phdr + (unwindph - phdr0); 2771 2772 if (cap) 2773 cap_assign(cap, lmp); 2774 2775 return (lmp); 2776 } 2777 2778 2779 /* 2780 * Function to correct protection settings. Segments are all mapped initially 2781 * with permissions as given in the segment header. We need to turn on write 2782 * permissions on a text segment if there are any relocations against that 2783 * segment, and them turn write permission back off again before returning 2784 * control to the user. This function turns the permission on or off depending 2785 * on the value of the argument. 2786 */ 2787 int 2788 elf_set_prot(Rt_map * lmp, int permission) 2789 { 2790 Mmap *mmaps; 2791 2792 /* 2793 * If this is an allocated image (ie. a relocatable object) we can't 2794 * mprotect() anything. 2795 */ 2796 if (FLAGS(lmp) & FLG_RT_IMGALLOC) 2797 return (1); 2798 2799 DBG_CALL(Dbg_file_prot(NAME(lmp), permission)); 2800 2801 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) { 2802 if (mmaps->m_perm & PROT_WRITE) 2803 continue; 2804 2805 if (mprotect(mmaps->m_vaddr, mmaps->m_msize, 2806 (mmaps->m_perm | permission)) == -1) { 2807 int err = errno; 2808 eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_MPROT), 2809 NAME(lmp), strerror(err)); 2810 return (0); 2811 } 2812 } 2813 return (1); 2814 } 2815 2816 /* 2817 * Build full pathname of shared object from given directory name and filename. 2818 */ 2819 static char * 2820 elf_get_so(const char *dir, const char *file) 2821 { 2822 static char pname[PATH_MAX]; 2823 2824 (void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file); 2825 return (pname); 2826 } 2827 2828 /* 2829 * The copy relocation is recorded in a copy structure which will be applied 2830 * after all other relocations are carried out. This provides for copying data 2831 * that must be relocated itself (ie. pointers in shared objects). This 2832 * structure also provides a means of binding RTLD_GROUP dependencies to any 2833 * copy relocations that have been taken from any group members. 2834 * 2835 * If the size of the .bss area available for the copy information is not the 2836 * same as the source of the data inform the user if we're under ldd(1) control 2837 * (this checking was only established in 5.3, so by only issuing an error via 2838 * ldd(1) we maintain the standard set by previous releases). 2839 */ 2840 int 2841 elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym, 2842 Rt_map *dlmp, const void *dadd) 2843 { 2844 Rel_copy rc; 2845 Lm_list *lml = LIST(rlmp); 2846 2847 rc.r_name = name; 2848 rc.r_rsym = rsym; /* the new reference symbol and its */ 2849 rc.r_rlmp = rlmp; /* associated link-map */ 2850 rc.r_dlmp = dlmp; /* the defining link-map */ 2851 rc.r_dsym = dsym; /* the original definition */ 2852 rc.r_radd = radd; 2853 rc.r_dadd = dadd; 2854 2855 if (rsym->st_size > dsym->st_size) 2856 rc.r_size = (size_t)dsym->st_size; 2857 else 2858 rc.r_size = (size_t)rsym->st_size; 2859 2860 if (alist_append(©(dlmp), &rc, sizeof (Rel_copy), 2861 AL_CNT_COPYREL) == 0) { 2862 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 2863 return (0); 2864 else 2865 return (1); 2866 } 2867 if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) { 2868 if (alist_append(©(rlmp), &dlmp, 2869 sizeof (Rt_map *), AL_CNT_COPYREL) == 0) { 2870 if (!(lml->lm_flags & LML_FLG_TRC_WARN)) 2871 return (0); 2872 else 2873 return (1); 2874 } 2875 FLAGS1(dlmp) |= FL1_RT_COPYTOOK; 2876 } 2877 2878 /* 2879 * If we are tracing (ldd), warn the user if 2880 * 1) the size from the reference symbol differs from the 2881 * copy definition. We can only copy as much data as the 2882 * reference (dynamic executables) entry allows. 2883 * 2) the copy definition has STV_PROTECTED visibility. 2884 */ 2885 if (lml->lm_flags & LML_FLG_TRC_WARN) { 2886 if (rsym->st_size != dsym->st_size) { 2887 (void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF), 2888 _conv_reloc_type_str(M_R_COPY), demangle(name), 2889 NAME(rlmp), EC_XWORD(rsym->st_size), 2890 NAME(dlmp), EC_XWORD(dsym->st_size)); 2891 if (rsym->st_size > dsym->st_size) 2892 (void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA), 2893 NAME(dlmp)); 2894 else 2895 (void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC), 2896 NAME(rlmp)); 2897 } 2898 2899 if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) { 2900 (void) printf(MSG_INTL(MSG_LDD_CPY_PROT), 2901 _conv_reloc_type_str(M_R_COPY), demangle(name), 2902 NAME(dlmp)); 2903 } 2904 } 2905 2906 DBG_CALL(Dbg_reloc_apply((Xword)radd, (Xword)rc.r_size)); 2907 return (1); 2908 } 2909 2910 /* 2911 * Determine the symbol location of an address within a link-map. Look for 2912 * the nearest symbol (whose value is less than or equal to the required 2913 * address). This is the object specific part of dladdr(). 2914 */ 2915 static void 2916 elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags) 2917 { 2918 ulong_t ndx, cnt, base, _value; 2919 Sym *sym, *_sym; 2920 const char *str; 2921 int _flags; 2922 2923 /* 2924 * If we don't have a .hash table there are no symbols to look at. 2925 */ 2926 if (HASH(lmp) == 0) 2927 return; 2928 2929 cnt = HASH(lmp)[1]; 2930 str = STRTAB(lmp); 2931 sym = SYMTAB(lmp); 2932 2933 if (FLAGS(lmp) & FLG_RT_FIXED) 2934 base = 0; 2935 else 2936 base = ADDR(lmp); 2937 2938 for (_sym = 0, _value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) { 2939 ulong_t value; 2940 2941 if (sym->st_shndx == SHN_UNDEF) 2942 continue; 2943 2944 value = sym->st_value + base; 2945 if (value > addr) 2946 continue; 2947 if (value < _value) 2948 continue; 2949 2950 _sym = sym; 2951 _value = value; 2952 2953 /* 2954 * Note, because we accept local and global symbols we could 2955 * find a section symbol that matches the associated address, 2956 * which means that the symbol name will be null. In this 2957 * case continue the search in case we can find a global 2958 * symbol of the same value. 2959 */ 2960 if ((value == addr) && 2961 (ELF_ST_TYPE(sym->st_info) != STT_SECTION)) 2962 break; 2963 } 2964 2965 _flags = flags & RTLD_DL_MASK; 2966 if (_sym) { 2967 if (_flags == RTLD_DL_SYMENT) 2968 *info = (void *)_sym; 2969 else if (_flags == RTLD_DL_LINKMAP) 2970 *info = (void *)lmp; 2971 2972 dlip->dli_sname = str + _sym->st_name; 2973 dlip->dli_saddr = (void *)_value; 2974 } else { 2975 /* 2976 * addr lies between the beginning of the mapped segment and 2977 * the first global symbol. We have no symbol to return 2978 * and the caller requires one. We use _START_, the base 2979 * address of the mapping. 2980 */ 2981 2982 if (_flags == RTLD_DL_SYMENT) { 2983 /* 2984 * An actual symbol struct is needed, so we 2985 * construct one for _START_. To do this in a 2986 * fully accurate way requires a different symbol 2987 * for each mapped segment. This requires the 2988 * use of dynamic memory and a mutex. That's too much 2989 * plumbing for a fringe case of limited importance. 2990 * 2991 * Fortunately, we can simplify: 2992 * - Only the st_size and st_info fields are useful 2993 * outside of the linker internals. The others 2994 * reference things that outside code cannot see, 2995 * and can be set to 0. 2996 * - It's just a label and there is no size 2997 * to report. So, the size should be 0. 2998 * This means that only st_info needs a non-zero 2999 * (constant) value. A static struct will suffice. 3000 * It must be const (readonly) so the caller can't 3001 * change its meaning for subsequent callers. 3002 */ 3003 static const Sym fsym = { 0, 0, 0, 3004 ELF_ST_INFO(STB_LOCAL, STT_OBJECT) }; 3005 *info = (void *) &fsym; 3006 } 3007 3008 dlip->dli_sname = MSG_ORIG(MSG_SYM_START); 3009 dlip->dli_saddr = (void *) ADDR(lmp); 3010 } 3011 } 3012 3013 static void 3014 elf_lazy_cleanup(Alist * alp) 3015 { 3016 Rt_map ** lmpp; 3017 Aliste off; 3018 3019 /* 3020 * Cleanup any link-maps added to this dynamic list and free it. 3021 */ 3022 for (ALIST_TRAVERSE(alp, off, lmpp)) 3023 FLAGS(*lmpp) &= ~FLG_RT_DLSYM; 3024 free(alp); 3025 } 3026 3027 /* 3028 * This routine is called upon to search for a symbol from the dependencies of 3029 * the initial link-map. To maintain lazy loadings goal of reducing the number 3030 * of objects mapped, any symbol search is first carried out using the objects 3031 * that already exist in the process (either on a link-map list or handle). 3032 * If a symbol can't be found, and lazy dependencies are still pending, this 3033 * routine loads the dependencies in an attempt to locate the symbol. 3034 * 3035 * Only new objects are inspected as we will have already inspected presently 3036 * loaded objects before calling this routine. However, a new object may not 3037 * be new - although the di_lmp might be zero, the object may have been mapped 3038 * as someone elses dependency. Thus there's a possibility of some symbol 3039 * search duplication. 3040 */ 3041 3042 Sym * 3043 elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo) 3044 { 3045 Sym *sym = 0; 3046 Alist * alist = 0; 3047 Aliste off; 3048 Rt_map ** lmpp, * lmp = slp->sl_imap; 3049 const char *name = slp->sl_name; 3050 3051 if (alist_append(&alist, &lmp, sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) 3052 return (0); 3053 FLAGS(lmp) |= FLG_RT_DLSYM; 3054 3055 for (ALIST_TRAVERSE(alist, off, lmpp)) { 3056 uint_t cnt = 0; 3057 Slookup sl = *slp; 3058 Dyninfo *dip; 3059 3060 /* 3061 * Loop through the DT_NEEDED entries examining each object for 3062 * the symbol. If the symbol is not found the object is in turn 3063 * added to the alist, so that its DT_NEEDED entires may be 3064 * examined. 3065 */ 3066 lmp = *lmpp; 3067 for (dip = DYNINFO(lmp); cnt < DYNINFOCNT(lmp); cnt++, dip++) { 3068 Rt_map *nlmp; 3069 3070 if (((dip->di_flags & FLG_DI_NEEDED) == 0) || 3071 dip->di_info) 3072 continue; 3073 3074 /* 3075 * If this entry defines a lazy dependency try loading 3076 * it. If the file can't be loaded, consider this 3077 * non-fatal and continue the search (lazy loaded 3078 * dependencies need not exist and their loading should 3079 * only be fatal if called from a relocation). 3080 * 3081 * If the file is already loaded and relocated we must 3082 * still inspect it for symbols, even though it might 3083 * have already been searched. This lazy load operation 3084 * might have promoted the permissions of the object, 3085 * and thus made the object applicable for this symbol 3086 * search, whereas before the object might have been 3087 * skipped. 3088 */ 3089 if ((nlmp = elf_lazy_load(lmp, cnt, name)) == 0) 3090 continue; 3091 3092 /* 3093 * If this object isn't yet a part of the dynamic list 3094 * then inspect it for the symbol. If the symbol isn't 3095 * found add the object to the dynamic list so that we 3096 * can inspect its dependencies. 3097 */ 3098 if (FLAGS(nlmp) & FLG_RT_DLSYM) 3099 continue; 3100 3101 sl.sl_imap = nlmp; 3102 if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, binfo)) 3103 break; 3104 3105 /* 3106 * Some dlsym() operations are already traversing a 3107 * link-map (dlopen(0)), and thus there's no need to 3108 * build our own dynamic dependency list. 3109 */ 3110 if ((sl.sl_flags & LKUP_NODESCENT) == 0) { 3111 if (alist_append(&alist, &nlmp, 3112 sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) { 3113 elf_lazy_cleanup(alist); 3114 return (0); 3115 } 3116 FLAGS(nlmp) |= FLG_RT_DLSYM; 3117 } 3118 } 3119 if (sym) 3120 break; 3121 } 3122 3123 elf_lazy_cleanup(alist); 3124 return (sym); 3125 } 3126 3127 /* 3128 * Warning message for bad r_offset. 3129 */ 3130 void 3131 elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset, 3132 ulong_t rsymndx) 3133 { 3134 const char *name = (char *)0; 3135 int trace; 3136 3137 if ((LIST(lmp)->lm_flags & LML_FLG_TRC_ENABLE) && 3138 (((rtld_flags & RT_FL_SILENCERR) == 0) || 3139 (LIST(lmp)->lm_flags & LML_FLG_TRC_VERBOSE))) 3140 trace = 1; 3141 else 3142 trace = 0; 3143 3144 if ((trace == 0) && (dbg_mask == 0)) 3145 return; 3146 3147 if (rsymndx) { 3148 Sym *symref = (Sym *)((ulong_t)SYMTAB(lmp) + 3149 (rsymndx * SYMENT(lmp))); 3150 3151 if (ELF_ST_BIND(symref->st_info) != STB_LOCAL) 3152 name = (char *)(STRTAB(lmp) + symref->st_name); 3153 } 3154 3155 if (name == 0) 3156 name = MSG_ORIG(MSG_STR_EMPTY); 3157 3158 if (trace) { 3159 const char *rstr; 3160 3161 rstr = _conv_reloc_type_str((uint_t)rtype); 3162 (void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name, 3163 EC_ADDR(roffset)); 3164 return; 3165 } 3166 3167 Dbg_reloc_error(M_MACH, M_REL_SHT_TYPE, rel, name, 3168 MSG_ORIG(MSG_REL_BADROFFSET)); 3169 } 3170