/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 *
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"


/*
 * Object file dependent support for ELF objects.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/procfs.h>
#include	<sys/mman.h>
#include	<sys/debug.h>
#include	<string.h>
#include	<limits.h>
#include	<dlfcn.h>
#include	"conv.h"
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"
#include	"debug.h"

/*
 * Default and secure dependency search paths.
 */
static Pnode	elf_dflt_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB_64), 0, MSG_PTH_LIB_64_SIZE,
	    LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB_64), 0, MSG_PTH_USRLIB_64_SIZE,
	    LA_SER_DEFAULT, 0, 0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB), 0, MSG_PTH_LIB_SIZE,
	    LA_SER_DEFAULT, 0, &elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB), 0, MSG_PTH_USRLIB_SIZE,
	    LA_SER_DEFAULT, 0, 0 }
#endif
};

static Pnode	elf_secure_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE_64), 0, MSG_PTH_LIBSE_64_SIZE,
	    LA_SER_SECURE, 0, &elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE_64), 0,
	    MSG_PTH_USRLIBSE_64_SIZE,
	    LA_SER_SECURE, 0, 0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE), 0, MSG_PTH_LIBSE_SIZE,
	    LA_SER_SECURE, 0, &elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE), 0, MSG_PTH_USRLIBSE_SIZE,
	    LA_SER_SECURE, 0, 0 }
#endif
};

/*
 * Defines for local functions.
 */
static Pnode	*elf_fix_name(const char *, Rt_map *, uint_t);
static int	elf_are_u(Rej_desc *);
static void	elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int);
static ulong_t	elf_entry_pt(void);
static char	*elf_get_so(const char *, const char *);
static Rt_map	*elf_map_so(Lm_list *, Aliste, const char *, const char *, int);
static int	elf_needed(Lm_list *, Aliste, Rt_map *);
static void	elf_unmap_so(Rt_map *);
static int	elf_verify_vers(const char *, Rt_map *, Rt_map *);

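/*
 * Note, each Pnode table above forms a simple linked list.  On a 32-bit
 * system built without SGS_PRE_UNIFIED_PROCESS, for example, the default
 * search path is expected to resolve to:
 *
 *	elf_dflt_dirs[0] ("/lib") -> elf_dflt_dirs[1] ("/usr/lib") -> 0
 *
 * with elf_secure_dirs chaining the corresponding secure directories
 * (/lib/secure -> /usr/lib/secure) in the same fashion.
 */
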
/*
 * Functions and data accessed through indirect pointers.
 */
Fct elf_fct = {
	elf_are_u,
	elf_entry_pt,
	elf_map_so,
	elf_unmap_so,
	elf_needed,
	lookup_sym,
	elf_reloc,
	elf_dflt_dirs,
	elf_secure_dirs,
	elf_fix_name,
	elf_get_so,
	elf_dladdr,
	dlsym_handle,
	elf_verify_vers,
	elf_set_prot
};


/*
 * Redefine NEEDED name if necessary.
 */
static Pnode *
elf_fix_name(const char *name, Rt_map *clmp, uint_t orig)
{
	/*
	 * For ABI compliance, if we are asked for ld.so.1, then really give
	 * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1).
	 */
	if (((*name == '/') &&
#if	defined(_ELF64)
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) ||
#else
	    (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) ||
#endif
	    (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) {
		Pnode	*pnp;

		DBG_CALL(Dbg_file_fixname(name, MSG_ORIG(MSG_PTH_LIBSYS)));
		if (((pnp = calloc(sizeof (Pnode), 1)) == 0) ||
		    ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) {
			if (pnp)
				free(pnp);
			return (0);
		}
		pnp->p_len = MSG_PTH_LIBSYS_SIZE;
		pnp->p_orig = (orig & PN_SER_MASK);
		return (pnp);
	}

	return (expand_paths(clmp, name, orig, 0));
}

/*
 * Determine if we have been given an ELF file and if so determine if the file
 * is compatible.  Returns 1 if true, else 0 and sets the reject descriptor
 * with associated error information.
 */
static int
elf_are_u(Rej_desc *rej)
{
	Ehdr	*ehdr;

	/*
	 * Determine if we're an elf file.  If not simply return, we don't set
	 * any rejection information as this test allows us to scroll through
	 * the objects we support (ELF, AOUT).
	 */
	if (fmap->fm_fsize < sizeof (Ehdr) ||
	    fmap->fm_maddr[EI_MAG0] != ELFMAG0 ||
	    fmap->fm_maddr[EI_MAG1] != ELFMAG1 ||
	    fmap->fm_maddr[EI_MAG2] != ELFMAG2 ||
	    fmap->fm_maddr[EI_MAG3] != ELFMAG3) {
		return (0);
	}

	/*
	 * Check class and encoding.
	 */
	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;
	if (ehdr->e_ident[EI_CLASS] != M_CLASS) {
		rej->rej_type = SGS_REJ_CLASS;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS];
		return (0);
	}
	if (ehdr->e_ident[EI_DATA] != M_DATA) {
		rej->rej_type = SGS_REJ_DATA;
		rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA];
		return (0);
	}
	if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) &&
	    (ehdr->e_type != ET_DYN)) {
		rej->rej_type = SGS_REJ_TYPE;
		rej->rej_info = (uint_t)ehdr->e_type;
		return (0);
	}

	/*
	 * Verify machine specific flags, and hardware capability requirements.
	 */
	if ((elf_mach_flags_check(rej, ehdr) == 0) ||
	    ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0)))
		return (0);

	/*
	 * Verify ELF version.  ??? is this too restrictive ???
	 */
	if (ehdr->e_version > EV_CURRENT) {
		rej->rej_type = SGS_REJ_VERSION;
		rej->rej_info = (uint_t)ehdr->e_version;
		return (0);
	}
	return (1);
}

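/*
 * For reference, the e_ident[] tests above key off the fixed four byte
 * magic number that begins every valid ELF file:
 *
 *	e_ident[EI_MAG0..EI_MAG3] == { 0x7f, 'E', 'L', 'F' }
 *
 * The EI_CLASS (ELFCLASS32/ELFCLASS64) and EI_DATA (ELFDATA2LSB/
 * ELFDATA2MSB) bytes that follow must match the class and encoding this
 * instance of ld.so.1 was built for (M_CLASS and M_DATA).
 */
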
Typically, for all other objects, this is called during 233 * analyze_so(), but as ld.so.1 is set-contained we skip this processing. 234 * 235 * o For intel, ld.so.1's JMPSLOT relocations need relative updates. These 236 * are by default skipped thus delaying all relative relocation processing 237 * on every invocation of ld.so.1. 238 */ 239 int 240 elf_rtld_load() 241 { 242 Lm_list *lml = &lml_rtld; 243 Rt_map *lmp = lml->lm_head; 244 245 if (lml->lm_flags & LML_FLG_PLTREL) 246 return (1); 247 248 /* 249 * As we need to refer to the DYNINFO() information, insure that it has 250 * been initialized. 251 */ 252 if (elf_needed(lml, ALO_DATA, lmp) == 0) 253 return (0); 254 255 #if defined(i386) 256 /* 257 * This is a kludge to give ld.so.1 a performance benefit on i386. 258 * It's based around two factors. 259 * 260 * o JMPSLOT relocations (PLT's) actually need a relative relocation 261 * applied to the GOT entry so that they can find PLT0. 262 * 263 * o ld.so.1 does not exercise *any* PLT's before it has made a call 264 * to elf_lazy_load(). This is because all dynamic dependencies 265 * are recorded as lazy dependencies. 266 */ 267 (void) elf_reloc_relacount((ulong_t)JMPREL(lmp), 268 (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp), 269 (ulong_t)ADDR(lmp)); 270 #endif 271 272 lml->lm_flags |= LML_FLG_PLTREL; 273 return (1); 274 } 275 276 /* 277 * Lazy load an object. 278 */ 279 Rt_map * 280 elf_lazy_load(Rt_map *clmp, uint_t ndx, const char *sym) 281 { 282 Rt_map *nlmp, *hlmp; 283 Dyninfo *dip = &DYNINFO(clmp)[ndx]; 284 uint_t flags = 0; 285 Pnode *pnp; 286 const char *name; 287 Lm_list *lml = LIST(clmp); 288 Lm_cntl *lmc; 289 Aliste lmco; 290 291 /* 292 * If this dependency has already been processed, we're done. 293 */ 294 if (((nlmp = (Rt_map *)dip->di_info) != 0) || 295 (dip->di_flags & FLG_DI_PROCESSD)) 296 return (nlmp); 297 298 /* 299 * Determine the initial dependency name, and indicate that this 300 * dependencies processing has initiated. 301 */ 302 name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val; 303 DBG_CALL(Dbg_file_lazyload(name, NAME(clmp), sym)); 304 if (lml->lm_flags & LML_FLG_TRC_ENABLE) 305 dip->di_flags |= FLG_DI_PROCESSD; 306 307 if (dip->di_flags & FLG_DI_GROUP) 308 flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE); 309 310 /* 311 * Expand the requested name if necessary. 312 */ 313 if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) 314 return (0); 315 316 /* 317 * Provided the object on the head of the link-map has completed its 318 * relocation, create a new link-map control list for this request. 319 */ 320 hlmp = lml->lm_head; 321 if (FLAGS(hlmp) & FLG_RT_RELOCED) { 322 if ((lmc = alist_append(&(lml->lm_lists), 0, sizeof (Lm_cntl), 323 AL_CNT_LMLISTS)) == 0) { 324 remove_pnode(pnp); 325 return (0); 326 } 327 lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists); 328 } else { 329 lmc = 0; 330 lmco = ALO_DATA; 331 } 332 333 /* 334 * Load the associated object. 335 */ 336 dip->di_info = nlmp = 337 load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0); 338 339 /* 340 * Remove any expanded pathname infrastructure. Reduce the pending lazy 341 * dependency count of the caller, together with the link-map lists 342 * count of objects that still have lazy dependencies pending. 343 */ 344 remove_pnode(pnp); 345 if (--LAZY(clmp) == 0) 346 LIST(clmp)->lm_lazy--; 347 348 /* 349 * Finish processing the objects associated with this request. 
/*
 * Lazy load an object.
 */
Rt_map *
elf_lazy_load(Rt_map *clmp, uint_t ndx, const char *sym)
{
	Rt_map		*nlmp, *hlmp;
	Dyninfo		*dip = &DYNINFO(clmp)[ndx];
	uint_t		flags = 0;
	Pnode		*pnp;
	const char	*name;
	Lm_list		*lml = LIST(clmp);
	Lm_cntl		*lmc;
	Aliste		lmco;

	/*
	 * If this dependency has already been processed, we're done.
	 */
	if (((nlmp = (Rt_map *)dip->di_info) != 0) ||
	    (dip->di_flags & FLG_DI_PROCESSD))
		return (nlmp);

	/*
	 * Determine the initial dependency name, and indicate that this
	 * dependency's processing has been initiated.
	 */
	name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val;
	DBG_CALL(Dbg_file_lazyload(name, NAME(clmp), sym));
	if (lml->lm_flags & LML_FLG_TRC_ENABLE)
		dip->di_flags |= FLG_DI_PROCESSD;

	if (dip->di_flags & FLG_DI_GROUP)
		flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE);

	/*
	 * Expand the requested name if necessary.
	 */
	if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0)
		return (0);

	/*
	 * Provided the object on the head of the link-map has completed its
	 * relocation, create a new link-map control list for this request.
	 */
	hlmp = lml->lm_head;
	if (FLAGS(hlmp) & FLG_RT_RELOCED) {
		if ((lmc = alist_append(&(lml->lm_lists), 0, sizeof (Lm_cntl),
		    AL_CNT_LMLISTS)) == 0) {
			remove_pnode(pnp);
			return (0);
		}
		lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists);
	} else {
		lmc = 0;
		lmco = ALO_DATA;
	}

	/*
	 * Load the associated object.
	 */
	dip->di_info = nlmp =
	    load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0);

	/*
	 * Remove any expanded pathname infrastructure.  Reduce the pending
	 * lazy dependency count of the caller, together with the link-map
	 * lists count of objects that still have lazy dependencies pending.
	 */
	remove_pnode(pnp);
	if (--LAZY(clmp) == 0)
		LIST(clmp)->lm_lazy--;

	/*
	 * Finish processing the objects associated with this request.
	 */
	if (nlmp && ((analyze_lmc(lml, lmco, nlmp) == 0) ||
	    (relocate_lmc(lml, lmco, nlmp) == 0)))
		dip->di_info = nlmp = 0;

	/*
	 * If the dependency has been successfully processed, and it is part
	 * of a link-map control list that is equivalent, or less, than the
	 * caller's control list, create an association between the caller
	 * and this dependency.  If this dependency isn't yet a part of the
	 * caller's link-map control list, then it is still a part of a list
	 * that is being relocated.  As the relocation of an object on this
	 * list might still fail, we can't yet bind the caller to this
	 * object.  To do so would lock the object so that it couldn't be
	 * deleted.  Mark this object as free, and it will be reprocessed
	 * when this dependency is next referenced.
	 */
	if (nlmp) {
		if (CNTL(nlmp) <= CNTL(clmp)) {
			if (bind_one(clmp, nlmp, BND_NEEDED) == 0)
				dip->di_info = nlmp = 0;
		} else {
			dip->di_info = 0;
			dip->di_flags &= ~FLG_DI_PROCESSD;
			if (LAZY(clmp)++ == 0)
				LIST(clmp)->lm_lazy++;
		}
	}

	/*
	 * After a successful load, any objects collected on the new link-map
	 * control list will have been moved to the caller's link-map control
	 * list.  This control list can now be deleted.
	 */
	if (lmc) {
		if (nlmp == 0)
			remove_incomplete(lml, lmco);
		remove_cntl(lml, lmco);
	}

	return (nlmp);
}


/*
 * Return the entry point of the ELF executable.
 */
static ulong_t
elf_entry_pt(void)
{
	return (ENTRY(lml_main.lm_head));
}

/*
 * Unmap a given ELF shared object from the address space.
 */
static void
elf_unmap_so(Rt_map *lmp)
{
	caddr_t	addr;
	size_t	size;
	Mmap	*mmaps;

	/*
	 * If this link map represents a relocatable object concatenation,
	 * then the image was simply generated in allocated memory.  Free the
	 * memory.
	 *
	 * Note: the memory was originally allocated in the libelf:_elf_outmap
	 * routine and would normally have been free'd in elf_outsync(), but
	 * because we 'interpose' on that routine the memory wasn't free'd at
	 * that time.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC) {
		free((void *)ADDR(lmp));
		return;
	}

	/*
	 * If padding was enabled via rtld_db, then we have at least one page
	 * in front of the image - and possibly a trailing page.
	 * Unmap the front page first:
	 */
	if (PADSTART(lmp) != ADDR(lmp)) {
		addr = (caddr_t)M_PTRUNC(PADSTART(lmp));
		size = ADDR(lmp) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap any trailing padding.
	 */
	if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) >
	    M_PROUND(ADDR(lmp) + MSIZE(lmp))) {
		addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp));
		size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap all mapped segments.
	 */
	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++)
		(void) munmap(mmaps->m_vaddr, mmaps->m_msize);
}

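/*
 * For example, assuming a 4K MMU page size, the page macros used above
 * behave as follows:
 *
 *	M_PTRUNC(0x12345) = 0x12000	(truncate to the start of the page)
 *	M_PROUND(0x12345) = 0x13000	(round up to the next page boundary)
 *
 * thus the padding calculations always hand whole pages to munmap().
 */
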
/*
 * Determine if a dependency requires a particular version and if so verify
 * that the version exists in the dependency.
 */
static int
elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp)
{
	Verneed		*vnd = VERNEED(clmp);
	int		_num, num = VERNEEDNUM(clmp);
	char		*cstrs = (char *)STRTAB(clmp);
	Lm_list		*lml = LIST(clmp);

	/*
	 * Traverse the caller's version needed information and determine if
	 * any specific versions are required from the dependency.
	 */
	for (_num = 1; _num <= num; _num++,
	    vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) {
		Half	cnt = vnd->vn_cnt;
		Vernaux	*vnap;
		char	*nstrs, *need;

		/*
		 * Determine if a needed entry matches this dependency.
		 */
		need = (char *)(cstrs + vnd->vn_file);
		if (strcmp(name, need) != 0)
			continue;

		DBG_CALL(Dbg_ver_need_title(NAME(clmp)));
		if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) &&
		    ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0))
			(void) printf(MSG_INTL(MSG_LDD_VER_FIND), name);

		/*
		 * Validate that each version required actually exists in the
		 * dependency.
		 */
		nstrs = (char *)STRTAB(nlmp);

		for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt;
		    cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) {
			char	*version, *define;
			Verdef	*vdf = VERDEF(nlmp);
			ulong_t	_num, num = VERDEFNUM(nlmp);
			int	found = 0;

			version = (char *)(cstrs + vnap->vna_name);
			DBG_CALL(Dbg_ver_need_entry(0, need, version));

			for (_num = 1; _num <= num; _num++,
			    vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) {
				Verdaux		*vdap;

				if (vnap->vna_hash != vdf->vd_hash)
					continue;

				vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux);
				define = (char *)(nstrs + vdap->vda_name);
				if (strcmp(version, define) != 0)
					continue;

				found++;
				break;
			}

			/*
			 * If we're being traced print out any matched version
			 * when the verbose (-v) option is in effect.  Always
			 * print any unmatched versions.
			 */
			if (lml->lm_flags & LML_FLG_TRC_ENABLE) {
				if (found) {
					if (!(lml->lm_flags &
					    LML_FLG_TRC_VERBOSE))
						continue;

					(void) printf(
					    MSG_ORIG(MSG_LDD_VER_FOUND),
					    need, version, NAME(nlmp));
				} else {
					if (rtld_flags & RT_FL_SILENCERR)
						continue;

					(void) printf(
					    MSG_INTL(MSG_LDD_VER_NFOUND),
					    need, version);
				}
				continue;
			}

			/*
			 * If the version hasn't been found then this is a
			 * candidate for a fatal error condition.  Weak
			 * version definition requirements are silently
			 * ignored.  Also, if the image inspected for a
			 * version definition has no versioning recorded at
			 * all then silently ignore this (this provides better
			 * backward compatibility to old images created prior
			 * to versioning being available).  Both of these
			 * skipped diagnostics are available under tracing
			 * (see above).
			 */
			if ((found == 0) && (num != 0) &&
			    (!(vnap->vna_flags & VER_FLG_WEAK))) {
				eprintf(ERR_FATAL, MSG_INTL(MSG_VER_NFOUND),
				    need, version, NAME(clmp));
				return (0);
			}
		}
		return (1);
	}
	return (1);
}

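/*
 * The version needed information traversed above has the following layout.
 * Each Verneed entry identifies one dependency (vn_file), and heads a list
 * of Vernaux entries, one per version required from that dependency:
 *
 *	Verneed (vn_file, vn_cnt, vn_aux, vn_next)
 *	    Vernaux (vna_hash, vna_flags, vna_name, vna_next)
 *	    Vernaux ...
 *	Verneed ...
 *
 * The vn_aux, vn_next and vna_next fields are byte offsets, which is why
 * the loops above advance by (Xword) additions rather than by pointer
 * arithmetic.
 */
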
/*
 * Search through the dynamic section for DT_NEEDED entries and perform one
 * of two functions.  If only the first argument is specified then load the
 * defined shared object, otherwise add the link map representing the defined
 * shared object to the dlopen list.
 */
static int
elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp)
{
	Dyn	*dyn;
	ulong_t	ndx = 0;
	uint_t	lazy = 0, flags = 0;
	Word	lmflags = lml->lm_flags;
	Word	lmtflags = lml->lm_tflags;

	/*
	 * Process each shared object on the needed list.
	 */
	if (DYN(clmp) == 0)
		return (1);

	for (dyn = (Dyn *)DYN(clmp); dyn->d_tag != DT_NULL; dyn++, ndx++) {
		Dyninfo	*dip = &DYNINFO(clmp)[ndx];
		Rt_map	*nlmp = 0;
		char	*name;
		int	silent = 0;
		Pnode	*pnp;

		switch (dyn->d_tag) {
		case DT_POSFLAG_1:
			if ((dyn->d_un.d_val & DF_P1_LAZYLOAD) &&
			    !(lmtflags & LML_TFLG_NOLAZYLD))
				lazy = 1;
			if (dyn->d_un.d_val & DF_P1_GROUPPERM)
				flags = (FLG_RT_SETGROUP | FLG_RT_HANDLE);
			continue;
		case DT_NEEDED:
		case DT_USED:
			dip->di_flags |= FLG_DI_NEEDED;
			if (flags)
				dip->di_flags |= FLG_DI_GROUP;

			name = (char *)STRTAB(clmp) + dyn->d_un.d_val;

			/*
			 * NOTE, libc.so.1 can't be lazy loaded.  Although a
			 * lazy position flag won't be produced when a RTLDINFO
			 * .dynamic entry is found (introduced with the UPM in
			 * Solaris 10), it was possible to mark libc for lazy
			 * loading on previous releases.  To reduce the
			 * overhead of testing for this occurrence, only carry
			 * out this check for the first object on the link-map
			 * list (there aren't many applications built without
			 * libc).
			 */
			if (lazy && (lml->lm_head == clmp) &&
			    (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0))
				lazy = 0;

			/*
			 * Don't bring in lazy loaded objects yet unless we've
			 * been asked to attempt to load all available objects
			 * (crle(1) sets LD_FLAGS=loadavail).  Even under
			 * RTLD_NOW we don't process this - RTLD_NOW will
			 * cause relocation processing which in turn might
			 * trigger lazy loading, but it's possible that the
			 * object has a lazy loaded file with no bindings
			 * (i.e., it should never have been a dependency in
			 * the first place).
			 */
			if (lazy) {
				if ((lmflags & LML_FLG_LOADAVAIL) == 0) {
					LAZY(clmp)++;
					lazy = flags = 0;
					continue;
				}

				/*
				 * Silence any error messages - see description
				 * under elf_lookup_filtee().
				 */
				if ((rtld_flags & RT_FL_SILENCERR) == 0) {
					rtld_flags |= RT_FL_SILENCERR;
					silent = 1;
				}
			}
			break;
		case DT_AUXILIARY:
			dip->di_flags |= FLG_DI_AUXFLTR;
			lazy = flags = 0;
			continue;
		case DT_SUNW_AUXILIARY:
			dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR);
			lazy = flags = 0;
			continue;
		case DT_FILTER:
			dip->di_flags |= FLG_DI_STDFLTR;
			lazy = flags = 0;
			continue;
		case DT_SUNW_FILTER:
			dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR);
			lazy = flags = 0;
			continue;
		default:
			lazy = flags = 0;
			continue;
		}

		DBG_CALL(Dbg_file_needed(name, NAME(clmp)));
		if (lml->lm_flags & LML_FLG_TRC_ENABLE)
			dip->di_flags |= FLG_DI_PROCESSD;

		/*
		 * Establish the object's name, load it and establish a
		 * binding with the caller.
		 */
		if (((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) ||
		    ((nlmp = load_one(lml, lmco, pnp, clmp, MODE(clmp),
		    flags, 0)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0))
			nlmp = 0;

		/*
		 * Clean up any infrastructure, including the removal of the
		 * error suppression state, if it had been previously set in
		 * this routine.
		 */
		if (pnp)
			remove_pnode(pnp);
		if (silent)
			rtld_flags &= ~RT_FL_SILENCERR;
		lazy = flags = 0;
		if ((dip->di_info = (void *)nlmp) == 0) {
			/*
			 * If the object could not be mapped, continue if error
			 * suppression is established or we're here with
			 * ldd(1).
			 */
			if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags &
			    (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE)))
				continue;
			else
				return (0);
		}
	}

	if (LAZY(clmp))
		lml->lm_lazy++;

	return (1);
}

static int
elf_map_check(const char *name, caddr_t vaddr, Off size)
{
	prmap_t		*maps, *_maps;
	int		pfd, num, _num;
	caddr_t		eaddr = vaddr + size;
	int		err;

	/*
	 * If memory reservations have been established for alternative
	 * objects, determine if this object falls within the reservation; if
	 * it does, no further checking is required.
	 */
	if (rtld_flags & RT_FL_MEMRESV) {
		Rtc_head	*head = (Rtc_head *)config->c_bgn;

		if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) &&
		    (eaddr <= (caddr_t)(uintptr_t)head->ch_resend))
			return (0);
	}

	/*
	 * Determine the mappings presently in use by this process.
	 */
	if ((pfd = pr_open()) == FD_UNAVAIL)
		return (1);

	if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) {
		err = errno;
		eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		return (1);
	}

	if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0)
		return (1);

	if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) {
		err = errno;
		eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
		    strerror(err));
		free(maps);
		return (1);
	}

	/*
	 * Determine if the supplied address clashes with any of the present
	 * process mappings.
	 */
	for (_num = 0, _maps = maps; _num < num; _num++, _maps++) {
		caddr_t		_eaddr = _maps->pr_vaddr + _maps->pr_size;
		Rt_map		*lmp;
		const char	*str;

		if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr))
			continue;

		/*
		 * We have a memory clash.  See if one of the known dynamic
		 * dependency mappings represents this space so as to provide
		 * the user a more meaningful message.
		 */
		if ((lmp = _caller(vaddr, 0)) != 0)
			str = NAME(lmp);
		else
			str = MSG_INTL(MSG_STR_UNKNOWN);

		eprintf(ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name,
		    EC_ADDR(vaddr), EC_OFF(size), str);
		free(maps);
		return (1);
	}
	free(maps);
	return (0);
}

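/*
 * Note, the clash test in elf_map_check() is a standard interval overlap
 * test expressed in its negated form.  Given the proposed mapping
 * [vaddr, eaddr) and an existing mapping [pr_vaddr, _eaddr), the two
 * collide exactly when:
 *
 *	(vaddr < _eaddr) && (eaddr >= pr_vaddr)
 *
 * hence any mapping satisfying (eaddr < pr_vaddr) || (vaddr >= _eaddr) is
 * skipped.
 */
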
/*
 * Obtain a memory reservation.  On newer systems, both MAP_ANON and MAP_ALIGN
 * are used to obtain an aligned reservation from anonymous memory.  If
 * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard
 * reservation using the file as backing.
 */
static Am_ret
elf_map_reserve(const char *name, caddr_t *maddr, Off msize, int mperm,
    int fd, Xword align)
{
	Am_ret	amret;
	int	mflag = MAP_PRIVATE | MAP_NORESERVE;

#if	defined(MAP_ALIGN)
	if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) {
		mflag |= MAP_ALIGN;
		*maddr = (caddr_t)align;
	}
#endif
	if ((amret = anon_map(maddr, msize, PROT_NONE, mflag)) == AM_ERROR)
		return (amret);

	if (amret == AM_OK)
		return (AM_OK);

	/*
	 * If an anonymous memory request failed (which should only be the
	 * case if it is unsupported on the system we're running on),
	 * establish the initial mapping directly from the file.
	 */
	*maddr = 0;
	if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE,
	    fd, 0)) == MAP_FAILED) {
		int	err = errno;
		eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name,
		    strerror(err));
		return (AM_ERROR);
	}
	return (AM_NOSUP);
}

static void *
elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag,
    int fd, Off foff)
{
#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	static int	notd = 0;

	/*
	 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate
	 * flag.
	 */
	if (notd == 0) {
		if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC)
			mflag |= MAP_TEXT;
		else
			mflag |= MAP_INITDATA;
	}
#endif
	if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED)
		return (0);

#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	if ((notd == 0) && (errno == EINVAL)) {
		/*
		 * MAP_TEXT and MAP_INITDATA may not be supported on this
		 * platform, try again without.
		 */
		notd = 1;
		mflag &= ~(MAP_TEXT | MAP_INITDATA);

		return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff));
	}
#endif
	return (MAP_FAILED);
}

/*
 * Map in a file.
 */
static caddr_t
elf_map_it(
	const char	*name,		/* actual name stored for pathname */
	Off		fsize,		/* total mapping claim of the file */
	Ehdr		*ehdr,		/* ELF header of file */
	Phdr		*fphdr,		/* first loadable Phdr */
	Phdr		*lphdr,		/* last loadable Phdr */
	Phdr		**rrphdr,	/* return first Phdr in reservation */
	caddr_t		*rraddr,	/* return start of reservation */
	Off		*rrsize,	/* return total size of reservation */
	int		fixed,		/* image is resolved to a fixed addr */
	int		fd,		/* image's file descriptor */
	Xword		align,		/* image segments' maximum alignment */
	Mmap		*mmaps,		/* mmap information array and */
	uint_t		*mmapcnt)	/*	mapping count */
{
	caddr_t		raddr;		/* reservation address */
	Off		rsize;		/* reservation size */
	Phdr		*phdr;		/* working program header pointer */
	caddr_t		maddr;		/* working mmap address */
	caddr_t		faddr;		/* working file address */
	size_t		padsize;	/* object padding requirement */
	size_t		padpsize = 0;	/* padding size rounded to next page */
	size_t		padmsize = 0;	/* padding size rounded for alignment */
	int		skipfseg;	/* skip mapping first segment */
	int		mperm;		/* segment permissions */
	Am_ret		amret = AM_NOSUP;

	/*
	 * If padding is required extend both the front and rear of the image.
	 * To insure the image itself is mapped at the correct alignment the
	 * initial padding is rounded up to the nearest page.  Once the image
	 * is mapped the excess can be pruned to the nearest page required for
	 * the actual padding itself.
	 */
	if ((padsize = r_debug.rtd_objpad) != 0) {
		padpsize = M_PROUND(padsize);
		if (fixed)
			padmsize = padpsize;
		else
			padmsize = S_ROUND(padsize, align);
	}

	/*
	 * Determine the initial permissions used to map in the first segment.
	 * If this segment's memsz is greater than its filesz then the
	 * difference must be zeroed.  Make sure this segment is writable.
	 */
	mperm = 0;
	if (fphdr->p_flags & PF_R)
		mperm |= PROT_READ;
	if (fphdr->p_flags & PF_X)
		mperm |= PROT_EXEC;
	if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz))
		mperm |= PROT_WRITE;

	/*
	 * Determine whether or not to let the system reserve address space
	 * based on whether this is a dynamic executable (addresses in object
	 * are fixed) or a shared object (addresses in object are relative to
	 * the object's base).
	 */
	if (fixed) {
		/*
		 * Determine the reservation address and size, and insure that
		 * this reservation isn't already in use.
		 */
		faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr);
		raddr = maddr - padpsize;
		rsize = fsize + padpsize + padsize;

		if (lml_main.lm_head) {
			if (elf_map_check(name, raddr, rsize) != 0)
				return (0);
		}

		/*
		 * As this is a fixed image, all segments must be individually
		 * mapped.
		 */
		skipfseg = 0;

	} else {
		size_t	esize;

		/*
		 * If this isn't a fixed image, reserve enough address space
		 * for the entire image to be mapped.  The amount of
		 * reservation is the range between the beginning of the
		 * first, and end of the last loadable segment, together with
		 * any padding, plus the alignment of the first segment.
		 *
		 * The optimal reservation is made as a no-reserve mapping
		 * from anonymous memory.  Each segment is then mapped into
		 * this reservation.  If the anonymous mapping capability
		 * isn't available, the reservation is obtained from the file
		 * itself.  In this case the first segment of the image is
		 * mapped as part of the reservation, thus only the following
		 * segments need to be remapped.
		 */
		rsize = fsize + padmsize + padsize;
		if ((amret = elf_map_reserve(name, &raddr, rsize, mperm,
		    fd, align)) == AM_ERROR)
			return (0);
		maddr = raddr + padmsize;
		faddr = (caddr_t)S_ROUND((Off)maddr, align);

		/*
		 * If this reservation has been obtained from anonymous
		 * memory, then all segments must be individually mapped.
		 * Otherwise, the first segment heads the reservation.
		 */
		if (amret == AM_OK)
			skipfseg = 0;
		else
			skipfseg = 1;

		/*
		 * For backward compatibility (where MAP_ALIGN isn't
		 * available), insure the alignment of the reservation is
		 * adequate for this object, and if not remap the object to
		 * obtain the correct alignment.
		 */
		if (faddr != maddr) {
			(void) munmap(raddr, rsize);

			rsize += align;
			if ((amret = elf_map_reserve(name, &raddr, rsize,
			    mperm, fd, align)) == AM_ERROR)
				return (0);

			maddr = faddr = (caddr_t)S_ROUND((Off)(raddr +
			    padpsize), align);

			esize = maddr - raddr + padpsize;

			/*
			 * As this image has been realigned, the first segment
			 * of the file needs to be remapped to its correct
			 * location.
			 */
			skipfseg = 0;
		} else
			esize = padmsize - padpsize;

		/*
		 * If this reservation included padding, remove any excess for
		 * the start of the image (the padding was adjusted to insure
		 * the image was aligned appropriately).
		 */
		if (esize) {
			(void) munmap(raddr, esize);
			raddr += esize;
			rsize -= esize;
		}
	}

	/*
	 * At this point we know the initial location of the image, and its
	 * size.  Pass these back to the caller for inclusion in the link-map
	 * that will eventually be created.
	 */
	*rraddr = raddr;
	*rrsize = rsize;

	/*
	 * The first loadable segment is now pointed to by maddr.  This
	 * segment will eventually contain the elf header and program headers,
	 * so reset the program header.  Pass this back to the caller for
	 * inclusion in the link-map so it can be used for later unmapping
	 * operations.
	 */
	/* LINTED */
	*rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff);

	/*
	 * If padding is required at the front of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 * Otherwise this reservation is backed by the file, or in the case of
	 * a fixed image, doesn't yet exist.  Map the padding so that it is
	 * suitably protected (PROT_NONE), and insure the first segment of the
	 * file is mapped to its correct location.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			if (dz_map(raddr, padpsize, PROT_NONE, (MAP_PRIVATE |
			    MAP_FIXED | MAP_NORESERVE)) == MAP_FAILED)
				return (0);

			skipfseg = 0;
		}
		rsize -= padpsize;
	}

	/*
	 * Map individual segments.  For a fixed image, these will each be
	 * unique mappings.  For a reservation these will fill in the
	 * reservation.
	 */
	for (phdr = fphdr; phdr <= lphdr;
	    phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) {
		caddr_t	addr;
		Off	mlen, flen;
		size_t	size;

		/*
		 * Skip non-loadable segments or segments that don't occupy
		 * any memory.
		 */
		if (((phdr->p_type != PT_LOAD) &&
		    (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0))
			continue;

		/*
		 * Establish this segment's address relative to our base.
		 */
		addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr +
		    (fixed ? 0 : faddr)));

		/*
		 * Determine the mapping protection from the segment
		 * attributes.  Also determine the etext address from the
		 * last loadable segment which has permissions but no write
		 * access.
		 */
		mperm = 0;
		if (phdr->p_flags) {
			if (phdr->p_flags & PF_R)
				mperm |= PROT_READ;
			if (phdr->p_flags & PF_X)
				mperm |= PROT_EXEC;
			if (phdr->p_flags & PF_W)
				mperm |= PROT_WRITE;
			else
				fmap->fm_etext = phdr->p_vaddr +
				    phdr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
		}

		/*
		 * Determine the type of mapping required.
		 */
		if (phdr->p_type == PT_SUNWBSS) {
			/*
			 * Potentially, we can defer the loading of any SUNWBSS
			 * segment, depending on whether the symbols it
			 * provides have been bound to.  In this manner, large
			 * segments that are interposed upon between shared
			 * libraries may not require mapping.  Note that the
			 * mapping information is recorded in our mapping
			 * descriptor at this time.
			 */
			mlen = phdr->p_memsz;
			flen = 0;

		} else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) {
			/*
			 * If this segment has no backing file and no flags
			 * specified, then it defines a reservation.  At this
			 * point all standard loadable segments will have been
			 * processed.  The segment reservation is mapped
			 * directly from /dev/null.
			 */
			if (nu_map((caddr_t)addr, phdr->p_memsz, PROT_NONE,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else if (phdr->p_filesz == 0) {
			/*
			 * If this segment has no backing file then it defines
			 * a nobits segment and is mapped directly from
			 * /dev/zero.
			 */
			if (dz_map((caddr_t)addr, phdr->p_memsz, mperm,
			    MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
				return (0);

			mlen = phdr->p_memsz;
			flen = 0;

		} else {
			Off	foff;

			/*
			 * This mapping originates from the file.  Determine
			 * the file offset to which the mapping will be
			 * directed (must be aligned) and how much to map
			 * (might be more than the file in the case of .bss).
			 */
			foff = M_PTRUNC((ulong_t)phdr->p_offset);
			mlen = phdr->p_memsz + (phdr->p_offset - foff);
			flen = phdr->p_filesz + (phdr->p_offset - foff);

			/*
			 * If this is a non-fixed, non-anonymous mapping, and
			 * no padding is involved, then the first loadable
			 * segment is already part of the initial reservation.
			 * In this case there is no need to remap this
			 * segment.
			 */
			if ((skipfseg == 0) || (phdr != fphdr)) {
				int	phdr_mperm = mperm;

				/*
				 * If this segment's memsz is greater than its
				 * filesz then the difference must be zeroed.
				 * Make sure this segment is writable.
				 */
				if (phdr->p_memsz > phdr->p_filesz)
					mperm |= PROT_WRITE;

				if (elf_map_textdata((caddr_t)addr, flen,
				    mperm, phdr_mperm,
				    (MAP_FIXED | MAP_PRIVATE), fd, foff) ==
				    MAP_FAILED) {
					int	err = errno;
					eprintf(ERR_FATAL,
					    MSG_INTL(MSG_SYS_MMAP), name,
					    strerror(err));
					return (0);
				}
			}

			/*
			 * If the memory occupancy of the segment overflows
			 * the definition in the file, we need to "zero out"
			 * the end of the mapping we've established, and if
			 * necessary, map some more space from /dev/zero.
			 * Note, zero'ed memory must end on a double word
			 * boundary to satisfy zero().
			 */
			if (phdr->p_memsz > phdr->p_filesz) {
				caddr_t	zaddr;
				size_t	zlen, zplen;
				Off	fend;

				foff = (Off)(phdr->p_vaddr + phdr->p_filesz +
				    (fixed ? 0 : faddr));
				zaddr = (caddr_t)M_PROUND(foff);
				zplen = (size_t)(zaddr - foff);

				fend = (Off)S_DROUND((size_t)(phdr->p_vaddr +
				    phdr->p_memsz + (fixed ? 0 : faddr)));
				zlen = (size_t)(fend - foff);

				/*
				 * Determine whether the number of bytes that
				 * must be zero'ed overflows to the next page.
				 * If not, simply clear the exact bytes
				 * (filesz to memsz) from this page.
				 * Otherwise, clear the remaining bytes of
				 * this page, and map any following pages from
				 * /dev/zero.
				 */
				if (zlen < zplen)
					zero((caddr_t)foff, (long)zlen);
				else {
					zero((caddr_t)foff, (long)zplen);

					if ((zlen = (fend - (Off)zaddr)) > 0) {
						if (dz_map(zaddr, zlen, mperm,
						    MAP_FIXED | MAP_PRIVATE) ==
						    MAP_FAILED)
							return (0);
					}
				}
			}
		}

		/*
		 * Unmap anything from the last mapping address to this one
		 * and update the mapping claim pointer.
		 */
		if ((fixed == 0) && ((size = addr - maddr) != 0)) {
			(void) munmap(maddr, size);
			rsize -= size;
		}

		/*
		 * Retain this segment's mapping information.
		 */
		mmaps[*mmapcnt].m_vaddr = addr;
		mmaps[*mmapcnt].m_msize = mlen;
		mmaps[*mmapcnt].m_fsize = flen;
		mmaps[*mmapcnt].m_perm = mperm;
		(*mmapcnt)++;

		maddr = addr + M_PROUND(mlen);
		rsize -= M_PROUND(mlen);
	}

	/*
	 * If padding is required at the end of the image, obtain that now.
	 * Note, if we've already obtained a reservation from anonymous memory
	 * then this reservation will already include suitable padding.
	 */
	if (padsize) {
		if (amret == AM_NOSUP) {
			/*
			 * maddr is currently page aligned from the last
			 * segment mapping.
			 */
			if (dz_map(maddr, padsize, PROT_NONE, (MAP_PRIVATE |
			    MAP_FIXED | MAP_NORESERVE)) == MAP_FAILED)
				return (0);
		}
		maddr += padsize;
		rsize -= padsize;
	}

	/*
	 * Unmap any final reservation.
	 */
	if ((fixed == 0) && (rsize != 0))
		(void) munmap(maddr, rsize);

	return (faddr);
}

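/*
 * To illustrate the zero-fill logic in the segment loop above, assume a
 * 4K page size and a floating segment mapped with a base of 0, where:
 *
 *	p_vaddr = 0x10000, p_filesz = 0x800, p_memsz = 0x1800
 *
 * Then foff = 0x10800 (the first byte past the file data), zaddr =
 * 0x11000, zplen = 0x800, fend = 0x11800 and zlen = 0x1000.  As zlen
 * exceeds zplen, the 0x800 bytes up to the page boundary are cleared with
 * zero(), and the remaining 0x800 bytes (zaddr to fend) are mapped from
 * /dev/zero via dz_map().
 */
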
/*
 * A null symbol interpreter.  Used if a filter has no associated filtees.
 */
/* ARGSUSED0 */
static Sym *
elf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	return ((Sym *)0);
}

/*
 * Disable filtee use.
 */
static void
elf_disable_filtee(Rt_map *lmp, Dyninfo *dip)
{
	dip->di_info = 0;

	if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) {
		/*
		 * If this is an object filter, free the filtee's duplication.
		 */
		if (OBJFLTRNDX(lmp) != FLTR_DISABLED) {
			free(REFNAME(lmp));
			REFNAME(lmp) = (char *)0;
			OBJFLTRNDX(lmp) = FLTR_DISABLED;

			/*
			 * Indicate that this filtee is no longer available.
			 */
			if (dip->di_flags & FLG_DI_STDFLTR)
				SYMINTP(lmp) = elf_null_find_sym;
		}
	} else if (dip->di_flags & FLG_DI_STDFLTR) {
		/*
		 * Indicate that this standard filtee is no longer available.
		 */
		if (SYMSFLTRCNT(lmp))
			SYMSFLTRCNT(lmp)--;
	} else {
		/*
		 * Indicate that this auxiliary filtee is no longer available.
		 */
		if (SYMAFLTRCNT(lmp))
			SYMAFLTRCNT(lmp)--;
	}
	dip->di_flags &= ~MSK_DI_FILTER;
}

/*
 * Find symbol interpreter - filters.
 * This function is called when the symbols from a shared object should
 * be resolved from the shared object's filtees instead of from within itself.
 *
 * A symbol name of 0 is used to trigger filtee loading.
 */
static Sym *
_elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	const char	*name = slp->sl_name, *filtees;
	Rt_map		*clmp = slp->sl_cmap;
	Rt_map		*ilmp = slp->sl_imap;
	Pnode		*pnp, **pnpp;
	int		any;
	Dyninfo		*dip = &DYNINFO(ilmp)[ndx];
	Lm_list		*lml = LIST(ilmp);
	Lm_cntl		*lmc = 0;
	Aliste		lmco;

	/*
	 * Indicate that the filter has been used.  If a binding already
	 * exists to the caller, indicate that this object is referenced.
	 * This insures we don't generate false unreferenced diagnostics from
	 * ldd -u/U or debugging.  Don't create a binding regardless, as this
	 * filter may have been dlopen()'ed.
	 */
	if (name && (ilmp != clmp)) {
		Word	tracing = (LIST(clmp)->lm_flags &
		    (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED));

		if (tracing || dbg_mask) {
			Bnd_desc	**bdpp;
			Aliste		off;

			FLAGS1(ilmp) |= FL1_RT_USED;

			if ((tracing & LML_FLG_TRC_UNREF) || dbg_mask) {
				for (ALIST_TRAVERSE(CALLERS(ilmp), off, bdpp)) {
					Bnd_desc	*bdp = *bdpp;

					if (bdp->b_caller == clmp) {
						bdp->b_flags |= BND_REFER;
						break;
					}
				}
			}
		}
	}

	/*
	 * If this is the first call to process this filter, establish the
	 * filtee list.  If a configuration file exists, determine if any
	 * filtee associations for this filter, and its filtee reference, are
	 * defined.  Otherwise, process the filtee reference.  Any token
	 * expansion is also completed at this point (i.e., $PLATFORM).
	 */
	filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val;
	if (dip->di_info == 0) {
		if (rtld_flags2 & RT_FL2_FLTCFG)
			dip->di_info = elf_config_flt(PATHNAME(ilmp), filtees);

		if (dip->di_info == 0) {
			DBG_CALL(Dbg_file_filter(NAME(ilmp), filtees, 0));
			if ((lml->lm_flags &
			    (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) &&
			    ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0))
				(void) printf(MSG_INTL(MSG_LDD_FIL_FILTER),
				    NAME(ilmp), filtees);

			if ((dip->di_info = (void *)expand_paths(ilmp,
			    filtees, PN_SER_FILTEE, 0)) == 0) {
				elf_disable_filtee(ilmp, dip);
				return ((Sym *)0);
			}
		}
	}

	/*
	 * Traverse the filtee list, dlopen()'ing any objects specified and
	 * using their group handle to lookup the symbol.
	 */
	for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp;
	    pnpp = &pnp->p_next, pnp = *pnpp) {
		int	mode;
		Grp_hdl	*ghp;
		Rt_map	*nlmp = 0;

		if (pnp->p_len == 0)
			continue;

		/*
		 * Establish the mode of the filtee from the filter.  As
		 * filtees are loaded via a dlopen(), make sure that
		 * RTLD_GROUP is set and the filtees aren't global.  It would
		 * be nice to have RTLD_FIRST used here also, but as filters
		 * got out long before RTLD_FIRST was introduced it's a
		 * little too late now.
		 */
		mode = MODE(ilmp) | RTLD_GROUP;
		mode &= ~RTLD_GLOBAL;

		/*
		 * Insure that any auxiliary filter can locate symbols from
		 * its caller.
		 */
		if (dip->di_flags & FLG_DI_AUXFLTR)
			mode |= RTLD_PARENT;

		/*
		 * Process any hardware capability directory.  Establish a new
		 * link-map control list from which to analyze any newly added
		 * objects.
		 */
		if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) {
			if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
				if ((lmc = alist_append(&(lml->lm_lists), 0,
				    sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0)
					return ((Sym *)0);
				lmco = (Aliste)((char *)lmc -
				    (char *)lml->lm_lists);
			} else {
				lmc = 0;
				lmco = ALO_DATA;
			}

			pnp = hwcap_filtees(pnpp, lmco, dip, ilmp, filtees,
			    mode, (FLG_RT_HANDLE | FLG_RT_HWCAP));
		}

		if (pnp->p_len == 0)
			continue;

		/*
		 * Process an individual filtee.
		 */
		if (pnp->p_info == 0) {
			const char	*filtee = pnp->p_name;
			int		audit = 0;

			DBG_CALL(Dbg_file_filtee(NAME(ilmp), filtee, 0));

			ghp = 0;

			/*
			 * Determine if the reference link map is already
			 * loaded.  As an optimization compare the filtee with
			 * our interpreter.
			 * The most common filter is libdl.so.1, which is a
			 * filter on ld.so.1.
			 */
#if	defined(_ELF64)
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) {
#else
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) {
#endif
				/*
				 * Create an association between ld.so.1 and
				 * the filter.
				 */
				nlmp = lml_rtld.lm_head;
				if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp,
				    (GPH_LDSO | GPH_FIRST | GPH_FILTEE))) == 0)
					nlmp = 0;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp)
					pnp->p_info = (void *)ghp;

				/*
				 * Audit the filter/filtee established.
				 * Ignore the auditor's return value, as we
				 * can't allow filtering to ld.so.1 to be
				 * rejected, otherwise nothing is going to
				 * work.
				 */
				if ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)
					(void) audit_objfilter(ilmp, filtees,
					    nlmp, 0);

			} else {
				Rej_desc	rej = { 0 };

				/*
				 * Establish a new link-map control list from
				 * which to analyze any newly added objects.
				 */
				if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
					if ((lmc =
					    alist_append(&(lml->lm_lists), 0,
					    sizeof (Lm_cntl),
					    AL_CNT_LMLISTS)) == 0)
						return ((Sym *)0);
					lmco = (Aliste)((char *)lmc -
					    (char *)lml->lm_lists);
				} else {
					lmc = 0;
					lmco = ALO_DATA;
				}

				/*
				 * Load the filtee.
				 */
				if ((nlmp = load_path(lml, lmco, filtee, ilmp,
				    mode, FLG_RT_HANDLE, &ghp, 0, &rej)) == 0) {
					file_notfound(LIST(ilmp), filtee, ilmp,
					    FLG_RT_HANDLE, &rej);
					remove_rej(&rej);
				}

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp) {
					ghp->gh_flags |= GPH_FILTEE;
					pnp->p_info = (void *)ghp;
				}

				/*
				 * Audit the filter/filtee established.  A
				 * return of 0 indicates the auditor wishes to
				 * ignore this filtee.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)) {
					if (audit_objfilter(ilmp, filtees,
					    nlmp, 0) == 0) {
						audit = 1;
						nlmp = 0;
					}
				}

				/*
				 * Finish processing the objects associated
				 * with this request.  Create an association
				 * between this object and the originating
				 * filter to provide sufficient information to
				 * tear down this filtee if necessary.
				 */
				if (nlmp && ghp &&
				    ((analyze_lmc(lml, lmco, nlmp) == 0) ||
				    (relocate_lmc(lml, lmco, nlmp) == 0)))
					nlmp = 0;

				/*
				 * If the filtee has been successfully
				 * processed, and it is part of a link-map
				 * control list that is equivalent, or less,
				 * than the filter control list, create an
				 * association between the filter and filtee.
				 * This association provides sufficient
				 * information to tear down the filter and
				 * filtee if necessary.
				 */
				if (nlmp && ghp && (CNTL(nlmp) <= CNTL(ilmp)) &&
				    (hdl_add(ghp, ilmp, GPD_FILTER) == 0))
					nlmp = 0;
			}

			/*
			 * Generate a diagnostic if the filtee couldn't be
			 * loaded, null out the pnode entry, and continue
			 * the search.  Otherwise, retain this group handle
			 * for future symbol searches.
			 */
			if (nlmp == 0) {
				pnp->p_info = 0;
				DBG_CALL(Dbg_file_filtee(0, filtee, audit));

				if (ghp)
					(void) dlclose_core(ghp, ilmp);

				if (lmc) {
					(void) lm_salvage(lml, 0, lmco);
					remove_cntl(lml, lmco);
					lmc = 0;
				}
				pnp->p_len = 0;
				continue;
			}
		}

		ghp = (Grp_hdl *)pnp->p_info;

		/*
		 * If we're just here to trigger filtee loading, skip the
		 * symbol lookup so we'll continue looking for additional
		 * filtees.
		 */
		if (name) {
			Grp_desc	*gdp;
			Sym		*sym = 0;
			Aliste		off;
			Slookup		sl = *slp;

			sl.sl_flags |= LKUP_FIRST;
			any++;

			/*
			 * Look for the symbol in the handle's dependencies.
			 */
			for (ALIST_TRAVERSE(ghp->gh_depends, off, gdp)) {
				if ((gdp->gd_flags & GPD_AVAIL) == 0)
					continue;

				/*
				 * If our parent is a dependency don't look at
				 * it (otherwise we are in a recursive loop).
				 * This situation can occur with auxiliary
				 * filters if the filtee has a dependency on
				 * the filter.  This dependency isn't
				 * necessary as auxiliary filters are opened
				 * RTLD_PARENT, but users may still
				 * unknowingly add an explicit dependency to
				 * the parent.
				 */
				if ((sl.sl_imap = gdp->gd_depend) == ilmp)
					continue;

				if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp,
				    binfo)) != 0) ||
				    (ghp->gh_flags & GPH_FIRST))
					break;
			}

			/*
			 * If this filtee has just been loaded (nlmp != 0),
			 * determine whether the filtee was triggered by a
			 * relocation from an object that is still being
			 * relocated on a leaf link-map control list.  As the
			 * relocation of an object on this list might still
			 * fail, we can't yet bind the filter to the filtee.
			 * To do so would lock the filtee so that it couldn't
			 * be deleted, and the filtee itself could have bound
			 * to an object that must be torn down.  Insure the
			 * caller isn't bound to the handle at this time.
			 * Any association will be reestablished when the
			 * filter is later referenced and the filtee has
			 * propagated to the same link-map control list.
			 */
			if (nlmp && (CNTL(nlmp) > CNTL(ilmp))) {
				remove_caller(ghp, ilmp);
				pnp->p_info = 0;
			}
			if (sym) {
				if (lmc)
					remove_cntl(lml, lmco);

				*binfo |= DBG_BINFO_FILTEE;
				return (sym);
			}
		}

		/*
		 * If this object is tagged to terminate filtee processing
		 * we're done.
		 */
		if (FLAGS1(ghp->gh_owner) & FL1_RT_ENDFILTE)
			break;
	}

	if (lmc)
		remove_cntl(lml, lmco);

	/*
	 * If we're just here to trigger filtee loading then we're done.
	 */
	if (name == 0)
		return ((Sym *)0);

	/*
	 * If no filtees have been found for a filter, clean up any Pnode
	 * structures and disable their search completely.  For auxiliary
	 * filters we can reselect the symbol search function so that we never
	 * enter this routine again for this object.  For standard filters we
	 * use the null symbol routine.
	 */
	if (any == 0) {
		remove_pnode((Pnode *)dip->di_info);
		elf_disable_filtee(ilmp, dip);
		return ((Sym *)0);
	}

	return ((Sym *)0);
}

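/*
 * Recall the distinction that drives the logic above: a standard filter
 * (DT_FILTER/DT_SUNW_FILTER) exposes only the interfaces its filtees
 * provide, so a failed filtee lookup is final, whereas an auxiliary filter
 * (DT_AUXILIARY/DT_SUNW_AUXILIARY) carries its own fallback
 * implementation, so a failed filtee lookup simply falls back to the
 * filter itself (see elf_find_sym()).
 */
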
/*
 * Focal point for disabling error messages for auxiliary filters.  As an
 * auxiliary filter allows for filtee use, but provides a fallback should a
 * filtee not exist (or fail to load), any errors generated as a consequence
 * of trying to load the filtees are typically suppressed.  Setting
 * RT_FL_SILENCERR suppresses errors generated by eprintf(), but insures a
 * debug diagnostic is produced.  ldd(1) employs printf(), and here, the
 * selection of whether to print a diagnostic in regard to auxiliary filters
 * is a little more complex.
 *
 *  .	The determination of whether to produce an ldd message, or a fatal
 *	error message is driven by LML_FLG_TRC_ENABLE.
 *  .	More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN,
 *	(ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s),
 *	and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u).
 *
 *  .	If the calling object is lddstub, then several classes of message are
 *	suppressed.  The user isn't trying to diagnose lddstub, this is simply
 *	a stub executable employed to preload a user specified library against.
 *
 *  .	If RT_FL_SILENCERR is in effect then any generic ldd() messages should
 *	be suppressed.  All detailed ldd messages should still be produced.
 */
Sym *
elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	Sym	*sym;
	Dyninfo	*dip = &DYNINFO(slp->sl_imap)[ndx];
	int	silent = 0;

	/*
	 * Make sure this entry is still acting as a filter.  We may have
	 * tried to process this previously, and disabled it if the filtee
	 * couldn't be processed.  However, other entries may provide
	 * different filtees that are yet to be completed.
	 */
	if (dip->di_flags == 0)
		return ((Sym *)0);

	/*
	 * Indicate whether an error message is required should this filtee
	 * not be found, based on the type of filter.
	 */
	if ((dip->di_flags & FLG_DI_AUXFLTR) &&
	    ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) {
		rtld_flags |= RT_FL_SILENCERR;
		silent = 1;
	}

	sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx);

	if (silent)
		rtld_flags &= ~RT_FL_SILENCERR;

	return (sym);
}

/*
 * Compute the elf hash value (as defined in the ELF access library).
 * The form of the hash table is:
 *
 *	|--------------|
 *	| # of buckets |
 *	|--------------|
 *	| # of chains  |
 *	|--------------|
 *	|   bucket[]   |
 *	|--------------|
 *	|   chain[]    |
 *	|--------------|
 */
ulong_t
elf_hash(const char *name)
{
	uint_t	hval = 0;

	while (*name) {
		uint_t	g;
		hval = (hval << 4) + *name++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return ((ulong_t)hval);
}

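/*
 * For example, elf_hash("main") evaluates as:
 *
 *	'm':	hval = 0x6d
 *	'a':	hval = (0x6d << 4) + 0x61 = 0x731
 *	'i':	hval = (0x731 << 4) + 0x69 = 0x7379
 *	'n':	hval = (0x7379 << 4) + 0x6e = 0x737fe
 *
 * The high four bits (g) only come into play once hval grows beyond 28
 * bits; folding them back in and then clearing them keeps the result
 * bounded to 28 bits while preserving their contribution to the hash.
 */
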
/*
 * If the flag argument has LKUP_SPEC set, we treat undefined symbols of type
 * function specially in the executable - if they have a value, even though
 * undefined, we use that value.  This allows us to associate all references
 * to a function's address to a single place in the process: the plt entry
 * for that function in the executable.  Calls to lookup from plt binding
 * routines do NOT set LKUP_SPEC in the flag.
 */
Sym *
elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	const char	*name = slp->sl_name;
	Rt_map		*ilmp = slp->sl_imap;
	ulong_t		hash = slp->sl_hash;
	uint_t		ndx, htmp, buckets, *chainptr;
	Sym		*sym, *symtabptr;
	char		*strtabptr, *strtabname;
	uint_t		flags1;
	Syminfo		*sip;

	/*
	 * If we're only here to establish a symbol's index, skip the
	 * diagnostic used to trace a symbol search.
	 */
	if ((slp->sl_flags & LKUP_SYMNDX) == 0) {
		DBG_CALL(Dbg_syms_lookup(name, NAME(ilmp),
		    MSG_ORIG(MSG_STR_ELF)));
	}

	if (HASH(ilmp) == 0)
		return ((Sym *)0);

	buckets = HASH(ilmp)[0];
	/* LINTED */
	htmp = (uint_t)hash % buckets;

	/*
	 * Get the first symbol on the hash chain and initialize the string
	 * and symbol table pointers.
	 */
	if ((ndx = HASH(ilmp)[htmp + 2]) == 0)
		return ((Sym *)0);

	chainptr = HASH(ilmp) + 2 + buckets;
	strtabptr = STRTAB(ilmp);
	symtabptr = SYMTAB(ilmp);

	while (ndx) {
		sym = symtabptr + ndx;
		strtabname = strtabptr + sym->st_name;

		/*
		 * Compare the symbol found with the name required.  If the
		 * names don't match continue with the next hash entry.
		 */
		if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) {
			if ((ndx = chainptr[ndx]) != 0)
				continue;
			return ((Sym *)0);
		}

		/*
		 * If we're only here to establish a symbol's index, we're
		 * done.
		 */
		if (slp->sl_flags & LKUP_SYMNDX)
			return (sym);

		/*
		 * If we find a match and the symbol is defined, return the
		 * symbol pointer and the link map in which it was found.
		 */
		if (sym->st_shndx != SHN_UNDEF) {
			*dlmp = ilmp;
			*binfo |= DBG_BINFO_FOUND;
			if (FLAGS(ilmp) & FLG_RT_INTRPOSE)
				*binfo |= DBG_BINFO_INTERPOSE;
			break;

		/*
		 * If we find a match and the symbol is undefined, the
		 * symbol type is a function, and the value of the symbol
		 * is non zero, then this is a special case.  This allows
		 * the resolution of a function address to the plt[] entry.
		 * See SPARC ABI, Dynamic Linking, Function Addresses for
		 * more details.
		 */
		} else if ((slp->sl_flags & LKUP_SPEC) &&
		    (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) &&
		    (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) {
			*dlmp = ilmp;
			*binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR);
			if (FLAGS(ilmp) & FLG_RT_INTRPOSE)
				*binfo |= DBG_BINFO_INTERPOSE;
			return (sym);
		}

		/*
		 * Undefined symbol.
		 */
		return ((Sym *)0);
	}

	/*
	 * We've found a match.  Determine if the defining object contains
	 * symbol binding information.
	 */
	if ((sip = SYMINFO(ilmp)) != 0)
		/* LINTED */
		sip = (Syminfo *)((char *)sip + (ndx * SYMINENT(ilmp)));

	/*
	 * If this is a direct binding request, but the symbol definition has
	 * disabled directly binding to it (presumably because the symbol
	 * definition has been changed since the referring object was built),
	 * indicate this failure so that the caller can fall back to a
	 * standard symbol search.  Clear any debug binding information for
	 * cleanliness.
1919 */
1920 if (sip && (slp->sl_flags & LKUP_DIRECT) &&
1921 (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) {
1922 *binfo |= BINFO_DIRECTDIS;
1923 *binfo &= ~DBG_BINFO_MSK;
1924 return ((Sym *)0);
1925 }
1926
1927 /*
1928 * Determine whether this object is acting as a filter.
1929 */
1930 if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0)
1931 return (sym);
1932
1933 /*
1934 * Determine if this object offers per-symbol filtering, and if so,
1935 * whether this symbol references a filtee.
1936 */
1937 if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) {
1938 /*
1939 * If this is a standard filter reference, and no standard
1940 * filtees remain to be inspected, we're done. If this is an
1941 * auxiliary filter reference, and no auxiliary filtees remain,
1942 * we'll fall through in case any object filtering is available.
1943 */
1944 if ((sip->si_flags & SYMINFO_FLG_FILTER) &&
1945 (SYMSFLTRCNT(ilmp) == 0))
1946 return ((Sym *)0);
1947
1948 if ((sip->si_flags & SYMINFO_FLG_FILTER) ||
1949 ((sip->si_flags & SYMINFO_FLG_AUXILIARY) &&
1950 SYMAFLTRCNT(ilmp))) {
1951 Sym * fsym;
1952
1953 /*
1954 * This symbol has an associated filtee. Lookup the
1955 * symbol in the filtee, and if it is found return it.
1956 * If the symbol doesn't exist, and this is a standard
1957 * filter, return an error, otherwise fall through to
1958 * catch any object filtering that may be available.
1959 */
1960 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
1961 sip->si_boundto)) != 0)
1962 return (fsym);
1963 if (sip->si_flags & SYMINFO_FLG_FILTER)
1964 return ((Sym *)0);
1965 }
1966 }
1967
1968 /*
1969 * Determine if this object provides global filtering.
1970 */
1971 if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) {
1972 Sym * fsym;
1973
1974 if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) {
1975 /*
1976 * This object has an associated filtee. Lookup the
1977 * symbol in the filtee, and if it is found return it.
1978 * If the symbol doesn't exist, and this is a standard
1979 * filter, return an error, otherwise return the symbol
1980 * within the filter itself.
1981 */
1982 if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
1983 OBJFLTRNDX(ilmp))) != 0)
1984 return (fsym);
1985 }
1986
1987 if (flags1 & FL1_RT_OBJSFLTR)
1988 return ((Sym *)0);
1989 }
1990 return (sym);
1991 }
1992
1993 /*
1994 * Create a new Rt_map structure for an ELF object and initialize
1995 * all values.
1996 */
1997 Rt_map *
1998 elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld,
1999 ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry,
2000 ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt)
2001 {
2002 Rt_map *lmp;
2003 ulong_t base, fltr = 0, audit = 0, cfile = 0, crle = 0;
2004 Xword rpath = 0;
2005 Ehdr *ehdr = (Ehdr *)addr;
2006
2007 DBG_CALL(Dbg_file_elf(pname, (ulong_t)ld, addr, msize, entry,
2008 get_linkmap_id(lml), lmco));
2009
2010 /*
2011 * Allocate space for the link-map and private elf information. Once
2012 * these are allocated and initialized, we can use remove_so(0, lmp) to
2013 * tear down the link-map should any failures occur.
2014 */
2015 if ((lmp = calloc(sizeof (Rt_map), 1)) == 0)
2016 return (0);
2017 if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) {
2018 free(lmp);
2019 return (0);
2020 }
2021
2022 /*
2023 * All fields not filled in were set to 0 by calloc.
2024 */
2025 ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname;
2026 DYN(lmp) = ld;
2027 ADDR(lmp) = addr;
2028 MSIZE(lmp) = msize;
2029 ENTRY(lmp) = (Addr)entry;
2030 SYMINTP(lmp) = elf_find_sym;
2031 ETEXT(lmp) = etext;
2032 FCT(lmp) = &elf_fct;
2033 LIST(lmp) = lml;
2034 PADSTART(lmp) = paddr;
2035 PADIMLEN(lmp) = padimsize;
2036 THREADID(lmp) = rt_thr_self();
2037 OBJFLTRNDX(lmp) = FLTR_DISABLED;
2038 SORTVAL(lmp) = -1;
2039
2040 MMAPS(lmp) = mmaps;
2041 MMAPCNT(lmp) = mmapcnt;
2042 ASSERT(mmapcnt != 0);
2043
2044 /*
2045 * If this is a shared object, add the base address to each address.
2046 * If this is an executable, use the address as is.
2047 */
2048 if (ehdr->e_type == ET_EXEC) {
2049 base = 0;
2050 FLAGS(lmp) |= FLG_RT_FIXED;
2051 } else
2052 base = addr;
2053
2054 /*
2055 * Fill in the rest of the link-map entries with information from the
2056 * file's dynamic structure.
2057 */
2058 if (ld) {
2059 uint_t dyncnt = 0;
2060 Xword pltpadsz = 0;
2061 void *rtldinfo;
2062
2063 /* CSTYLED */
2064 for ( ; ld->d_tag != DT_NULL; ++ld, dyncnt++) {
2065 switch ((Xword)ld->d_tag) {
2066 case DT_SYMTAB:
2067 SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
2068 break;
2069 case DT_STRTAB:
2070 STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
2071 break;
2072 case DT_SYMENT:
2073 SYMENT(lmp) = ld->d_un.d_val;
2074 break;
2075 case DT_FEATURE_1:
2076 ld->d_un.d_val |= DTF_1_PARINIT;
2077 if (ld->d_un.d_val & DTF_1_CONFEXP)
2078 crle = 1;
2079 break;
2080 case DT_MOVESZ:
2081 MOVESZ(lmp) = ld->d_un.d_val;
2082 FLAGS(lmp) |= FLG_RT_MOVE;
2083 break;
2084 case DT_MOVEENT:
2085 MOVEENT(lmp) = ld->d_un.d_val;
2086 break;
2087 case DT_MOVETAB:
2088 MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base);
2089 break;
2090 case DT_REL:
2091 case DT_RELA:
2092 /*
2093 * At this time we can only handle one type of
2094 * relocation per object.
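 * For example, an object is expected to carry either Rel or Rela
 * entries, never a mixture, which is why both tags share the
 * single REL() field assigned below.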
2095 */
2096 REL(lmp) = (void *)(ld->d_un.d_ptr + base);
2097 break;
2098 case DT_RELSZ:
2099 case DT_RELASZ:
2100 RELSZ(lmp) = ld->d_un.d_val;
2101 break;
2102 case DT_RELENT:
2103 case DT_RELAENT:
2104 RELENT(lmp) = ld->d_un.d_val;
2105 break;
2106 case DT_RELCOUNT:
2107 case DT_RELACOUNT:
2108 RELACOUNT(lmp) = (uint_t)ld->d_un.d_val;
2109 break;
2110 case DT_TEXTREL:
2111 FLAGS1(lmp) |= FL1_RT_TEXTREL;
2112 break;
2113 case DT_HASH:
2114 HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
2115 break;
2116 case DT_PLTGOT:
2117 PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
2118 break;
2119 case DT_PLTRELSZ:
2120 PLTRELSZ(lmp) = ld->d_un.d_val;
2121 break;
2122 case DT_JMPREL:
2123 JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base);
2124 break;
2125 case DT_INIT:
2126 INIT(lmp) = (void (*)())(ld->d_un.d_ptr + base);
2127 break;
2128 case DT_FINI:
2129 FINI(lmp) = (void (*)())(ld->d_un.d_ptr + base);
2130 break;
2131 case DT_INIT_ARRAY:
2132 INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
2133 base);
2134 break;
2135 case DT_INIT_ARRAYSZ:
2136 INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
2137 break;
2138 case DT_FINI_ARRAY:
2139 FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
2140 base);
2141 break;
2142 case DT_FINI_ARRAYSZ:
2143 FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
2144 break;
2145 case DT_PREINIT_ARRAY:
2146 PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
2147 base);
2148 break;
2149 case DT_PREINIT_ARRAYSZ:
2150 PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
2151 break;
2152 case DT_RPATH:
2153 case DT_RUNPATH:
2154 rpath = ld->d_un.d_val;
2155 break;
2156 case DT_FILTER:
2157 fltr = ld->d_un.d_val;
2158 OBJFLTRNDX(lmp) = dyncnt;
2159 FLAGS1(lmp) |= FL1_RT_OBJSFLTR;
2160 break;
2161 case DT_AUXILIARY:
2162 if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
2163 fltr = ld->d_un.d_val;
2164 OBJFLTRNDX(lmp) = dyncnt;
2165 }
2166 FLAGS1(lmp) |= FL1_RT_OBJAFLTR;
2167 break;
2168 case DT_SUNW_FILTER:
2169 SYMSFLTRCNT(lmp)++;
2170 FLAGS1(lmp) |= FL1_RT_SYMSFLTR;
2171 break;
2172 case DT_SUNW_AUXILIARY:
2173 if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
2174 SYMAFLTRCNT(lmp)++;
2175 }
2176 FLAGS1(lmp) |= FL1_RT_SYMAFLTR;
2177 break;
2178 case DT_DEPAUDIT:
2179 if (!(rtld_flags & RT_FL_NOAUDIT))
2180 audit = ld->d_un.d_val;
2181 break;
2182 case DT_CONFIG:
2183 cfile = ld->d_un.d_val;
2184 break;
2185 case DT_DEBUG:
2186 /*
2187 * DT_DEBUG entries are only created in
2188 * dynamic objects that require an interpreter
2189 * (i.e. all dynamic executables and some shared
2190 * objects), and provide for a hand-shake with
2191 * debuggers. This entry is initialized to
2192 * zero by the link-editor. If a debugger has
2193 * attached to us and updated this entry, set
2194 * the debugger flag, and finish initializing
2195 * the debugging structure (see setup() also).
2196 * Switch off any configuration object use, as
2197 * most debuggers can't handle fixed dynamic
2198 * executables as dependencies, and we can't
2199 * handle requests like object padding for alternative objects.
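 *
 * A sketch of the consumer side of this
 * hand-shake (hypothetical debugger code,
 * assuming the <link.h> definitions; not
 * part of ld.so.1 itself): the debugger
 * locates DT_DEBUG in the executable's
 * .dynamic, reads the r_debug address we
 * plant below, and walks the link-map list:
 *
 *	struct r_debug *rd =
 *	    (struct r_debug *)dyn->d_un.d_ptr;
 *	struct link_map *lm;
 *	for (lm = rd->r_map; lm; lm = lm->l_next)
 *		... inspect lm->l_name, lm->l_addr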
2200 */
2201 if (ld->d_un.d_ptr)
2202 rtld_flags |=
2203 (RT_FL_DEBUGGER | RT_FL_NOOBJALT);
2204 ld->d_un.d_ptr = (Addr)&r_debug;
2205 break;
2206 case DT_VERNEED:
2207 VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr +
2208 base);
2209 break;
2210 case DT_VERNEEDNUM:
2211 /* LINTED */
2212 VERNEEDNUM(lmp) = (int)ld->d_un.d_val;
2213 break;
2214 case DT_VERDEF:
2215 VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base);
2216 break;
2217 case DT_VERDEFNUM:
2218 /* LINTED */
2219 VERDEFNUM(lmp) = (int)ld->d_un.d_val;
2220 break;
2221 case DT_BIND_NOW:
2222 if ((ld->d_un.d_val & DF_BIND_NOW) &&
2223 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
2224 MODE(lmp) |= RTLD_NOW;
2225 MODE(lmp) &= ~RTLD_LAZY;
2226 }
2227 break;
2228 case DT_FLAGS:
2229 if (ld->d_un.d_val & DF_SYMBOLIC)
2230 FLAGS1(lmp) |= FL1_RT_SYMBOLIC;
2231 if (ld->d_un.d_val & DF_TEXTREL)
2232 FLAGS1(lmp) |= FL1_RT_TEXTREL;
2233 if ((ld->d_un.d_val & DF_BIND_NOW) &&
2234 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
2235 MODE(lmp) |= RTLD_NOW;
2236 MODE(lmp) &= ~RTLD_LAZY;
2237 }
2238 break;
2239 case DT_FLAGS_1:
2240 if (ld->d_un.d_val & DF_1_DISPRELPND)
2241 FLAGS1(lmp) |= FL1_RT_DISPREL;
2242 if (ld->d_un.d_val & DF_1_GROUP)
2243 FLAGS(lmp) |=
2244 (FLG_RT_SETGROUP | FLG_RT_HANDLE);
2245 if ((ld->d_un.d_val & DF_1_NOW) &&
2246 ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
2247 MODE(lmp) |= RTLD_NOW;
2248 MODE(lmp) &= ~RTLD_LAZY;
2249 }
2250 if (ld->d_un.d_val & DF_1_NODELETE)
2251 MODE(lmp) |= RTLD_NODELETE;
2252 if (ld->d_un.d_val & DF_1_INITFIRST)
2253 FLAGS(lmp) |= FLG_RT_INITFRST;
2254 if (ld->d_un.d_val & DF_1_NOOPEN)
2255 FLAGS(lmp) |= FLG_RT_NOOPEN;
2256 if (ld->d_un.d_val & DF_1_LOADFLTR)
2257 FLAGS(lmp) |= FLG_RT_LOADFLTR;
2258 if (ld->d_un.d_val & DF_1_NODUMP)
2259 FLAGS(lmp) |= FLG_RT_NODUMP;
2260 if (ld->d_un.d_val & DF_1_CONFALT)
2261 crle = 1;
2262 if (ld->d_un.d_val & DF_1_DIRECT)
2263 FLAGS(lmp) |= FLG_RT_DIRECT;
2264 if (ld->d_un.d_val & DF_1_NODEFLIB)
2265 FLAGS1(lmp) |= FL1_RT_NODEFLIB;
2266 if (ld->d_un.d_val & DF_1_ENDFILTEE)
2267 FLAGS1(lmp) |= FL1_RT_ENDFILTE;
2268 if (ld->d_un.d_val & DF_1_TRANS)
2269 FLAGS(lmp) |= FLG_RT_TRANS;
2270 #ifndef EXPAND_RELATIVE
2271 if (ld->d_un.d_val & DF_1_ORIGIN)
2272 FLAGS1(lmp) |= FL1_RT_RELATIVE;
2273 #endif
2274 /*
2275 * If this object identifies itself as an
2276 * interposer, but relocation processing has
2277 * already started, then demote it. It's too
2278 * late to guarantee complete interposition.
2279 */
2280 if (ld->d_un.d_val & DF_1_INTERPOSE) {
2281 if ((lml->lm_flags & LML_FLG_STARTREL) == 0)
2282 FLAGS(lmp) |= FLG_RT_INTRPOSE;
2283 else {
2284 DBG_CALL(Dbg_util_intoolate(NAME(lmp)));
2285 if (lml->lm_flags & LML_FLG_TRC_ENABLE)
2286 (void) printf(
2287 MSG_INTL(MSG_LDD_REL_ERR2),
2288 NAME(lmp));
2289 }
2290 }
2291 break;
2292 case DT_SYMINFO:
2293 SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr +
2294 base);
2295 break;
2296 case DT_SYMINENT:
2297 SYMINENT(lmp) = ld->d_un.d_val;
2298 break;
2299 case DT_PLTPAD:
2300 PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base);
2301 break;
2302 case DT_PLTPADSZ:
2303 pltpadsz = ld->d_un.d_val;
2304 break;
2305 case DT_SUNW_RTLDINF:
2306 if ((lml->lm_info_lmp != 0) &&
2307 (lml->lm_info_lmp != lmp)) {
2308 DBG_CALL(Dbg_unused_rtldinfo(
2309 NAME(lmp),
2310 NAME(lml->lm_info_lmp)));
2311 break;
2312 }
2313 lml->lm_info_lmp = lmp;
2314 rtldinfo = (void *)(ld->d_un.d_ptr + base);
2315
2316 /*
2317 * We maintain a list of DT_SUNW_RTLDINFO
2318 * structures for a given object.
This permits
2319 * the RTLDINFO structures to be grouped
2320 * functionally inside a shared object.
2321 *
2322 * For example, we could have one for
2323 * thread_init, and another for atexit
2324 * reservations.
2325 */
2326 if (alist_append(&lml->lm_rtldinfo, &rtldinfo,
2327 sizeof (void *), AL_CNT_RTLDINFO) == 0) {
2328 remove_so(0, lmp);
2329 return (0);
2330 }
2331 break;
2332 case DT_DEPRECATED_SPARC_REGISTER:
2333 case M_DT_REGISTER:
2334 FLAGS(lmp) |= FLG_RT_REGSYMS;
2335 break;
2336 case M_DT_PLTRESERVE:
2337 PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr +
2338 base);
2339 break;
2340 }
2341 }
2342
2343
2344 if (PLTPAD(lmp)) {
2345 if (pltpadsz == (Xword)0)
2346 PLTPAD(lmp) = 0;
2347 else
2348 PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) +
2349 pltpadsz);
2350 }
2351
2352 /*
2353 * Allocate the Dynamic Info structure.
2354 */
2355 if ((DYNINFO(lmp) = calloc((size_t)dyncnt,
2356 sizeof (Dyninfo))) == 0) {
2357 remove_so(0, lmp);
2358 return (0);
2359 }
2360 DYNINFOCNT(lmp) = dyncnt;
2361 }
2362
2363 /*
2364 * If configuration file use hasn't been disabled, and a configuration
2365 * file hasn't already been set via an environment variable, see if any
2366 * application-specific configuration file is specified. An LD_CONFIG
2367 * setting is used first, but if this image was generated via crle(1)
2368 * then a default configuration file is a fall-back.
2369 */
2370 if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) {
2371 if (cfile)
2372 config->c_name = (const char *)(cfile +
2373 (char *)STRTAB(lmp));
2374 else if (crle) {
2375 rtld_flags |= RT_FL_CONFAPP;
2376 #ifndef EXPAND_RELATIVE
2377 FLAGS1(lmp) |= FL1_RT_RELATIVE;
2378 #endif
2379 }
2380 }
2381
2382 if (rpath)
2383 RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp));
2384 if (fltr) {
2385 /*
2386 * If this object is a global filter, duplicate the filtee
2387 * string name(s) so that REFNAME() is available in core files.
2388 * This kludge was useful for debuggers at one point, but only
2389 * when the filtee name was an individual full path.
2390 */
2391 if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) {
2392 remove_so(0, lmp);
2393 return (0);
2394 }
2395 }
2396
2397 if (rtld_flags & RT_FL_RELATIVE)
2398 FLAGS1(lmp) |= FL1_RT_RELATIVE;
2399
2400 /*
2401 * For Intel ABI compatibility. It's possible that a JMPREL can be
2402 * specified without any other relocations (e.g. a dynamic executable
2403 * normally only contains .plt relocations). If this is the case then
2404 * no REL, RELSZ or RELENT will have been created. For us to be able
2405 * to traverse the .plt relocations under LD_BIND_NOW we need to know
2406 * the RELENT for these relocations. Refer to elf_reloc() for more
2407 * details.
2408 */
2409 if (!RELENT(lmp) && JMPREL(lmp))
2410 RELENT(lmp) = sizeof (Rel);
2411
2412 /*
2413 * Establish any per-object auditing. If we're establishing `main's
2414 * link-map, it's too early to go searching for audit objects, so just
2415 * hold the object name for later (see setup()).
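 * (A DT_DEPAUDIT entry is typically produced by the link-editor's
 * -P auditlib option; the string referenced here names the audit
 * library to load, e.g. a hypothetical audit.so.1.)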
2416 */
2417 if (audit) {
2418 char *cp = audit + (char *)STRTAB(lmp);
2419
2420 if (*cp) {
2421 if (((AUDITORS(lmp) =
2422 calloc(1, sizeof (Audit_desc))) == 0) ||
2423 ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) {
2424 remove_so(0, lmp);
2425 return (0);
2426 }
2427 if (lml_main.lm_head) {
2428 if (audit_setup(lmp, AUDITORS(lmp), 0) == 0) {
2429 remove_so(0, lmp);
2430 return (0);
2431 }
2432 FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags;
2433 lml->lm_flags |= LML_FLG_LOCAUDIT;
2434 }
2435 }
2436 }
2437
2438 if ((CONDVAR(lmp) = rt_cond_create()) == 0) {
2439 remove_so(0, lmp);
2440 return (0);
2441 }
2442 if (oname && ((append_alias(lmp, oname, 0)) == 0)) {
2443 remove_so(0, lmp);
2444 return (0);
2445 }
2446
2447 /*
2448 * Add the mapped object to the end of the link map list.
2449 */
2450 lm_append(lml, lmco, lmp);
2451 return (lmp);
2452 }
2453
2454 /*
2455 * Assign hardware/software capabilities.
2456 */
2457 void
2458 cap_assign(Cap *cap, Rt_map *lmp)
2459 {
2460 while (cap->c_tag != CA_SUNW_NULL) {
2461 switch (cap->c_tag) {
2462 case CA_SUNW_HW_1:
2463 HWCAP(lmp) = cap->c_un.c_val;
2464 break;
2465 case CA_SUNW_SF_1:
2466 SFCAP(lmp) = cap->c_un.c_val;
2467 }
2468 cap++;
2469 }
2470 }
2471
2472 /*
2473 * Map in an ELF object.
2474 * Takes an open file descriptor for the object to map and its pathname; returns
2475 * a pointer to a Rt_map structure for this object, or 0 on error.
2476 */
2477 static Rt_map *
2478 elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname,
2479 int fd)
2480 {
2481 int i; /* general temporary */
2482 Off memsize = 0; /* total memory size of pathname */
2483 Off mentry; /* entry point */
2484 Ehdr *ehdr; /* ELF header of object */
2485 Phdr *phdr; /* first Phdr in file */
2486 Phdr *phdr0; /* Saved first Phdr in file */
2487 Phdr *pptr; /* working Phdr */
2488 Phdr *fph = 0; /* first loadable Phdr */
2489 Phdr *lph; /* last loadable Phdr */
2490 Phdr *lfph = 0; /* last loadable (filesz != 0) Phdr */
2491 Phdr *lmph = 0; /* last loadable (memsz != 0) Phdr */
2492 Phdr *swph = 0; /* program header for SUNWBSS */
2493 Phdr *tlph = 0; /* program header for PT_TLS */
2494 Phdr *unwindph = 0; /* program header for PT_SUNW_UNWIND */
2495 Cap *cap = 0; /* program header for SUNWCAP */
2496 Dyn *mld = 0; /* DYNAMIC structure for pathname */
2497 size_t size; /* size of elf and program headers */
2498 caddr_t faddr = 0; /* mapping address of pathname */
2499 Rt_map *lmp; /* link map created */
2500 caddr_t paddr; /* start of padded image */
2501 Off plen; /* size of image including padding */
2502 Half etype;
2503 int fixed;
2504 Mmap *mmaps;
2505 uint_t mmapcnt = 0;
2506 Xword align = 0;
2507
2508 /* LINTED */
2509 ehdr = (Ehdr *)fmap->fm_maddr;
2510
2511 /*
2512 * If this is a relocatable object then special processing is required.
2513 */
2514 if ((etype = ehdr->e_type) == ET_REL)
2515 return (elf_obj_file(lml, lmco, pname, fd));
2516
2517 /*
2518 * If this isn't a dynamic executable or shared object we can't process
2519 * it. If this is a dynamic executable then all addresses are fixed.
2520 */
2521 if (etype == ET_EXEC)
2522 fixed = 1;
2523 else if (etype == ET_DYN)
2524 fixed = 0;
2525 else {
2526 eprintf(ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname,
2527 conv_etype_str(etype));
2528 return (0);
2529 }
2530
2531 /*
2532 * If our original mapped page was not large enough to hold all the
2533 * program headers, remap them.
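 * The space required is e_phoff plus (e_phnum * e_phentsize) bytes, as
 * computed below; if this exceeds the initial fmap window the headers
 * are remapped using the larger size.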
2534 */
2535 size = (size_t)((char *)ehdr->e_phoff +
2536 (ehdr->e_phnum * ehdr->e_phentsize));
2537 if (size > fmap->fm_fsize) {
2538 eprintf(ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
2539 return (0);
2540 }
2541 if (size > fmap->fm_msize) {
2542 fmap_setup();
2543 if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ,
2544 fmap->fm_mflags, fd, 0)) == MAP_FAILED) {
2545 int err = errno;
2546 eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname,
2547 strerror(err));
2548 return (0);
2549 }
2550 fmap->fm_msize = size;
2551 /* LINTED */
2552 ehdr = (Ehdr *)fmap->fm_maddr;
2553 }
2554 /* LINTED */
2555 phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize);
2556
2557 /*
2558 * Get entry point.
2559 */
2560 mentry = ehdr->e_entry;
2561
2562 /*
2563 * Point at program headers and perform some basic validation.
2564 */
2565 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
2566 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
2567 if ((pptr->p_type == PT_LOAD) ||
2568 (pptr->p_type == PT_SUNWBSS)) {
2569
2570 if (fph == 0) {
2571 fph = pptr;
2572 /* LINTED argument lph is initialized in first pass */
2573 } else if (pptr->p_vaddr <= lph->p_vaddr) {
2574 eprintf(ERR_ELF, MSG_INTL(MSG_GEN_INVPRGHDR),
2575 pname);
2576 return (0);
2577 }
2578
2579 lph = pptr;
2580
2581 if (pptr->p_memsz)
2582 lmph = pptr;
2583 if (pptr->p_filesz)
2584 lfph = pptr;
2585 if (pptr->p_type == PT_SUNWBSS)
2586 swph = pptr;
2587 if (pptr->p_align > align)
2588 align = pptr->p_align;
2589
2590 } else if (pptr->p_type == PT_DYNAMIC)
2591 mld = (Dyn *)(pptr->p_vaddr);
2592 else if (pptr->p_type == PT_TLS)
2593 tlph = pptr;
2594 else if (pptr->p_type == PT_SUNWCAP)
2595 cap = (Cap *)(pptr->p_vaddr);
2596 else if (pptr->p_type == PT_SUNW_UNWIND)
2597 unwindph = pptr;
2598 }
2599
2600 #if defined(MAP_ALIGN)
2601 /*
2602 * Make sure the maximum page alignment is a power of 2 >= the system
2603 * page size, for use with MAP_ALIGN.
2604 */
2605 align = M_PROUND(align);
2606 #endif
2607
2608 /*
2609 * We'd better have at least one loadable segment, together with some
2610 * specified file and memory size.
2611 */
2612 if ((fph == 0) || (lmph == 0) || (lfph == 0)) {
2613 eprintf(ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname);
2614 return (0);
2615 }
2616
2617 /*
2618 * Check that the file's size accounts for the loadable sections
2619 * we're going to map in (failure to do this may cause spurious
2620 * bus errors if we're given a truncated file).
2621 */
2622 if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) {
2623 eprintf(ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
2624 return (0);
2625 }
2626
2627 /*
2628 * Memsize must be page rounded so that if we add object padding
2629 * at the end it will start at the beginning of a page.
2630 */
2631 plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) -
2632 M_PTRUNC((ulong_t)fph->p_vaddr));
2633
2634 /*
2635 * Determine if an existing mapping is acceptable.
2636 */
2637 if (interp && (lml->lm_flags & LML_FLG_BASELM) &&
2638 (strcmp(pname, interp->i_name) == 0)) {
2639 /*
2640 * If this is the interpreter then it has already been mapped,
2641 * and we have the address, so don't map it again. Note that
2642 * the common occurrence of a reference to the interpreter
2643 * (libdl -> ld.so.1) will have been caught during filter
2644 * initialization (see elf_lookup_filtee()). However, some
2645 * ELF implementations are known to record libc.so.1 as the
2646 * interpreter, and thus this test catches this behavior.
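 * In that case the mapping established when the kernel loaded the
 * interpreter is simply reused, as the assignment below shows.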
2647 */
2648 paddr = faddr = interp->i_faddr;
2649
2650 } else if ((fixed == 0) && (r_debug.rtd_objpad == 0) &&
2651 (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) &&
2652 (fph->p_filesz == fph->p_memsz) &&
2653 (((Xword)fmap->fm_maddr % align) == 0)) {
2654 /*
2655 * If the mapping required has already been established from
2656 * the initial page we don't need to do anything more. Reset
2657 * the fmap address so that any later files start a new fmap.
2658 * This is really an optimization for filters, such as libdl.so,
2659 * which should only require one page.
2660 */
2661 paddr = faddr = fmap->fm_maddr;
2662 fmap->fm_maddr = 0;
2663 fmap_setup();
2664 }
2665
2666 /*
2667 * Allocate a mapping array to retain mapped segment information.
2668 */
2669 if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0)
2670 return (0);
2671
2672 /*
2673 * If we're reusing an existing mapping, determine the object's etext
2674 * address. Otherwise map the file (which will calculate the etext
2675 * address as part of the mapping process).
2676 */
2677 if (faddr) {
2678 caddr_t base;
2679
2680 if (fixed)
2681 base = 0;
2682 else
2683 base = faddr;
2684
2685 /* LINTED */
2686 phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize);
2687
2688 for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
2689 pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
2690 if (pptr->p_type != PT_LOAD)
2691 continue;
2692
2693 mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base);
2694 mmaps[mmapcnt].m_msize = pptr->p_memsz;
2695 mmaps[mmapcnt].m_fsize = pptr->p_filesz;
2696 mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC);
2697 mmapcnt++;
2698
2699 if (!(pptr->p_flags & PF_W)) {
2700 fmap->fm_etext = (ulong_t)pptr->p_vaddr +
2701 (ulong_t)pptr->p_memsz +
2702 (ulong_t)(fixed ? 0 : faddr);
2703 }
2704 }
2705 } else {
2706 /*
2707 * Map the file.
2708 */
2709 if (!(faddr = elf_map_it(pname, memsize, ehdr, fph, lph,
2710 &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt)))
2711 return (0);
2712 }
2713
2714 /*
2715 * Calculate absolute base addresses and entry points.
2716 */
2717 if (!fixed) {
2718 if (mld)
2719 /* LINTED */
2720 mld = (Dyn *)((Off)mld + faddr);
2721 if (cap)
2722 /* LINTED */
2723 cap = (Cap *)((Off)cap + faddr);
2724 mentry += (Off)faddr;
2725 }
2726
2727 /*
2728 * Create a new link-map structure for the newly mapped shared object.
2729 */
2730 if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr,
2731 fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps,
2732 mmapcnt))) {
2733 (void) munmap((caddr_t)faddr, memsize);
2734 return (0);
2735 }
2736
2737 /*
2738 * Start the system loading in the ELF information we'll be processing.
2739 */
2740 if (REL(lmp)) {
2741 (void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) +
2742 (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp),
2743 MADV_WILLNEED);
2744 }
2745
2746 /*
2747 * If this shared object contains any special segments, record them.
2748 */
2749 if (swph) {
2750 FLAGS(lmp) |= FLG_RT_SUNWBSS;
2751 SUNWBSS(lmp) = phdr + (swph - phdr0);
2752 }
2753 if (tlph) {
2754 PTTLS(lmp) = phdr + (tlph - phdr0);
2755 tls_assign_soffset(lmp);
2756 }
2757
2758 if (unwindph)
2759 PTUNWIND(lmp) = phdr + (unwindph - phdr0);
2760
2761 if (cap)
2762 cap_assign(cap, lmp);
2763
2764 return (lmp);
2765 }
2766
2767
2768 /*
2769 * Function to correct protection settings. Segments are all mapped initially
2770 * with permissions as given in the segment header.
We need to turn on write
2771 * permissions on a text segment if there are any relocations against that
2772 * segment, and then turn write permission back off again before returning
2773 * control to the user. This function turns the permission on or off depending
2774 * on the value of the argument.
2775 */
2776 int
2777 elf_set_prot(Rt_map * lmp, int permission)
2778 {
2779 Mmap *mmaps;
2780
2781 /*
2782 * If this is an allocated image (i.e. a relocatable object) we can't
2783 * mprotect() anything.
2784 */
2785 if (FLAGS(lmp) & FLG_RT_IMGALLOC)
2786 return (1);
2787
2788 DBG_CALL(Dbg_file_prot(NAME(lmp), permission));
2789
2790 for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++) {
2791 if (mmaps->m_perm & PROT_WRITE)
2792 continue;
2793
2794 if (mprotect(mmaps->m_vaddr, mmaps->m_msize,
2795 (mmaps->m_perm | permission)) == -1) {
2796 int err = errno;
2797 eprintf(ERR_FATAL, MSG_INTL(MSG_SYS_MPROT),
2798 NAME(lmp), strerror(err));
2799 return (0);
2800 }
2801 }
2802 return (1);
2803 }
2804
2805 /*
2806 * Build full pathname of shared object from given directory name and filename.
2807 */
2808 static char *
2809 elf_get_so(const char *dir, const char *file)
2810 {
2811 static char pname[PATH_MAX];
2812
2813 (void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file);
2814 return (pname);
2815 }
2816
2817 /*
2818 * The copy relocation is recorded in a copy structure which will be applied
2819 * after all other relocations are carried out. This provides for copying data
2820 * that must be relocated itself (i.e. pointers in shared objects). This
2821 * structure also provides a means of binding RTLD_GROUP dependencies to any
2822 * copy relocations that have been taken from any group members.
2823 *
2824 * If the size of the .bss area available for the copy information is not the
2825 * same as the source of the data, inform the user if we're under ldd(1) control
2826 * (this checking was only established in 5.3, so by only issuing an error via
2827 * ldd(1) we maintain the standard set by previous releases).
2828 */
2829 int
2830 elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym,
2831 Rt_map *dlmp, const void *dadd)
2832 {
2833 Rel_copy rc;
2834 Lm_list *lml = LIST(rlmp);
2835
2836 rc.r_name = name;
2837 rc.r_rsym = rsym; /* the new reference symbol and its */
2838 rc.r_rlmp = rlmp; /* associated link-map */
2839 rc.r_dlmp = dlmp; /* the defining link-map */
2840 rc.r_dsym = dsym; /* the original definition */
2841 rc.r_radd = radd;
2842 rc.r_dadd = dadd;
2843
2844 if (rsym->st_size > dsym->st_size)
2845 rc.r_size = (size_t)dsym->st_size;
2846 else
2847 rc.r_size = (size_t)rsym->st_size;
2848
2849 if (alist_append(&COPY(dlmp), &rc, sizeof (Rel_copy),
2850 AL_CNT_COPYREL) == 0) {
2851 if (!(lml->lm_flags & LML_FLG_TRC_WARN))
2852 return (0);
2853 else
2854 return (1);
2855 }
2856 if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) {
2857 if (alist_append(&COPY(rlmp), &dlmp,
2858 sizeof (Rt_map *), AL_CNT_COPYREL) == 0) {
2859 if (!(lml->lm_flags & LML_FLG_TRC_WARN))
2860 return (0);
2861 else
2862 return (1);
2863 }
2864 FLAGS1(dlmp) |= FL1_RT_COPYTOOK;
2865 }
2866
2867 /*
2868 * If we are tracing (ldd), warn the user if
2869 * 1) the size from the reference symbol differs from the
2870 * copy definition. We can only copy as much data as the
2871 * reference (dynamic executables) entry allows.
2872 * 2) the copy definition has STV_PROTECTED visibility.
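 *
 * As a worked illustration (hypothetical objects, not part of this source):
 * if libfoo.so.1 defines
 *
 *	int bar = 1;
 *
 * and a dynamic executable references bar directly, the link-editor
 * reserves space for bar in the executable's .bss along with a copy
 * relocation. At runtime the r_size bytes computed above are copied from
 * the definition (dsym) into the executable's reservation (rsym), and all
 * objects thereafter bind to the executable's copy.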
2873 */
2874 if (lml->lm_flags & LML_FLG_TRC_WARN) {
2875 if (rsym->st_size != dsym->st_size) {
2876 (void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF),
2877 _conv_reloc_type_str(M_R_COPY), demangle(name),
2878 NAME(rlmp), EC_XWORD(rsym->st_size),
2879 NAME(dlmp), EC_XWORD(dsym->st_size));
2880 if (rsym->st_size > dsym->st_size)
2881 (void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA),
2882 NAME(dlmp));
2883 else
2884 (void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC),
2885 NAME(rlmp));
2886 }
2887
2888 if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) {
2889 (void) printf(MSG_INTL(MSG_LDD_CPY_PROT),
2890 _conv_reloc_type_str(M_R_COPY), demangle(name),
2891 NAME(dlmp));
2892 }
2893 }
2894
2895 DBG_CALL(Dbg_reloc_apply((Xword)radd, (Xword)rc.r_size));
2896 return (1);
2897 }
2898
2899 /*
2900 * Determine the symbol location of an address within a link-map. Look for
2901 * the nearest symbol (whose value is less than or equal to the required
2902 * address). This is the object-specific part of dladdr().
2903 */
2904 static void
2905 elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags)
2906 {
2907 ulong_t ndx, cnt, base, _value;
2908 Sym *sym, *_sym;
2909 const char *str;
2910
2911 /*
2912 * If we don't have a .hash table there are no symbols to look at.
2913 */
2914 if (HASH(lmp) == 0)
2915 return;
2916
2917 cnt = HASH(lmp)[1];
2918 str = STRTAB(lmp);
2919 sym = SYMTAB(lmp);
2920
2921 if (FLAGS(lmp) & FLG_RT_FIXED)
2922 base = 0;
2923 else
2924 base = ADDR(lmp);
2925
2926 for (_sym = 0, _value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) {
2927 ulong_t value;
2928
2929 if (sym->st_shndx == SHN_UNDEF)
2930 continue;
2931
2932 value = sym->st_value + base;
2933 if (value > addr)
2934 continue;
2935 if (value < _value)
2936 continue;
2937
2938 _sym = sym;
2939 _value = value;
2940
2941 /*
2942 * Note, because we accept local and global symbols we could
2943 * find a section symbol that matches the associated address,
2944 * which means that the symbol name will be null. In this
2945 * case continue the search in case we can find a global
2946 * symbol of the same value.
2947 */
2948 if ((value == addr) &&
2949 (ELF_ST_TYPE(sym->st_info) != STT_SECTION))
2950 break;
2951 }
2952
2953 if (_sym) {
2954 int _flags = flags & RTLD_DL_MASK;
2955
2956 if (_flags == RTLD_DL_SYMENT)
2957 *info = (void *)_sym;
2958 else if (_flags == RTLD_DL_LINKMAP)
2959 *info = (void *)lmp;
2960
2961 dlip->dli_sname = str + _sym->st_name;
2962 dlip->dli_saddr = (void *)_value;
2963 }
2964 }
2965
2966 static void
2967 elf_lazy_cleanup(Alist * alp)
2968 {
2969 Rt_map ** lmpp;
2970 Aliste off;
2971
2972 /*
2973 * Clean up any link-maps added to this dynamic list and free it.
2974 */
2975 for (ALIST_TRAVERSE(alp, off, lmpp))
2976 FLAGS(*lmpp) &= ~FLG_RT_DLSYM;
2977 free(alp);
2978 }
2979
2980 /*
2981 * This routine is called upon to search for a symbol from the dependencies of
2982 * the initial link-map. To maintain lazy loading's goal of reducing the number
2983 * of objects mapped, any symbol search is first carried out using the objects
2984 * that already exist in the process (either on a link-map list or handle).
2985 * If a symbol can't be found, and lazy dependencies are still pending, this
2986 * routine loads the dependencies in an attempt to locate the symbol.
2987 *
2988 * Only new objects are inspected as we will have already inspected presently
2989 * loaded objects before calling this routine.
However, a new object may not
2990 * be new - although the di_lmp might be zero, the object may have been mapped
2991 * as someone else's dependency. Thus there's a possibility of some symbol
2992 * search duplication.
2993 */
2994
2995 Sym *
2996 elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo)
2997 {
2998 Sym *sym = 0;
2999 Alist * alist = 0;
3000 Aliste off;
3001 Rt_map ** lmpp, * lmp = slp->sl_imap;
3002 const char *name = slp->sl_name;
3003
3004 if (alist_append(&alist, &lmp, sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0)
3005 return (0);
3006 FLAGS(lmp) |= FLG_RT_DLSYM;
3007
3008 for (ALIST_TRAVERSE(alist, off, lmpp)) {
3009 uint_t cnt = 0;
3010 Slookup sl = *slp;
3011 Dyninfo *dip;
3012
3013 /*
3014 * Loop through the DT_NEEDED entries, examining each object
3015 * for the symbol. If the symbol is not found, the object is
3016 * in turn added to the alist, so that its DT_NEEDED entries
3017 * may be examined.
3018 */
3019 lmp = *lmpp;
3020 for (dip = DYNINFO(lmp); cnt < DYNINFOCNT(lmp); cnt++, dip++) {
3021 Rt_map *nlmp;
3022
3023 if (((dip->di_flags & FLG_DI_NEEDED) == 0) ||
3024 dip->di_info)
3025 continue;
3026
3027 /*
3028 * If this entry defines a lazy dependency, try loading
3029 * it. If the file can't be loaded, consider this
3030 * non-fatal and continue the search (lazy loaded
3031 * dependencies need not exist and their loading should
3032 * only be fatal if called from a relocation).
3033 *
3034 * If the file is already loaded and relocated we must
3035 * still inspect it for symbols, even though it might
3036 * have already been searched. This lazy load operation
3037 * might have promoted the permissions of the object,
3038 * and thus made the object applicable for this symbol
3039 * search, whereas before the object might have been
3040 * skipped.
3041 */
3042 if ((nlmp = elf_lazy_load(lmp, cnt, name)) == 0)
3043 continue;
3044
3045 /*
3046 * If this object isn't yet a part of the dynamic list
3047 * then inspect it for the symbol. If the symbol isn't
3048 * found, add the object to the dynamic list so that we
3049 * can inspect its dependencies.
3050 */
3051 if (FLAGS(nlmp) & FLG_RT_DLSYM)
3052 continue;
3053
3054 sl.sl_imap = nlmp;
3055 if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, binfo))
3056 break;
3057
3058 /*
3059 * Some dlsym() operations are already traversing a
3060 * link-map (dlopen(0)), and thus there's no need to
3061 * build our own dynamic dependency list.
3062 */
3063 if ((sl.sl_flags & LKUP_NODESCENT) == 0) {
3064 if (alist_append(&alist, &nlmp,
3065 sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) {
3066 elf_lazy_cleanup(alist);
3067 return (0);
3068 }
3069 FLAGS(nlmp) |= FLG_RT_DLSYM;
3070 }
3071 }
3072 if (sym)
3073 break;
3074 }
3075
3076 elf_lazy_cleanup(alist);
3077 return (sym);
3078 }
3079
3080 /*
3081 * Warning message for bad r_offset.
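 * The diagnostic is delivered either through ldd(1) tracing or as a debug
 * diagnostic, as determined by the trace and dbg_mask tests below.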
3082 */
3083 void
3084 elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset,
3085 ulong_t rsymndx)
3086 {
3087 const char *name = (char *)0;
3088 int trace;
3089
3090 if ((LIST(lmp)->lm_flags & LML_FLG_TRC_ENABLE) &&
3091 (((rtld_flags & RT_FL_SILENCERR) == 0) ||
3092 (LIST(lmp)->lm_flags & LML_FLG_TRC_VERBOSE)))
3093 trace = 1;
3094 else
3095 trace = 0;
3096
3097 if ((trace == 0) && (dbg_mask == 0))
3098 return;
3099
3100 if (rsymndx) {
3101 Sym *symref = (Sym *)((ulong_t)SYMTAB(lmp) +
3102 (rsymndx * SYMENT(lmp)));
3103
3104 if (ELF_ST_BIND(symref->st_info) != STB_LOCAL)
3105 name = (char *)(STRTAB(lmp) + symref->st_name);
3106 }
3107
3108 if (name == 0)
3109 name = MSG_ORIG(MSG_STR_EMPTY);
3110
3111 if (trace) {
3112 const char *rstr;
3113
3114 rstr = _conv_reloc_type_str((uint_t)rtype);
3115 (void) printf(MSG_INTL(MSG_LDD_REL_ERR1), rstr, name,
3116 EC_ADDR(roffset));
3117 return;
3118 }
3119
3120 Dbg_reloc_error(M_MACH, M_REL_SHT_TYPE, rel, name,
3121 MSG_ORIG(MSG_REL_BADROFFSET));
3122 }
3123