/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 */

/*
 * x86 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_386.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"_inline.h"
#include	"msg.h"

extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,					/* pushl %ebp */
/* 0x01 */	0x8b, 0xec,				/* movl %esp, %ebp */
/* 0x03 */	0x68, 0x00, 0x00, 0x00, 0x00,		/* pushl trace_fields */
/* 0x08 */	0xe9, 0xfc, 0xff, 0xff, 0xff, 0xff	/* jmp  elf_plt_trace */
};
int	dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *	pushl	%ebp
 *	movl	%esp, %ebp
 *	pushl	tfp
 *	jmp	elf_plt_trace
 * dyn_data:
 *	.align	4
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
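/*
 * Note on how the template is patched (inferred from the relocations
 * applied below): the 32-bit immediate of the "pushl" at offset 0x4
 * receives the absolute address of dyn_data (R_386_32), and the 32-bit
 * displacement of the "jmp" at offset 0x9 receives
 * elf_plt_trace - (dyn_plt + 9) (R_386_PC32).  Since i386 uses REL
 * relocations, the bytes already present in the template (0xfc, 0xff,
 * 0xff, 0xff, i.e. -4) serve as the implicit addend, so the final
 * displacement is measured from the end of the 5-byte jmp instruction,
 * as the processor requires.
 */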
static caddr_t
elf_plt_trace_write(uint_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't,
	 * do it now.  Otherwise this function has been called before, but
	 * from a different plt (i.e. from another shared object).  In that
	 * case we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Word	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	pushl	dyn_data
		 */
		symvalue = (Word)dyndata;
		if (do_reloc_rtld(R_386_32, &dyn_plt[4], &symvalue,
		    MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * jmps are relative, so I need to figure out the relative
		 * address to elf_plt_trace.
		 *
		 * relocating:
		 *	jmp	elf_plt_trace
		 */
		symvalue = (ulong_t)(elf_plt_trace) - (ulong_t)(dyn_plt + 9);
		if (do_reloc_rtld(R_386_PC32, &dyn_plt[9], &symvalue,
		    MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata++ = (uint_t)symndx;
		*dyndata++ = (uint_t)sb_flags;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_name += (Word)STRTAB(dlmp);
		symp->st_value = (Addr)to;
	}

	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; passes first through an assembly language
 * interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
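/*
 * In outline (the PLT and binder stubs themselves live in the i386
 * assembly sources, not in this file): a lazily bound call enters its
 * .plt entry, which pushes the relocation offset and jumps to the first
 * plt entry (PLT0).  PLT0 pushes GOT[M_GOT_XLINKMAP] - the link-map
 * address stored by elf_plt_init() - and jumps through GOT[M_GOT_XRTLD]
 * to elf_rtbndr(), which in turn hands the link-map, relocation offset
 * and return address to this routine.
 */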
ulong_t
elf_bndr(Rt_map *lmp, ulong_t reloff, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, symval, rsymndx;
	char		*name;
	Rel		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	Sresult		sr;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter(0);

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map or
	 * the relocation offset is invalid then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if (!lmp || ((reloff % sizeof (Rel)) != 0)) {
		Conv_inv_buf_t	inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_386_type(R_386_JMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(reloff), EC_NATPTR(from));
		rtldexit(lml, 1);
	}

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rel *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.  Initialize the symbol lookup, and
	 * symbol result, data structures.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);
	SRESULT_INIT(sr, name);

	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	name = (char *)sr.sr_name;
	nlmp = sr.sr_dmap;
	nsym = sr.sr_sym;

	symval = nsym->st_value;

	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | AFLAGS(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | AFLAGS(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rel);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rel)), PLT_T_FULL, nlmp, (Addr)symval,
	    nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map, make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml, 0);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}

/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
	ulong_t		_pltbgn, _pltend;
	ulong_t		dsymndx, roffset, rsymndx, psymndx = 0;
	uchar_t		rtype;
	long		value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rel		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		mmapobj_result_t	*mpp;

		/*
		 * Make sure the segment is writable.
		 */
		if ((((mpp =
		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real .plt
	 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;
		Sresult	sr;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		/*
		 * Initialize the symbol lookup, and symbol result, data
		 * structures.
		 */
		SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
		    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
		SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));

		if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
			return (1);

		symdef = sr.sr_sym;
		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL
		 *	and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
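		/*
		 * (Concretely: under the first model the .plt relocations
		 * already fall inside [REL, REL + RELSZ) and the checks
		 * below change nothing; under the second model, or if REL
		 * is absent altogether, the range is simply widened to take
		 * in [JMPREL, JMPREL + PLTRELSZ).)
		 */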
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table; however, if we're processing a
	 * shared object in lazy mode the .got addresses associated with each
	 * .plt must be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		mmapobj_result_t	*mpp;
		uint_t			sb_flags = 0;

		rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info, M_MACH);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).
		 */
		if ((rtype == R_386_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			if (relacount) {
				relbgn = elf_reloc_relative_count(relbgn,
				    relacount, relsiz, basebgn, lmp, textrel);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, lmp, textrel);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info, M_MACH);
		}

		roffset = ((Rel *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_386_JMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				relbgn = elf_reloc_relative_count(relbgn,
				    plthint, relsiz, basebgn, lmp, textrel);
				plthint = 0;
				continue;
			}
			roffset += basebgn;
		}

		rsymndx = ELF_R_SYM(((Rel *)relbgn)->r_info);
		rel = (Rel *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_386_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.
		 */
		if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
		    (*(ulong_t *)roffset > _pltend)))
			continue;

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		binfo = 0;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address.
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = NULL;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_386_TLS_DTPMOD32) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if (rtype == R_386_TLS_TPOFF) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is equal to the previous
				 * symbol index relocation we processed then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol, our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_386_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					Sresult		sr;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup, and
					 * symbol result, data structures.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);
					SRESULT_INIT(sr, name);
					symdef = NULL;

					if (lookup_sym(&sl, &sr, &binfo,
					    in_nfavl)) {
						name = (char *)sr.sr_name;
						_lmp = sr.sr_dmap;
						symdef = sr.sr_sym;
					}

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    if (sl.sl_bind != STB_WEAK) {
						if (elf_reloc_error(lmp, name,
						    rel, binfo))
							continue;

						ret = 0;
						break;

					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_386_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_386_TLS_DTPMOD32) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if (rtype == R_386_TLS_TPOFF) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_386_TLS_DTPMOD32) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;

			name = NULL;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, 0, name));

		/*
		 * Make sure the segment is writable.
		 */
		if (((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
			ret = 0;
			break;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_386_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_386_JMP_SLOT:
			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Word *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint_t		*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint_t *)got + M_GOT_XLINKMAP;
	*_got = (uint_t)lmp;
	_got = (uint_t *)got + M_GOT_XRTLD;
	*_got = (uint_t)elf_rtbndr;
}
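
/*
 * (For reference: the i386 ABI reserves the first few GOT entries for the
 * dynamic linker - GOT[0] conventionally holds the address of _DYNAMIC,
 * while the two entries written above are the ones PLT0 pushes and jumps
 * through.  Writing them here is what lets a call through a plt entry
 * whose got slot still points back at the plt reach elf_rtbndr(), and
 * hence elf_bndr().)
 */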

/*
 * For SVR4 Intel compatibility.  USL uses /usr/lib/libc.so.1 as the run-time
 * linker, so the interpreter's address will differ from /usr/lib/ld.so.1.
 * Further, USL has special _iob[] and _ctype[] processing that makes up for
 * the fact that these arrays do not have associated copy relocations.  So we
 * try to make up for that here.  Any relocations found will be added to the
 * global copy relocation list and will be processed in setup().
 */
static int
_elf_copy_reloc(const char *name, Rt_map *rlmp, Rt_map *dlmp)
{
	Sym		*symref, *symdef;
	caddr_t		ref, def;
	Rt_map		*_lmp;
	Rel		rel;
	Slookup		sl;
	Sresult		sr;
	uint_t		binfo;

	/*
	 * Determine if the special symbol exists as a reference in the dynamic
	 * executable, and that an associated definition exists in libc.so.1.
	 *
	 * Initialize the symbol lookup, and symbol result, data structures.
	 */
	SLOOKUP_INIT(sl, name, rlmp, rlmp, ld_entry_cnt, 0, 0, 0, 0,
	    LKUP_FIRST);
	SRESULT_INIT(sr, name);

	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0)
		return (1);
	symref = sr.sr_sym;

	SLOOKUP_INIT(sl, name, rlmp, dlmp, ld_entry_cnt, 0, 0, 0, 0,
	    LKUP_DEFT);
	SRESULT_INIT(sr, name);

	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0)
		return (1);

	_lmp = sr.sr_dmap;
	symdef = sr.sr_sym;

	if (strcmp(NAME(sr.sr_dmap), MSG_ORIG(MSG_PTH_LIBC)))
		return (1);

	/*
	 * Determine the reference and definition addresses.
	 */
	ref = (void *)(symref->st_value);
	if (!(FLAGS(rlmp) & FLG_RT_FIXED))
		ref += ADDR(rlmp);
	def = (void *)(symdef->st_value);
	if (!(FLAGS(sr.sr_dmap) & FLG_RT_FIXED))
		def += ADDR(_lmp);

	/*
	 * Set up a relocation entry for debugging and call the generic copy
	 * relocation function to provide symbol size error checking and to
	 * record the copy relocation that must be performed.
	 */
	rel.r_offset = (Addr)ref;
	rel.r_info = (Word)R_386_COPY;
	DBG_CALL(Dbg_reloc_in(LIST(rlmp), ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE,
	    &rel, NULL, 0, name));

	return (elf_copy_reloc((char *)name, symref, rlmp, (void *)ref, symdef,
	    _lmp, (void *)def));
}

int
elf_copy_gen(Rt_map *lmp)
{
	if (interp && ((ulong_t)interp->i_faddr !=
	    r_debug.rtd_rdebug.r_ldbase) &&
	    !(strcmp(interp->i_name, MSG_ORIG(MSG_PTH_LIBC)))) {

		DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, 0,
		    DBG_REL_START));

		if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_CTYPE), lmp,
		    (Rt_map *)NEXT(lmp)) == 0)
			return (0);
		if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_IOB), lmp,
		    (Rt_map *)NEXT(lmp)) == 0)
			return (0);
	}
	return (1);
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rel		*rel = (Rel *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_386_type(rel, 0, &inv_buf));
}