/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 *
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * x86 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include "_synonyms.h"

#include <stdio.h>
#include <sys/elf.h>
#include <sys/elf_386.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <synch.h>
#include <string.h>
#include "_rtld.h"
#include "_audit.h"
#include "_elf.h"
#include "msg.h"
#include "debug.h"
#include "reloc.h"
#include "conv.h"

extern void elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
    /*
     * Check machine type and flags.
     */
    if (ehdr->e_flags != 0) {
        rej->rej_type = SGS_REJ_BADFLAG;
        rej->rej_info = (uint_t)ehdr->e_flags;
        return (0);
    }
    return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
    /*
     * There is no need to analyze ld.so because we don't map in any of
     * its dependencies.  However, we may map these dependencies in later
     * (as if ld.so had dlopened them), so initialize the plt and the
     * permission information.
     */
    if (PLTGOT(lmp))
        elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */  0x55,                           /* pushl %ebp */
/* 0x01 */  0x8b, 0xec,                     /* movl  %esp, %ebp */
/* 0x03 */  0x68, 0x00, 0x00, 0x00, 0x00,   /* pushl trace_fields */
/* 0x08 */  0xe9, 0xfc, 0xff, 0xff, 0xff    /* jmp   elf_plt_trace */
};
int dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *      pushl   %ebp
 *      movl    %esp, %ebp
 *      pushl   tfp
 *      jmp     elf_plt_trace
 * dyn_data:
 *      .align  4
 *      uintptr_t   reflmp
 *      uintptr_t   deflmp
 *      uint_t      symndx
 *      uint_t      sb_flags
 *      Sym         symdef
 */
static caddr_t
elf_plt_trace_write(uint_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
    extern int elf_plt_trace();
    ulong_t got_entry;
    uchar_t *dyn_plt;
    uintptr_t *dyndata;

    /*
     * We only need to add the glue code if there is an auditing
     * library that is interested in this binding.
     */
    dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
        (pltndx * dyn_plt_ent_size));

    /*
     * Have we initialized this dynamic plt entry yet?  If we haven't,
     * do it now.  Otherwise this function has been called before, but
     * from a different plt (i.e. from another shared object).  In that
     * case we just set the plt to point to the new dyn_plt.
     */
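    /*
     * Layout of one dynamic plt entry (byte offsets into dyn_plt, taken
     * from dyn_plt_template above):
     *
     *      0x00    pushl   %ebp
     *      0x01    movl    %esp, %ebp
     *      0x03    pushl   $dyndata        (immediate patched in below)
     *      0x08    jmp     elf_plt_trace   (rel32 patched in below)
     *      0x0d    padding to M_WORD_ALIGN, then dyndata: reflmp, deflmp,
     *              symndx, sb_flags and a copy of the symbol definition
     *              (see the block comment above).
     *
     * The first template byte (0x55, pushl %ebp) is non-zero, so a zero
     * leading byte identifies an entry that has not been written yet.
     */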
    if (*dyn_plt == 0) {
        Sym *symp;
        Word symvalue;

        (void) memcpy((void *)dyn_plt, dyn_plt_template,
            sizeof (dyn_plt_template));
        dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
            ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

        /*
         * relocate:
         *      pushl   dyn_data
         */
        symvalue = (Word)dyndata;
        if (do_reloc(R_386_32, &dyn_plt[4], &symvalue,
            MSG_ORIG(MSG_SYM_LADYNDATA),
            MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
            *fail = 1;
            return (0);
        }

        /*
         * jmps are relative, so we need to figure out the relative
         * address to elf_plt_trace.
         *
         * relocating:
         *      jmp     elf_plt_trace
         */
        symvalue = (ulong_t)(elf_plt_trace) - (ulong_t)(dyn_plt + 9);
        if (do_reloc(R_386_PC32, &dyn_plt[9], &symvalue,
            MSG_ORIG(MSG_SYM_ELFPLTTRACE),
            MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
            *fail = 1;
            return (0);
        }

        *dyndata++ = (uintptr_t)rlmp;
        *dyndata++ = (uintptr_t)dlmp;
        *dyndata++ = (uint_t)symndx;
        *dyndata++ = (uint_t)sb_flags;
        symp = (Sym *)dyndata;
        *symp = *sym;
        symp->st_name += (Word)STRTAB(dlmp);
        symp->st_value = (Addr)to;
    }

    got_entry = (ulong_t)roffset;
    *(ulong_t *)got_entry = (ulong_t)dyn_plt;
    return ((caddr_t)dyn_plt);
}


/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t reloff, caddr_t from)
{
    Rt_map *nlmp, *llmp;
    ulong_t addr, symval, rsymndx;
    char *name;
    Rel *rptr;
    Sym *sym, *nsym;
    uint_t binfo, sb_flags = 0;
    Slookup sl;
    int entry, dbg_save, lmflags;

    /*
     * For compatibility with libthread (TI_VERSION 1) we track the entry
     * value.  A zero value indicates we have recursed into ld.so.1 to
     * further process a locking request.  Under this recursion we disable
     * tsort and cleanup activities.
     */
    entry = enter();

    if ((lmflags = LIST(lmp)->lm_flags) & LML_FLG_RTLDLM) {
        dbg_save = dbg_mask;
        dbg_mask = 0;
    }

    /*
     * Perform some basic sanity checks.  If we didn't get a load map or
     * the relocation offset is invalid then it's possible someone has
     * walked over the .got entries or jumped to plt0 out of the blue.
     */
    if (!lmp || ((reloff % sizeof (Rel)) != 0)) {
        eprintf(ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
            conv_reloc_386_type_str(R_386_JMP_SLOT),
            EC_XWORD(lmp), EC_XWORD(reloff), EC_ADDR(from));
        rtldexit(LIST(lmp), 1);
    }
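
    /*
     * On x86 each lazy plt entry pushes the byte offset of its relocation
     * record within the .rel.plt section before falling into plt0, so
     * reloff can be used directly as an offset from JMPREL() below, and
     * must be a multiple of sizeof (Rel) (checked above).
     */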

    /*
     * Use the relocation entry to get the symbol table entry and the
     * symbol name.
     */
    addr = (ulong_t)JMPREL(lmp);
    rptr = (Rel *)(addr + reloff);
    rsymndx = ELF_R_SYM(rptr->r_info);
    sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
    name = (char *)(STRTAB(lmp) + sym->st_name);

    /*
     * Determine the last link-map of this list; this will be the starting
     * point for any tsort() processing.
     */
    llmp = LIST(lmp)->lm_tail;

    /*
     * Find the definition for the symbol.
     */
    sl.sl_name = name;
    sl.sl_cmap = lmp;
    sl.sl_imap = LIST(lmp)->lm_head;
    sl.sl_hash = 0;
    sl.sl_rsymndx = rsymndx;
    sl.sl_flags = LKUP_DEFT;

    if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
        eprintf(ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
            demangle(name));
        rtldexit(LIST(lmp), 1);
    }

    symval = nsym->st_value;
    if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
        (nsym->st_shndx != SHN_ABS))
        symval += ADDR(nlmp);
    if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
        /*
         * Record that this new link map is now bound to the caller.
         */
        if (bind_one(lmp, nlmp, BND_REFER) == 0)
            rtldexit(LIST(lmp), 1);
    }

    if ((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
        uint_t symndx = (((uintptr_t)nsym -
            (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
        symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
            &sb_flags);
    }

    if (!(rtld_flags & RT_FL_NOBIND)) {
        addr = rptr->r_offset;
        if (!(FLAGS(lmp) & FLG_RT_FIXED))
            addr += ADDR(lmp);
        if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
            (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
            AUDINFO(lmp)->ai_dynplts) {
            int fail = 0;
            uint_t pltndx = reloff / sizeof (Rel);
            uint_t symndx = (((uintptr_t)nsym -
                (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

            symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
                nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
                &fail);
            if (fail)
                rtldexit(LIST(lmp), 1);
        } else {
            /*
             * Write standard PLT entry to jump directly
             * to newly bound function.
             */
            *(ulong_t *)addr = symval;
        }
    }

    /*
     * Print binding information and rebuild PLT entry.
     */
    DBG_CALL(Dbg_bind_global(NAME(lmp), from, from - ADDR(lmp),
        (Xword)(reloff / sizeof (Rel)), PLT_T_FULL, NAME(nlmp),
        (caddr_t)symval, (caddr_t)nsym->st_value, name, binfo));

    /*
     * Complete any processing for newly loaded objects.  Note we don't
     * know exactly where any new objects are loaded (we know the object
     * that supplied the symbol, but others may have been loaded lazily as
     * we searched for the symbol), so sorting starts from the last
     * link-map known on entry to this routine.
     */
    if (entry)
        load_completion(llmp, lmp);

    /*
     * Some operations like dldump() or dlopen()'ing a relocatable object
     * result in objects being loaded on rtld's link-map; make sure these
     * objects are initialized also.
     */
    if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
        load_completion(nlmp, 0);

    /*
     * If the object we've bound to is in the process of being initialized
     * by another thread, determine whether we should block.
     */
    is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

    /*
     * Make sure the object to which we've bound has had its .init fired.
     * Clean up before returning to user code.
     */
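    /*
     * If entry is zero we recursed into ld.so.1 (see the comment at the
     * top of this routine); in that case the outer invocation is
     * responsible for firing .inits and for releasing the lock taken by
     * its enter().
     */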
    if (entry) {
        is_dep_init(nlmp, lmp);
        leave(LIST(lmp));
    }

    if (lmflags & LML_FLG_RTLDLM)
        dbg_mask = dbg_save;

    return (symval);
}


/*
 * When the relocation loop realizes that it's dealing with relative
 * relocations in a shared object, it breaks into this tighter loop
 * as an optimization.
 */
ulong_t
elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz,
    ulong_t basebgn, ulong_t etext, ulong_t emap)
{
    ulong_t roffset = ((Rel *)relbgn)->r_offset;
    char rtype;

    do {
        roffset += basebgn;

        /*
         * If this relocation is against an address not mapped in,
         * then break out of the relative relocation loop, falling
         * back on the main relocation loop.
         */
        if (roffset < etext || roffset > emap)
            break;

        /*
         * Perform the actual relocation.
         */
        *((ulong_t *)roffset) += basebgn;

        relbgn += relsiz;

        if (relbgn >= relend)
            break;

        rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info);
        roffset = ((Rel *)relbgn)->r_offset;

    } while (rtype == R_386_RELATIVE);

    return (relbgn);
}

/*
 * This is the tightest loop for RELATIVE relocations, used for those
 * objects built with the DT_RELACOUNT .dynamic entry.
 */
ulong_t
elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz,
    ulong_t basebgn)
{
    ulong_t roffset = ((Rel *)relbgn)->r_offset;

    for (; relacount; relacount--) {
        roffset += basebgn;

        /*
         * Perform the actual relocation.
         */
        *((ulong_t *)roffset) += basebgn;

        relbgn += relsiz;

        roffset = ((Rel *)relbgn)->r_offset;
    }

    return (relbgn);
}

/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
    ulong_t relbgn, relend, relsiz, basebgn;
    ulong_t pltbgn, pltend, _pltbgn, _pltend;
    ulong_t roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
    ulong_t emap, dsymndx;
    uchar_t rtype;
    long value, pvalue;
    Sym *symref, *psymref, *symdef, *psymdef;
    char *name, *pname;
    Rt_map *_lmp, *plmp;
    int textrel = 0, ret = 1, noplt = 0;
    int relacount = RELACOUNT(lmp), plthint = 0;
    Rel *rel;
    uint_t binfo, pbinfo;
    Alist *bound = 0;

    /*
     * Although only necessary for lazy binding, initialize the first
     * global offset entry to go to elf_rtbndr().  dbx(1) seems
     * to find this useful.
     */
    if ((plt == 0) && PLTGOT(lmp)) {
        if ((ulong_t)PLTGOT(lmp) < etext) {
            if (elf_set_prot(lmp, PROT_WRITE) == 0)
                return (0);
            textrel = 1;
        }
        elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
    }

    /*
     * Initialize the plt start and end addresses.
     */
    if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
        pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

    relsiz = (ulong_t)(RELENT(lmp));
    basebgn = ADDR(lmp);
    emap = ADDR(lmp) + MSIZE(lmp);

    if (PLTRELSZ(lmp))
        plthint = PLTRELSZ(lmp) / relsiz;

    /*
     * If we've been called upon to promote an RTLD_LAZY object to
     * RTLD_NOW then we're only interested in scanning the .plt table.
     * An uninitialized .plt is the case where the associated got entry
     * points back to the plt itself.  Determine the range of the real
     * .plt entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
     */
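    /*
     * In other words, when promoting, a got entry whose contents still
     * lie within [_pltbgn, _pltend] belongs to an unbound plt entry
     * (lazy entries point back into the .plt); anything outside that
     * range has already been bound and is skipped by the loop below.
     */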
    if (plt) {
        Slookup sl;

        relbgn = pltbgn;
        relend = pltend;
        if (!relbgn || (relbgn == relend))
            return (1);

        sl.sl_name = MSG_ORIG(MSG_SYM_PLT);
        sl.sl_cmap = lmp;
        sl.sl_imap = lmp;
        sl.sl_hash = 0;
        sl.sl_rsymndx = 0;
        sl.sl_flags = LKUP_DEFT;

        if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0)
            return (1);

        _pltbgn = symdef->st_value;
        if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
            (symdef->st_shndx != SHN_ABS))
            _pltbgn += basebgn;
        _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
            M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

    } else {
        /*
         * The relocation sections appear to the run-time linker as a
         * single table.  Determine the address of the beginning and
         * end of this table.  There are two different interpretations
         * of the ABI at this point:
         *
         *   o  The REL table and its associated RELSZ indicate the
         *      concatenation of *all* relocation sections (this is the
         *      model our link-editor constructs).
         *
         *   o  The REL table and its associated RELSZ indicate the
         *      concatenation of all *but* the .plt relocations.  These
         *      relocations are specified individually by the JMPREL
         *      and PLTRELSZ entries.
         *
         * Determine from our knowledge of the relocation range and
         * .plt range, the range of the total relocation table.  Note
         * that one other ABI assumption seems to be that the .plt
         * relocations always follow any other relocations; the
         * following range checking drops that assumption.
         */
        relbgn = (ulong_t)(REL(lmp));
        relend = relbgn + (ulong_t)(RELSZ(lmp));
        if (pltbgn) {
            if (!relbgn || (relbgn > pltbgn))
                relbgn = pltbgn;
            if (!relbgn || (relend < pltend))
                relend = pltend;
        }
    }
    if (!relbgn || (relbgn == relend)) {
        DBG_CALL(Dbg_reloc_run(NAME(lmp), 0, plt, DBG_REL_NONE));
        return (1);
    }
    DBG_CALL(Dbg_reloc_run(NAME(lmp), M_REL_SHT_TYPE, plt, DBG_REL_START));

    /*
     * If we're processing a dynamic executable in lazy mode there is no
     * need to scan the .rel.plt table; however, if we're processing a
     * shared object in lazy mode the .got addresses associated with each
     * .plt must be relocated to reflect the location of the shared object.
     */
    if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
        (FLAGS(lmp) & FLG_RT_FIXED))
        noplt = 1;

    /*
     * Loop through relocations.
     */
    while (relbgn < relend) {
        uint_t sb_flags = 0;

        rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info);

        /*
         * If this is a RELATIVE relocation in a shared object (the
         * common case), and if we are not debugging, then jump into a
         * tighter relocation loop (elf_reloc_relative).  Only make the
         * jump if we've been given a hint on the number of relocations.
         */
        if ((rtype == R_386_RELATIVE) &&
            !(FLAGS(lmp) & FLG_RT_FIXED) && !dbg_mask) {
            /*
             * It's possible that the relative relocation block
             * has relocations against the text segment as well
             * as the data segment.  Since our optimized relocation
             * engine does not check which segment the relocation
             * is against, just mprotect it now if the object has
             * been marked as containing TEXTRELs.
             */
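            /*
             * If DT_RELACOUNT was supplied we know exactly how many
             * RELATIVE relocations lead the table, so
             * elf_reloc_relacount() can apply them without re-checking
             * each relocation type.  Otherwise elf_reloc_relative()
             * runs until it finds a non-RELATIVE entry or an offset
             * outside the writable data segment (below etext or beyond
             * the end of the mapping).
             */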
            if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
                if (elf_set_prot(lmp, PROT_WRITE) == 0) {
                    ret = 0;
                    break;
                }
                textrel = 1;
            }
            if (relacount) {
                relbgn = elf_reloc_relacount(relbgn, relacount,
                    relsiz, basebgn);
                relacount = 0;
            } else {
                relbgn = elf_reloc_relative(relbgn, relend,
                    relsiz, basebgn, etext, emap);
            }
            if (relbgn >= relend)
                break;
            rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info);
        }

        roffset = ((Rel *)relbgn)->r_offset;

        /*
         * If this is a shared object, add the base address to offset.
         */
        if (!(FLAGS(lmp) & FLG_RT_FIXED)) {

            /*
             * If we're processing lazy bindings, we have to step
             * through the plt entries and add the base address
             * to the corresponding got entry.
             */
            if (plthint && (plt == 0) &&
                (rtype == R_386_JMP_SLOT) &&
                ((MODE(lmp) & RTLD_NOW) == 0)) {
                relbgn = elf_reloc_relacount(relbgn,
                    plthint, relsiz, basebgn);
                plthint = 0;
                continue;
            }
            roffset += basebgn;
        }

        rsymndx = ELF_R_SYM(((Rel *)relbgn)->r_info);
        rel = (Rel *)relbgn;
        relbgn += relsiz;

        /*
         * Optimizations.
         */
        if (rtype == R_386_NONE)
            continue;
        if (noplt && ((ulong_t)rel >= pltbgn) &&
            ((ulong_t)rel < pltend)) {
            relbgn = pltend;
            continue;
        }

        /*
         * If we're promoting plts, determine if this one has already
         * been written.
         */
        if (plt) {
            if ((*(ulong_t *)roffset < _pltbgn) ||
                (*(ulong_t *)roffset > _pltend))
                continue;
        }

        /*
         * If this relocation is not against part of the image
         * mapped into memory we skip it.
         */
        if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
            MSIZE(lmp)))) {
            elf_reloc_bad(lmp, (void *)rel,
                rtype, roffset, rsymndx);
            continue;
        }

        binfo = 0;
        /*
         * If a symbol index is specified then get the symbol table
         * entry, locate the symbol definition, and determine its
         * address.
         */
        if (rsymndx) {
            /*
             * Get the local symbol table entry.
             */
            symref = (Sym *)((ulong_t)SYMTAB(lmp) +
                (rsymndx * SYMENT(lmp)));

            /*
             * If this is a local symbol, just use the base address.
             * (We should have no local relocations in the
             * executable).
             */
            if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
                value = basebgn;
                name = (char *)0;

                /*
                 * TLS relocation - the value for a DTPMOD32
                 * relocation is the TLS modid.
                 */
                if (rtype == R_386_TLS_DTPMOD32)
                    value = TLSMODID(lmp);
            } else {
                /*
                 * If the symbol index is equal to the previous
                 * symbol index relocation we processed then
                 * reuse the previous values.  (Note that there
                 * have been cases where a relocation exists
                 * against a copy relocation symbol; our ld(1)
                 * should optimize this away, but make sure we
                 * don't use the same symbol information should
                 * this case exist).
                 */
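                /*
                 * Copy relocations are excluded from this caching:
                 * they are looked up with LKUP_COPY (see below),
                 * which has different semantics from the default
                 * lookup, so their results are neither reused from
                 * nor stored in the cached psym* values.
                 */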
                if ((rsymndx == psymndx) &&
                    (rtype != R_386_COPY)) {
                    /* LINTED */
                    if (psymdef == 0) {
                        DBG_CALL(Dbg_bind_weak(
                            NAME(lmp), (caddr_t)roffset,
                            (caddr_t)(roffset - basebgn), name));
                        continue;
                    }
                    /* LINTED */
                    value = pvalue;
                    /* LINTED */
                    name = pname;
                    /* LINTED */
                    symdef = psymdef;
                    /* LINTED */
                    symref = psymref;
                    /* LINTED */
                    _lmp = plmp;
                    /* LINTED */
                    binfo = pbinfo;

                    if ((LIST(_lmp)->lm_tflags |
                        FLAGS1(_lmp)) &
                        LML_TFLG_AUD_SYMBIND) {
                        value = audit_symbind(lmp, _lmp,
                            /* LINTED */
                            symdef, dsymndx, value,
                            &sb_flags);
                    }
                } else {
                    Slookup sl;
                    uchar_t bind;

                    /*
                     * Lookup the symbol definition.
                     */
                    name = (char *)(STRTAB(lmp) +
                        symref->st_name);

                    sl.sl_name = name;
                    sl.sl_cmap = lmp;
                    sl.sl_imap = 0;
                    sl.sl_hash = 0;
                    sl.sl_rsymndx = rsymndx;

                    if (rtype == R_386_COPY)
                        sl.sl_flags = LKUP_COPY;
                    else
                        sl.sl_flags = LKUP_DEFT;

                    sl.sl_flags |= LKUP_ALLCNTLIST;

                    if (rtype != R_386_JMP_SLOT)
                        sl.sl_flags |= LKUP_SPEC;

                    bind = ELF_ST_BIND(symref->st_info);
                    if (bind == STB_WEAK)
                        sl.sl_flags |= LKUP_WEAK;

                    symdef = lookup_sym(&sl, &_lmp, &binfo);

                    /*
                     * If the symbol is not found and the
                     * reference was not to a weak symbol,
                     * report an error.  Weak references
                     * may be unresolved.
                     * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND)
                     */
                    if (symdef == 0) {
                        if (bind != STB_WEAK) {
                            if (LIST(lmp)->lm_flags &
                                LML_FLG_IGNRELERR) {
                                continue;
                            } else if (LIST(lmp)->lm_flags &
                                LML_FLG_TRC_WARN) {
                                (void) printf(MSG_INTL(
                                    MSG_LDD_SYM_NFOUND),
                                    demangle(name),
                                    NAME(lmp));
                                continue;
                            } else {
                                eprintf(ERR_FATAL,
                                    MSG_INTL(MSG_REL_NOSYM),
                                    NAME(lmp),
                                    demangle(name));
                                ret = 0;
                                break;
                            }
                        } else {
                            psymndx = rsymndx;
                            psymdef = 0;

                            DBG_CALL(Dbg_bind_weak(
                                NAME(lmp), (caddr_t)roffset,
                                (caddr_t)(roffset - basebgn),
                                name));
                            continue;
                        }
                    }

                    /*
                     * If the symbol was found in an object
                     * other than the referencing object
                     * then record the binding.
                     */
                    if ((lmp != _lmp) && ((FLAGS1(_lmp) &
                        FL1_RT_NOINIFIN) == 0)) {
                        if (alist_test(&bound, _lmp,
                            sizeof (Rt_map *),
                            AL_CNT_RELBIND) == 0) {
                            ret = 0;
                            break;
                        }
                    }

                    /*
                     * Calculate the location of the definition;
                     * symbol value plus base address of the
                     * containing shared object.
                     */
                    value = symdef->st_value;
                    if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
                        (symdef->st_shndx != SHN_ABS) &&
                        (ELF_ST_TYPE(symdef->st_info) !=
                        STT_TLS))
                        value += ADDR(_lmp);

                    /*
                     * Retain this symbol index and the value in
                     * case it can be used for the subsequent
                     * relocations.
                     */
                    if (rtype != R_386_COPY) {
                        psymndx = rsymndx;
                        pvalue = value;
                        pname = name;
                        psymdef = symdef;
                        psymref = symref;
                        plmp = _lmp;
                        pbinfo = binfo;
                    }
                    if ((LIST(_lmp)->lm_tflags |
                        FLAGS1(_lmp)) &
                        LML_TFLG_AUD_SYMBIND) {
                        dsymndx = (((uintptr_t)symdef -
                            (uintptr_t)SYMTAB(_lmp)) /
                            SYMENT(_lmp));
                        value = audit_symbind(lmp, _lmp,
                            symdef, dsymndx, value,
                            &sb_flags);
                    }
                }

                /*
                 * If the relocation is PC-relative, subtract the
                 * offset address.
                 */
                if (IS_PC_RELATIVE(rtype))
                    value -= roffset;

                /*
                 * TLS relocation - the value for a DTPMOD32
                 * relocation is the TLS modid.
                 */
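                /*
                 * For TPOFF the computed value is
                 * value - TLSSTATOFF(_lmp), i.e. the offset of the
                 * symbol from the thread pointer.  On x86 the static
                 * TLS area sits below the thread pointer, so this
                 * offset is negative.
                 */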
                if (rtype == R_386_TLS_DTPMOD32)
                    value = TLSMODID(_lmp);
                else if (rtype == R_386_TLS_TPOFF)
                    value = -(TLSSTATOFF(_lmp) - value);
            }
        } else {
            /*
             * Special case:
             *
             * A DTPMOD32 relocation is a local binding to a TLS
             * symbol.  Fill in the TLSMODID for the current object.
             */
            if (rtype == R_386_TLS_DTPMOD32)
                value = TLSMODID(lmp);
            else
                value = basebgn;
            name = (char *)0;
        }

        /*
         * If this object has relocations in the text segment, turn
         * off the write protect.
         */
        if ((roffset < etext) && (textrel == 0)) {
            if (elf_set_prot(lmp, PROT_WRITE) == 0) {
                ret = 0;
                break;
            }
            textrel = 1;
        }

        /*
         * Call relocation routine to perform required relocation.
         */
        DBG_CALL(Dbg_reloc_in(M_MACH, M_REL_SHT_TYPE, rel, name, NULL));

        switch (rtype) {
        case R_386_COPY:
            if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
                symdef, _lmp, (const void *)value) == 0)
                ret = 0;
            break;
        case R_386_JMP_SLOT:
            if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
                (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
                AUDINFO(lmp)->ai_dynplts) {
                int fail = 0;
                int pltndx = (((ulong_t)rel -
                    (uintptr_t)JMPREL(lmp)) / relsiz);
                int symndx = (((uintptr_t)symdef -
                    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

                (void) elf_plt_trace_write(roffset, lmp, _lmp,
                    symdef, symndx, pltndx, (caddr_t)value,
                    sb_flags, &fail);
                if (fail)
                    ret = 0;
            } else {
                /*
                 * Write standard PLT entry to jump directly
                 * to newly bound function.
                 */
                DBG_CALL(Dbg_reloc_apply((Xword)roffset,
                    (Xword)value));
                *(ulong_t *)roffset = value;
            }
            break;
        default:
            /*
             * Write the relocation out.
             */
            if (do_reloc(rtype, (uchar_t *)roffset,
                (Word *)&value, name, NAME(lmp)) == 0)
                ret = 0;

            DBG_CALL(Dbg_reloc_apply((Xword)roffset,
                (Xword)value));
        }

        if ((ret == 0) &&
            ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
            break;

        if (binfo) {
            DBG_CALL(Dbg_bind_global(NAME(lmp), (caddr_t)roffset,
                (caddr_t)(roffset - basebgn), (Xword)(-1),
                PLT_T_FULL, NAME(_lmp), (caddr_t)value,
                (caddr_t)symdef->st_value, name, binfo));
        }
    }

    return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *      GOT[GOT_XLINKMAP] = the address of the link map
 *      GOT[GOT_XRTLD] =    the address of rtbinder
 */
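/*
 * GOT[0] is reserved by the link-editor for the address of the .dynamic
 * section; M_GOT_XLINKMAP and M_GOT_XRTLD name the following reserved
 * entries.  plt0 pushes GOT[M_GOT_XLINKMAP] (the link map) and jumps
 * through GOT[M_GOT_XRTLD] (elf_rtbndr), which collects the relocation
 * offset pushed by the individual plt entry and calls elf_bndr() above to
 * resolve the symbol and patch the got entry.
 */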
void
elf_plt_init(void *got, caddr_t l)
{
    uint_t *_got;
    /* LINTED */
    Rt_map *lmp = (Rt_map *)l;

    _got = (uint_t *)got + M_GOT_XLINKMAP;
    *_got = (uint_t)lmp;
    _got = (uint_t *)got + M_GOT_XRTLD;
    *_got = (uint_t)elf_rtbndr;
}

/*
 * For SVR4 Intel compatibility.  USL uses /usr/lib/libc.so.1 as the run-time
 * linker, so the interpreter's address will differ from /usr/lib/ld.so.1.
 * Further, USL has special _iob[] and _ctype[] processing that makes up for
 * the fact that these arrays do not have associated copy relocations.  So we
 * try and make up for that here.  Any relocations found will be added to the
 * global copy relocation list and will be processed in setup().
 */
static int
_elf_copy_reloc(const char *name, Rt_map *rlmp, Rt_map *dlmp)
{
    Sym *symref, *symdef;
    caddr_t ref, def;
    Rt_map *_lmp;
    Rel rel;
    Slookup sl;
    uint_t binfo;

    /*
     * Determine if the special symbol exists as a reference in the dynamic
     * executable, and that an associated definition exists in libc.so.1.
     */
    sl.sl_name = name;
    sl.sl_cmap = rlmp;
    sl.sl_imap = rlmp;
    sl.sl_hash = 0;
    sl.sl_rsymndx = 0;
    sl.sl_flags = LKUP_FIRST;

    if ((symref = lookup_sym(&sl, &_lmp, &binfo)) == 0)
        return (1);

    sl.sl_imap = dlmp;
    sl.sl_flags = LKUP_DEFT;

    if ((symdef = lookup_sym(&sl, &_lmp, &binfo)) == 0)
        return (1);
    if (strcmp(NAME(_lmp), MSG_ORIG(MSG_PTH_LIBC)))
        return (1);

    /*
     * Determine the reference and definition addresses.
     */
    ref = (void *)(symref->st_value);
    if (!(FLAGS(rlmp) & FLG_RT_FIXED))
        ref += ADDR(rlmp);
    def = (void *)(symdef->st_value);
    if (!(FLAGS(_lmp) & FLG_RT_FIXED))
        def += ADDR(_lmp);

    /*
     * Set up a relocation entry for debugging and call the generic copy
     * relocation function to provide symbol size error checking and to
     * record the copy relocation that must be performed.
     */
    rel.r_offset = (Addr)ref;
    rel.r_info = (Word)R_386_COPY;
    DBG_CALL(Dbg_reloc_in(M_MACH, M_REL_SHT_TYPE, &rel, name, 0));

    return (elf_copy_reloc((char *)name, symref, rlmp, (void *)ref, symdef,
        _lmp, (void *)def));
}

int
elf_copy_gen(Rt_map *lmp)
{
    if (interp && ((ulong_t)interp->i_faddr !=
        r_debug.rtd_rdebug.r_ldbase) &&
        !(strcmp(interp->i_name, MSG_ORIG(MSG_PTH_LIBC)))) {

        DBG_CALL(Dbg_reloc_run(NAME(lmp), M_REL_SHT_TYPE, 0,
            DBG_REL_START));

        if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_CTYPE), lmp,
            (Rt_map *)NEXT(lmp)) == 0)
            return (0);
        if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_IOB), lmp,
            (Rt_map *)NEXT(lmp)) == 0)
            return (0);
    }
    return (1);
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
    Rel *rel = (Rel *)rptr;
    uintptr_t pltaddr;

    pltaddr = addr + rel->r_offset;
    *(ulong_t *)pltaddr = (ulong_t)symval;
    DBG_CALL(pltcntfull++);
    return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type_str(uint_t rel)
{
    return (conv_reloc_386_type_str(rel));
}