/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * x86 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include "_synonyms.h"

#include <stdio.h>
#include <sys/elf.h>
#include <sys/elf_386.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <synch.h>
#include <string.h>
#include <debug.h>
#include <reloc.h>
#include <conv.h>
#include "_rtld.h"
#include "_audit.h"
#include "_elf.h"
#include "msg.h"


extern void elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
    /*
     * Check machine type and flags.
     */
    if (ehdr->e_flags != 0) {
        rej->rej_type = SGS_REJ_BADFLAG;
        rej->rej_info = (uint_t)ehdr->e_flags;
        return (0);
    }
    return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
    /*
     * There is no need to analyze ld.so because we don't map in any of
     * its dependencies.  However, we may map these dependencies in later
     * (as if ld.so had dlopened them), so initialize the plt and the
     * permission information.
     */
    if (PLTGOT(lmp))
        elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */  0x55,                                   /* pushl %ebp */
/* 0x01 */  0x8b, 0xec,                             /* movl %esp, %ebp */
/* 0x03 */  0x68, 0x00, 0x00, 0x00, 0x00,           /* pushl trace_fields */
/* 0x08 */  0xe9, 0xfc, 0xff, 0xff, 0xff, 0xff      /* jmp elf_plt_trace */
};
int dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *	pushl	%ebp
 *	movl	%esp, %ebp
 *	pushl	tfp
 *	jmp	elf_plt_trace
 * dyn_data:
 *	.align	4
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(uint_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
    extern int  elf_plt_trace();
    ulong_t     got_entry;
    uchar_t     *dyn_plt;
    uintptr_t   *dyndata;

    /*
     * We only need to add the glue code if there is an auditing
     * library that is interested in this binding.
     */
    dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
        (pltndx * dyn_plt_ent_size));

    /*
     * Have we initialized this dynamic plt entry yet?  If we haven't, do it
     * now.  Otherwise this function has been called before, but from a
     * different plt (i.e. from another shared object).  In that case
     * we just set the plt to point to the new dyn_plt.
     */
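    /*
     * Editorial sketch (not part of the original source): assuming the
     * template above, dyn_plt[4] is the 32-bit immediate of the "pushl"
     * (patched below with the address of dyn_data), and dyn_plt[9] is the
     * 32-bit displacement of the "jmp" (patched below to reach
     * elf_plt_trace).  dyn_data begins at the next M_WORD_ALIGN boundary
     * past the template:
     *
     *	dyn_plt + 0x00:	pushl %ebp; movl %esp, %ebp
     *	dyn_plt + 0x03:	pushl $dyn_data
     *	dyn_plt + 0x08:	jmp   elf_plt_trace
     *	dyn_data:	reflmp, deflmp, symndx, sb_flags, symdef (Sym)
     */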
    if (*dyn_plt == 0) {
        Sym     *symp;
        Word    symvalue;
        Lm_list *lml = LIST(rlmp);

        (void) memcpy((void *)dyn_plt, dyn_plt_template,
            sizeof (dyn_plt_template));
        dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
            ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

        /*
         * relocate:
         *	pushl	dyn_data
         */
        symvalue = (Word)dyndata;
        if (do_reloc(R_386_32, &dyn_plt[4], &symvalue,
            MSG_ORIG(MSG_SYM_LADYNDATA),
            MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
            *fail = 1;
            return (0);
        }

        /*
         * jmps are relative, so we need to figure out the relative
         * address to elf_plt_trace.
         *
         * relocating:
         *	jmp	elf_plt_trace
         */
        symvalue = (ulong_t)(elf_plt_trace) - (ulong_t)(dyn_plt + 9);
        if (do_reloc(R_386_PC32, &dyn_plt[9], &symvalue,
            MSG_ORIG(MSG_SYM_ELFPLTTRACE),
            MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
            *fail = 1;
            return (0);
        }

        *dyndata++ = (uintptr_t)rlmp;
        *dyndata++ = (uintptr_t)dlmp;
        *dyndata++ = (uint_t)symndx;
        *dyndata++ = (uint_t)sb_flags;
        symp = (Sym *)dyndata;
        *symp = *sym;
        symp->st_name += (Word)STRTAB(dlmp);
        symp->st_value = (Addr)to;
    }

    got_entry = (ulong_t)roffset;
    *(ulong_t *)got_entry = (ulong_t)dyn_plt;
    return ((caddr_t)dyn_plt);
}


/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; passes first through an assembly language
 * interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes the process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t reloff, caddr_t from)
{
    Rt_map      *nlmp, *llmp;
    ulong_t     addr, symval, rsymndx;
    char        *name;
    Rel         *rptr;
    Sym         *sym, *nsym;
    uint_t      binfo, sb_flags = 0, dbg_class;
    Slookup     sl;
    int         entry, lmflags;
    Lm_list     *lml;

    /*
     * For compatibility with libthread (TI_VERSION 1) we track the entry
     * value.  A zero value indicates we have recursed into ld.so.1 to
     * further process a locking request.  Under this recursion we disable
     * tsort and cleanup activities.
     */
    entry = enter();

    lml = LIST(lmp);
    if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
        dbg_class = dbg_desc->d_class;
        dbg_desc->d_class = 0;
    }

    /*
     * Perform some basic sanity checks.  If we didn't get a load map, or the
     * relocation offset is invalid, then it's possible someone has walked
     * over the .got entries or jumped to plt0 out of the blue.
     */
    if (!lmp || ((reloff % sizeof (Rel)) != 0)) {
        eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
            conv_reloc_386_type(R_386_JMP_SLOT, 0),
            EC_NATPTR(lmp), EC_XWORD(reloff), EC_NATPTR(from));
        rtldexit(lml, 1);
    }

    /*
     * Use the relocation entry to get the symbol table entry and symbol name.
     */
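    /*
     * Illustrative note (editorial, not in the original): reloff is a byte
     * offset into the .rel.plt table, so with the 8-byte Elf32 Rel entries
     * used on x86, a reloff of 16 selects the third Rel entry (plt index 2).
     * That entry's r_info field yields the symbol index used to locate the
     * Sym and its name below.
     */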
    addr = (ulong_t)JMPREL(lmp);
    rptr = (Rel *)(addr + reloff);
    rsymndx = ELF_R_SYM(rptr->r_info);
    sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
    name = (char *)(STRTAB(lmp) + sym->st_name);

    /*
     * Determine the last link-map of this list; this will be the starting
     * point for any tsort() processing.
     */
    llmp = lml->lm_tail;

    /*
     * Find the definition for the symbol.
     */
    sl.sl_name = name;
    sl.sl_cmap = lmp;
    sl.sl_imap = lml->lm_head;
    sl.sl_hash = 0;
    sl.sl_rsymndx = rsymndx;
    sl.sl_flags = LKUP_DEFT;

    if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
        eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
            demangle(name));
        rtldexit(lml, 1);
    }

    symval = nsym->st_value;
    if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
        (nsym->st_shndx != SHN_ABS))
        symval += ADDR(nlmp);
    if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
        /*
         * Record that this new link map is now bound to the caller.
         */
        if (bind_one(lmp, nlmp, BND_REFER) == 0)
            rtldexit(lml, 1);
    }

    if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
        uint_t  symndx = (((uintptr_t)nsym -
            (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
        symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
            &sb_flags);
    }

    if (!(rtld_flags & RT_FL_NOBIND)) {
        addr = rptr->r_offset;
        if (!(FLAGS(lmp) & FLG_RT_FIXED))
            addr += ADDR(lmp);
        if (((lml->lm_tflags | FLAGS1(lmp)) &
            (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
            AUDINFO(lmp)->ai_dynplts) {
            int     fail = 0;
            uint_t  pltndx = reloff / sizeof (Rel);
            uint_t  symndx = (((uintptr_t)nsym -
                (uintptr_t)SYMTAB(nlmp)) /
                SYMENT(nlmp));

            symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
                nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
                &fail);
            if (fail)
                rtldexit(lml, 1);
        } else {
            /*
             * Write a standard PLT entry to jump directly
             * to the newly bound function.
             */
            *(ulong_t *)addr = symval;
        }
    }

    /*
     * Print binding information and rebuild the PLT entry.
     */
    DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
        (Xword)(reloff / sizeof (Rel)), PLT_T_FULL, nlmp, (Addr)symval,
        nsym->st_value, name, binfo));

    /*
     * Complete any processing for newly loaded objects.  Note we don't
     * know exactly where any new objects are loaded (we know the object
     * that supplied the symbol, but others may have been loaded lazily as
     * we searched for the symbol), so sorting starts from the last
     * link-map known on entry to this routine.
     */
    if (entry)
        load_completion(llmp, lmp);

    /*
     * Some operations, like dldump() or dlopen()'ing a relocatable object,
     * result in objects being loaded on rtld's link-map; make sure these
     * objects are initialized also.
     */
    if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
        load_completion(nlmp, 0);

    /*
     * If the object we've bound to is in the process of being initialized
     * by another thread, determine whether we should block.
     */
    is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

    /*
     * Make sure the object to which we've bound has had its .init fired.
     * Clean up before returning to user code.
     */
    if (entry) {
        is_dep_init(nlmp, lmp);
        leave(lml);
    }

    if (lmflags & LML_FLG_RTLDLM)
        dbg_desc->d_class = dbg_class;

    return (symval);
}


/*
 * When the relocation loop realizes that it's dealing with relative
 * relocations in a shared object, it breaks into this tighter loop
 * as an optimization.
 */
ulong_t
elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz,
    ulong_t basebgn, ulong_t etext, ulong_t emap)
{
    ulong_t roffset = ((Rel *)relbgn)->r_offset;
    char    rtype;

    do {
        roffset += basebgn;

        /*
         * If this relocation is against an address not mapped in,
         * then break out of the relative relocation loop, falling
         * back on the main relocation loop.
         */
        if (roffset < etext || roffset > emap)
            break;

        /*
         * Perform the actual relocation.
         */
        *((ulong_t *)roffset) += basebgn;

        relbgn += relsiz;

        if (relbgn >= relend)
            break;

        rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info);
        roffset = ((Rel *)relbgn)->r_offset;

    } while (rtype == R_386_RELATIVE);

    return (relbgn);
}

/*
 * This is the tightest loop for RELATIVE relocations, for those
 * objects built with the DT_RELACOUNT .dynamic entry.
 */
ulong_t
elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz,
    ulong_t basebgn)
{
    ulong_t roffset = ((Rel *)relbgn)->r_offset;

    for (; relacount; relacount--) {
        roffset += basebgn;

        /*
         * Perform the actual relocation.
         */
        *((ulong_t *)roffset) += basebgn;

        relbgn += relsiz;

        roffset = ((Rel *)relbgn)->r_offset;
    }

    return (relbgn);
}

/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
    ulong_t relbgn, relend, relsiz, basebgn;
    ulong_t pltbgn, pltend, _pltbgn, _pltend;
    ulong_t roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
    ulong_t emap, dsymndx;
    uchar_t rtype;
    long    value, pvalue;
    Sym     *symref, *psymref, *symdef, *psymdef;
    char    *name, *pname;
    Rt_map  *_lmp, *plmp;
    int     textrel = 0, ret = 1, noplt = 0;
    int     relacount = RELACOUNT(lmp), plthint = 0;
    Rel     *rel;
    uint_t  binfo, pbinfo;
    Alist   *bound = 0;

    /*
     * Although only necessary for lazy binding, initialize the first
     * global offset entry to go to elf_rtbndr().  dbx(1) seems
     * to find this useful.
     */
    if ((plt == 0) && PLTGOT(lmp)) {
        if ((ulong_t)PLTGOT(lmp) < etext) {
            if (elf_set_prot(lmp, PROT_WRITE) == 0)
                return (0);
            textrel = 1;
        }
        elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
    }

    /*
     * Initialize the plt start and end addresses.
     */
    if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
        pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));


    relsiz = (ulong_t)(RELENT(lmp));
    basebgn = ADDR(lmp);
    emap = ADDR(lmp) + MSIZE(lmp);

    if (PLTRELSZ(lmp))
        plthint = PLTRELSZ(lmp) / relsiz;

    /*
     * If we've been called upon to promote an RTLD_LAZY object to an
     * RTLD_NOW, then we're only interested in scanning the .plt table.
     * An uninitialized .plt is the case where the associated got entry
     * points back to the plt itself.  Determine the range of the real .plt
     * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
     */
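    /*
     * Editorial note (not in the original): when promoting, only those .got
     * entries whose contents still point back into the computed
     * [_pltbgn, _pltend] range need to be resolved; entries whose contents
     * already lie outside that range were bound on an earlier pass through
     * elf_bndr() and are skipped by the range test further below.
     */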
    if (plt) {
        Slookup sl;

        relbgn = pltbgn;
        relend = pltend;
        if (!relbgn || (relbgn == relend))
            return (1);

        sl.sl_name = MSG_ORIG(MSG_SYM_PLT);
        sl.sl_cmap = lmp;
        sl.sl_imap = lmp;
        sl.sl_hash = elf_hash(MSG_ORIG(MSG_SYM_PLT));
        sl.sl_rsymndx = 0;
        sl.sl_flags = LKUP_DEFT;

        if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0)
            return (1);

        _pltbgn = symdef->st_value;
        if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
            (symdef->st_shndx != SHN_ABS))
            _pltbgn += basebgn;
        _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
            M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

    } else {
        /*
         * The relocation sections appear to the run-time linker as a
         * single table.  Determine the address of the beginning and end
         * of this table.  There are two different interpretations of
         * the ABI at this point:
         *
         *   o	The REL table and its associated RELSZ indicate the
         *	concatenation of *all* relocation sections (this is the
         *	model our link-editor constructs).
         *
         *   o	The REL table and its associated RELSZ indicate the
         *	concatenation of all *but* the .plt relocations.  These
         *	relocations are specified individually by the JMPREL and
         *	PLTRELSZ entries.
         *
         * Determine, from our knowledge of the relocation range and
         * .plt range, the range of the total relocation table.  Note
         * that one other ABI assumption seems to be that the .plt
         * relocations always follow any other relocations; the
         * following range checking drops that assumption.
         */
        relbgn = (ulong_t)(REL(lmp));
        relend = relbgn + (ulong_t)(RELSZ(lmp));
        if (pltbgn) {
            if (!relbgn || (relbgn > pltbgn))
                relbgn = pltbgn;
            if (!relbgn || (relend < pltend))
                relend = pltend;
        }
    }
    if (!relbgn || (relbgn == relend)) {
        DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
        return (1);
    }
    DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

    /*
     * If we're processing a dynamic executable in lazy mode there is no
     * need to scan the .rel.plt table; however, if we're processing a shared
     * object in lazy mode, the .got addresses associated with each .plt must
     * be relocated to reflect the location of the shared object.
     */
    if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
        (FLAGS(lmp) & FLG_RT_FIXED))
        noplt = 1;

    /*
     * Loop through relocations.
     */
    while (relbgn < relend) {
        uint_t  sb_flags = 0;

        rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info);

        /*
         * If this is a RELATIVE relocation in a shared object (the
         * common case), and if we are not debugging, then jump into a
         * tighter relocation loop (elf_reloc_relative).  Only make the
         * jump if we've been given a hint on the number of relocations.
         */
        if ((rtype == R_386_RELATIVE) &&
            ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
            /*
             * It's possible that the relative relocation block
             * has relocations against the text segment as well
             * as the data segment.  Since our optimized relocation
             * engine does not check which segment the relocation
             * is against, just mprotect it now if it has been
             * marked as containing TEXTRELs.
             */
            if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
                if (elf_set_prot(lmp, PROT_WRITE) == 0) {
                    ret = 0;
                    break;
                }
                textrel = 1;
            }

            if (relacount) {
                relbgn = elf_reloc_relacount(relbgn, relacount,
                    relsiz, basebgn);
                relacount = 0;
            } else {
                relbgn = elf_reloc_relative(relbgn, relend,
                    relsiz, basebgn, etext, emap);
            }
            if (relbgn >= relend)
                break;
            rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info);
        }

        roffset = ((Rel *)relbgn)->r_offset;

        /*
         * If this is a shared object, add the base address to the offset.
         */
        if (!(FLAGS(lmp) & FLG_RT_FIXED)) {

            /*
             * If we're processing lazy bindings, we have to step
             * through the plt entries and add the base address
             * to the corresponding got entry.
             */
            if (plthint && (plt == 0) &&
                (rtype == R_386_JMP_SLOT) &&
                ((MODE(lmp) & RTLD_NOW) == 0)) {
                relbgn = elf_reloc_relacount(relbgn,
                    plthint, relsiz, basebgn);
                plthint = 0;
                continue;
            }
            roffset += basebgn;
        }

        rsymndx = ELF_R_SYM(((Rel *)relbgn)->r_info);
        rel = (Rel *)relbgn;
        relbgn += relsiz;

        /*
         * Optimizations.
         */
        if (rtype == R_386_NONE)
            continue;
        if (noplt && ((ulong_t)rel >= pltbgn) &&
            ((ulong_t)rel < pltend)) {
            relbgn = pltend;
            continue;
        }

        /*
         * If we're promoting plts, determine if this one has already
         * been written.
         */
        if (plt) {
            if ((*(ulong_t *)roffset < _pltbgn) ||
                (*(ulong_t *)roffset > _pltend))
                continue;
        }

        /*
         * If this relocation is not against part of the image
         * mapped into memory, we skip it.
         */
        if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
            MSIZE(lmp)))) {
            elf_reloc_bad(lmp, (void *)rel,
                rtype, roffset, rsymndx);
            continue;
        }

        binfo = 0;
        /*
         * If a symbol index is specified, then get the symbol table
         * entry, locate the symbol definition, and determine its
         * address.
         */
        if (rsymndx) {
            /*
             * Get the local symbol table entry.
             */
            symref = (Sym *)((ulong_t)SYMTAB(lmp) +
                (rsymndx * SYMENT(lmp)));

            /*
             * If this is a local symbol, just use the base address.
             * (We should have no local relocations in the
             * executable).
             */
            if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
                value = basebgn;
                name = (char *)0;

                /*
                 * Special case TLS relocations.
                 */
                if (rtype == R_386_TLS_DTPMOD32) {
                    /*
                     * Use the TLS modid.
                     */
                    value = TLSMODID(lmp);

                } else if (rtype == R_386_TLS_TPOFF) {
                    if ((value = elf_static_tls(lmp, symref,
                        rel, rtype, 0, roffset, 0)) == 0) {
                        ret = 0;
                        break;
                    }
                }
            } else {
                /*
                 * If the symbol index is the same as that of the
                 * previous relocation we processed, then reuse the
                 * previous values.  (Note that there have been
                 * cases where a relocation exists against a copy
                 * relocation symbol; our ld(1) should optimize this
                 * away, but make sure we don't use the same symbol
                 * information should this case exist).
                 */
                if ((rsymndx == psymndx) &&
                    (rtype != R_386_COPY)) {
                    /* LINTED */
                    if (psymdef == 0) {
                        DBG_CALL(Dbg_bind_weak(lmp,
                            (Addr)roffset, (Addr)
                            (roffset - basebgn), name));
                        continue;
                    }
                    /* LINTED */
                    value = pvalue;
                    /* LINTED */
                    name = pname;
                    /* LINTED */
                    symdef = psymdef;
                    /* LINTED */
                    symref = psymref;
                    /* LINTED */
                    _lmp = plmp;
                    /* LINTED */
                    binfo = pbinfo;

                    if ((LIST(_lmp)->lm_tflags |
                        FLAGS1(_lmp)) &
                        LML_TFLG_AUD_SYMBIND) {
                        value = audit_symbind(lmp, _lmp,
                            /* LINTED */
                            symdef, dsymndx, value,
                            &sb_flags);
                    }
                } else {
                    Slookup     sl;
                    uchar_t     bind;

                    /*
                     * Lookup the symbol definition.
                     */
                    name = (char *)(STRTAB(lmp) +
                        symref->st_name);

                    sl.sl_name = name;
                    sl.sl_cmap = lmp;
                    sl.sl_imap = 0;
                    sl.sl_hash = 0;
                    sl.sl_rsymndx = rsymndx;

                    if (rtype == R_386_COPY)
                        sl.sl_flags = LKUP_COPY;
                    else
                        sl.sl_flags = LKUP_DEFT;

                    sl.sl_flags |= LKUP_ALLCNTLIST;

                    if (rtype != R_386_JMP_SLOT)
                        sl.sl_flags |= LKUP_SPEC;

                    bind = ELF_ST_BIND(symref->st_info);
                    if (bind == STB_WEAK)
                        sl.sl_flags |= LKUP_WEAK;

                    symdef = lookup_sym(&sl, &_lmp, &binfo);

                    /*
                     * If the symbol is not found and the
                     * reference was not to a weak symbol,
                     * report an error.  Weak references
                     * may be unresolved.
                     * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND)
                     */
                    if (symdef == 0) {
                        Lm_list *lml = LIST(lmp);

                        if (bind != STB_WEAK) {
                            if (lml->lm_flags &
                                LML_FLG_IGNRELERR) {
                                continue;
                            } else if (lml->lm_flags &
                                LML_FLG_TRC_WARN) {
                                (void) printf(MSG_INTL(
                                    MSG_LDD_SYM_NFOUND),
                                    demangle(name),
                                    NAME(lmp));
                                continue;
                            } else {
                                DBG_CALL(Dbg_reloc_in(lml,
                                    ELF_DBG_RTLD, M_MACH,
                                    M_REL_SHT_TYPE, rel,
                                    NULL, name));
                                eprintf(lml, ERR_FATAL,
                                    MSG_INTL(MSG_REL_NOSYM),
                                    NAME(lmp),
                                    demangle(name));
                                ret = 0;
                                break;
                            }
                        } else {
                            psymndx = rsymndx;
                            psymdef = 0;

                            DBG_CALL(Dbg_bind_weak(lmp,
                                (Addr)roffset, (Addr)
                                (roffset - basebgn), name));
                            continue;
                        }
                    }

                    /*
                     * If the symbol was found in an object
                     * other than the referencing object,
                     * then record the binding.
                     */
                    if ((lmp != _lmp) && ((FLAGS1(_lmp) &
                        FL1_RT_NOINIFIN) == 0)) {
                        if (alist_test(&bound, _lmp,
                            sizeof (Rt_map *),
                            AL_CNT_RELBIND) == 0) {
                            ret = 0;
                            break;
                        }
                    }

                    /*
                     * Calculate the location of definition;
                     * symbol value plus base address of
                     * containing shared object.
                     */
                    if (IS_SIZE(rtype))
                        value = symdef->st_size;
                    else
                        value = symdef->st_value;

                    if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
                        !(IS_SIZE(rtype)) &&
                        (symdef->st_shndx != SHN_ABS) &&
                        (ELF_ST_TYPE(symdef->st_info) !=
                        STT_TLS))
                        value += ADDR(_lmp);

                    /*
                     * Retain this symbol index and the
                     * value in case it can be used for
                     * subsequent relocations.
                     */
                    if (rtype != R_386_COPY) {
                        psymndx = rsymndx;
                        pvalue = value;
                        pname = name;
                        psymdef = symdef;
                        psymref = symref;
                        plmp = _lmp;
                        pbinfo = binfo;
                    }
                    if ((LIST(_lmp)->lm_tflags |
                        FLAGS1(_lmp)) &
                        LML_TFLG_AUD_SYMBIND) {
                        dsymndx = (((uintptr_t)symdef -
                            (uintptr_t)SYMTAB(_lmp)) /
                            SYMENT(_lmp));
                        value = audit_symbind(lmp, _lmp,
                            symdef, dsymndx, value,
                            &sb_flags);
                    }
                }

                /*
                 * If the relocation is PC-relative, subtract the
                 * offset address.
                 */
                if (IS_PC_RELATIVE(rtype))
                    value -= roffset;

                /*
                 * Special case TLS relocations.
                 */
                if (rtype == R_386_TLS_DTPMOD32) {
                    /*
                     * Relocation value is the TLS modid.
                     */
                    value = TLSMODID(_lmp);

                } else if (rtype == R_386_TLS_TPOFF) {
                    if ((value = elf_static_tls(_lmp,
                        symdef, rel, rtype, name, roffset,
                        value)) == 0) {
                        ret = 0;
                        break;
                    }
                }
            }
        } else {
            /*
             * Special cases.
             */
            if (rtype == R_386_TLS_DTPMOD32) {
                /*
                 * TLS relocation value is the TLS modid.
                 */
                value = TLSMODID(lmp);
            } else
                value = basebgn;
            name = (char *)0;
        }

        DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
            M_REL_SHT_TYPE, rel, NULL, name));

        /*
         * If this object has relocations in the text segment, turn
         * off the write protect.
         */
        if ((roffset < etext) && (textrel == 0)) {
            if (elf_set_prot(lmp, PROT_WRITE) == 0) {
                ret = 0;
                break;
            }
            textrel = 1;
        }

        /*
         * Call the relocation routine to perform the required relocation.
         */
        switch (rtype) {
        case R_386_COPY:
            if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
                symdef, _lmp, (const void *)value) == 0)
                ret = 0;
            break;
        case R_386_JMP_SLOT:
            if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
                (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
                AUDINFO(lmp)->ai_dynplts) {
                int fail = 0;
                int pltndx = (((ulong_t)rel -
                    (uintptr_t)JMPREL(lmp)) / relsiz);
                int symndx = (((uintptr_t)symdef -
                    (uintptr_t)SYMTAB(_lmp)) /
                    SYMENT(_lmp));

                (void) elf_plt_trace_write(roffset, lmp, _lmp,
                    symdef, symndx, pltndx, (caddr_t)value,
                    sb_flags, &fail);
                if (fail)
                    ret = 0;
            } else {
                /*
                 * Write a standard PLT entry to jump directly
                 * to the newly bound function.
                 */
                DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
                    ELF_DBG_RTLD, (Xword)roffset,
                    (Xword)value));
                *(ulong_t *)roffset = value;
            }
            break;
        default:
            /*
             * Write the relocation out.
             */
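            /*
             * Editorial note (not in the original): x86 uses the Elf32
             * Rel format, so there is no explicit addend in the
             * relocation record; do_reloc() is expected to combine the
             * value computed above with whatever implicit addend is
             * already stored at roffset.  Any PC-relative adjustment
             * (value -= roffset) has already been applied earlier in
             * the loop.
             */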
            if (do_reloc(rtype, (uchar_t *)roffset, (Word *)&value,
                name, NAME(lmp), LIST(lmp)) == 0)
                ret = 0;

            DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
                (Xword)roffset, (Xword)value));
        }

        if ((ret == 0) &&
            ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
            break;

        if (binfo) {
            DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
                (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
                _lmp, (Addr)value, symdef->st_value, name, binfo));
        }
    }

    return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
    uint_t  *_got;
    /* LINTED */
    Rt_map  *lmp = (Rt_map *)l;

    _got = (uint_t *)got + M_GOT_XLINKMAP;
    *_got = (uint_t)lmp;
    _got = (uint_t *)got + M_GOT_XRTLD;
    *_got = (uint_t)elf_rtbndr;
}

/*
 * For SVR4 Intel compatibility.  USL uses /usr/lib/libc.so.1 as the run-time
 * linker, so the interpreter's address will differ from /usr/lib/ld.so.1.
 * Further, USL has special _iob[] and _ctype[] processing that makes up for
 * the fact that these arrays do not have associated copy relocations.  So we
 * try to make up for that here.  Any relocations found will be added to the
 * global copy relocation list and will be processed in setup().
 */
static int
_elf_copy_reloc(const char *name, Rt_map *rlmp, Rt_map *dlmp)
{
    Sym         *symref, *symdef;
    caddr_t     ref, def;
    Rt_map      *_lmp;
    Rel         rel;
    Slookup     sl;
    uint_t      binfo;

    /*
     * Determine if the special symbol exists as a reference in the dynamic
     * executable, and that an associated definition exists in libc.so.1.
     */
    sl.sl_name = name;
    sl.sl_cmap = rlmp;
    sl.sl_imap = rlmp;
    sl.sl_hash = 0;
    sl.sl_rsymndx = 0;
    sl.sl_flags = LKUP_FIRST;

    if ((symref = lookup_sym(&sl, &_lmp, &binfo)) == 0)
        return (1);

    sl.sl_imap = dlmp;
    sl.sl_flags = LKUP_DEFT;

    if ((symdef = lookup_sym(&sl, &_lmp, &binfo)) == 0)
        return (1);
    if (strcmp(NAME(_lmp), MSG_ORIG(MSG_PTH_LIBC)))
        return (1);

    /*
     * Determine the reference and definition addresses.
     */
    ref = (void *)(symref->st_value);
    if (!(FLAGS(rlmp) & FLG_RT_FIXED))
        ref += ADDR(rlmp);
    def = (void *)(symdef->st_value);
    if (!(FLAGS(_lmp) & FLG_RT_FIXED))
        def += ADDR(_lmp);

    /*
     * Set up a relocation entry for debugging and call the generic copy
     * relocation function to provide symbol size error checking and to
     * record the copy relocation that must be performed.
     */
    rel.r_offset = (Addr)ref;
    rel.r_info = (Word)R_386_COPY;
    DBG_CALL(Dbg_reloc_in(LIST(rlmp), ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE,
        &rel, NULL, name));

    return (elf_copy_reloc((char *)name, symref, rlmp, (void *)ref, symdef,
        _lmp, (void *)def));
}

int
elf_copy_gen(Rt_map *lmp)
{
    if (interp && ((ulong_t)interp->i_faddr !=
        r_debug.rtd_rdebug.r_ldbase) &&
        !(strcmp(interp->i_name, MSG_ORIG(MSG_PTH_LIBC)))) {

        DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, 0,
            DBG_REL_START));

        if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_CTYPE), lmp,
            (Rt_map *)NEXT(lmp)) == 0)
            return (0);
        if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_IOB), lmp,
            (Rt_map *)NEXT(lmp)) == 0)
            return (0);
    }
    return (1);
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
    Rel         *rel = (Rel *)rptr;
    uintptr_t   pltaddr;

    pltaddr = addr + rel->r_offset;
    *(ulong_t *)pltaddr = (ulong_t)symval;
    DBG_CALL(pltcntfull++);
    return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
    return (conv_reloc_386_type(rel, 0));
}