/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * x86 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_386.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"


extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

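/*
 * Audit support.  When an auditor has registered interest in a plt
 * binding (la_pltenter()/la_pltexit()), the resolved address is not
 * written directly into the .got.  Instead, elf_plt_trace_write() builds
 * a small per-plt-entry glue routine from the template below in the
 * preallocated AUDINFO(lmp)->ai_dynplts area, and points the .got entry
 * at that glue so that every call is routed through elf_plt_trace().
 */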
static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,					/* pushl %ebp */
/* 0x01 */	0x8b, 0xec,				/* movl %esp, %ebp */
/* 0x03 */	0x68, 0x00, 0x00, 0x00, 0x00,		/* pushl trace_fields */
/* 0x08 */	0xe9, 0xfc, 0xff, 0xff, 0xff, 0xff	/* jmp  elf_plt_trace */
};
int	dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *	pushl	%ebp
 *	movl	%esp, %ebp
 *	pushl	tfp
 *	jmp	elf_plt_trace
 * dyn_data:
 *	.align	4
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(uint_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't, do
	 * it now.  Otherwise this function has been called before, but from
	 * a different plt (i.e. from another shared object).  In that case
	 * we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Word	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	pushl	dyn_data
		 */
		symvalue = (Word)dyndata;
		if (do_reloc_rtld(R_386_32, &dyn_plt[4], &symvalue,
		    MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * jmps are relative, so I need to figure out the relative
		 * address to elf_plt_trace.
		 *
		 * relocating:
		 *	jmp	elf_plt_trace
		 */
		symvalue = (ulong_t)(elf_plt_trace) - (ulong_t)(dyn_plt + 9);
		if (do_reloc_rtld(R_386_PC32, &dyn_plt[9], &symvalue,
		    MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata++ = (uint_t)symndx;
		*dyndata++ = (uint_t)sb_flags;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_name += (Word)STRTAB(dlmp);
		symp->st_value = (Addr)to;
	}

	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}

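/*
 * With the glue in place, a call through an audited plt entry flows
 * roughly as follows:
 *
 *	caller
 *	 -> .plt entry
 *	  -> .got entry (points at the dyn_plt glue)
 *	   -> dyn_plt:  pushl $dyn_data; jmp elf_plt_trace
 *	    -> elf_plt_trace():  la_pltenter()/la_pltexit() processing
 *	     -> bound function
 *
 * Without auditing, elf_bndr() below simply writes the resolved address
 * into the .got entry and later calls no longer enter ld.so.1 at all.
 */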

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t reloff, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, symval, rsymndx;
	char		*name;
	Rel		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter();

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map or
	 * the relocation offset is invalid then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if (!lmp || ((reloff % sizeof (Rel)) != 0)) {
		Conv_inv_buf_t	inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_386_type(R_386_JMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(reloff), EC_NATPTR(from));
		rtldexit(lml, 1);
	}

	/*
	 * Use the relocation entry to get the symbol table entry and symbol
	 * name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rel *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find the definition for the symbol.  Initialize the symbol lookup
	 * data structure.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo, NULL)) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | FLAGS1(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rel);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rel)), PLT_T_FULL, nlmp, (Addr)symval,
	    nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * If the object we've bound to is in the process of being initialized
	 * by another thread, determine whether we should block.
	 */
	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}

/*
 * When the relocation loop realizes that it's dealing with relative
 * relocations in a shared object, it breaks into this tighter loop
 * as an optimization.
 */
ulong_t
elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz,
    ulong_t basebgn, ulong_t etext, ulong_t emap)
{
	ulong_t	roffset = ((Rel *)relbgn)->r_offset;
	char	rtype;

	do {
		roffset += basebgn;

		/*
		 * If this relocation is against an address not mapped in,
		 * then break out of the relative relocation loop, falling
		 * back on the main relocation loop.
		 */
		if (roffset < etext || roffset > emap)
			break;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) += basebgn;

		relbgn += relsiz;

		if (relbgn >= relend)
			break;

		rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info, M_MACH);
		roffset = ((Rel *)relbgn)->r_offset;

	} while (rtype == R_386_RELATIVE);

	return (relbgn);
}

/*
 * This is the tightest loop for RELATIVE relocations for those
 * objects built with the DT_RELACOUNT .dynamic entry.
 */
ulong_t
elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz,
    ulong_t basebgn)
{
	ulong_t	roffset = ((Rel *)relbgn)->r_offset;

	for (; relacount; relacount--) {
		roffset += basebgn;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) += basebgn;

		relbgn += relsiz;

		roffset = ((Rel *)relbgn)->r_offset;
	}

	return (relbgn);
}

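/*
 * Both loops above carry out the operation an R_386_RELATIVE entry calls
 * for, namely:
 *
 *	*(ulong_t *)(basebgn + r_offset) += basebgn;
 *
 * elf_reloc_relative() re-verifies the relocation type and mapping range
 * on every iteration, while elf_reloc_relacount() can trust the
 * DT_RELACOUNT hint and simply counts the entries down.
 */
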
/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
	ulong_t		emap, dsymndx;
	uchar_t		rtype;
	long		value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		textrel = 0, ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rel		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		if ((ulong_t)PLTGOT(lmp) < etext) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0)
				return (0);
			textrel = 1;
		}
		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);
	emap = ADDR(lmp) + MSIZE(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real
	 * .plt entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		/*
		 * Initialize the symbol lookup data structure.
		 */
		SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
		    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);

		if ((symdef = elf_find_sym(&sl, &_lmp, &binfo, NULL)) == 0)
			return (1);

		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL
		 *	and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table, however if we're processing a
	 * shared object in lazy mode the .got addresses associated with each
	 * .plt must be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

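	/*
	 * The loop below processes one Rel entry at a time:  runs of
	 * R_386_RELATIVE entries are diverted to the optimized loops above,
	 * any referenced symbol is resolved through lookup_sym() (the
	 * previous lookup is cached so consecutive relocations against the
	 * same symbol are resolved only once), and the result is applied
	 * either as a special case (R_386_COPY, R_386_JMP_SLOT, TLS) or via
	 * do_reloc_rtld().
	 */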
	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		uint_t	sb_flags = 0;

		rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info, M_MACH);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).  Only make the
		 * jump if we've been given a hint on the number of
		 * relocations.
		 */
		if ((rtype == R_386_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			/*
			 * It's possible that the relative relocation block
			 * has relocations against the text segment as well
			 * as the data segment.  Since our optimized relocation
			 * engine does not check which segment the relocation
			 * is against, just mprotect it now if it's been
			 * marked as containing TEXTRELs.
			 */
			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
					ret = 0;
					break;
				}
				textrel = 1;
			}

			if (relacount) {
				relbgn = elf_reloc_relacount(relbgn, relacount,
				    relsiz, basebgn);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, etext, emap);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info, M_MACH);
		}

		roffset = ((Rel *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {

			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_386_JMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				relbgn = elf_reloc_relacount(relbgn,
				    plthint, relsiz, basebgn);
				plthint = 0;
				continue;
			}
			roffset += basebgn;
		}

		rsymndx = ELF_R_SYM(((Rel *)relbgn)->r_info);
		rel = (Rel *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_386_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If we're promoting plts determine if this one has already
		 * been written.
		 */
		if (plt) {
			if ((*(ulong_t *)roffset < _pltbgn) ||
			    (*(ulong_t *)roffset > _pltend))
				continue;
		}

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
		    MSIZE(lmp)))) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		binfo = 0;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address.
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_386_TLS_DTPMOD32) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if (rtype == R_386_TLS_TPOFF) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is equal to that of the
				 * previous relocation we processed then reuse
				 * the previous values.  (Note that there have
				 * been cases where a relocation exists against
				 * a copy relocation symbol, our ld(1) should
				 * optimize this away, but make sure we don't
				 * use the same symbol information should this
				 * case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_386_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup data
					 * structure.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);

					symdef = lookup_sym(&sl, &_lmp,
					    &binfo, in_nfavl);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    if (sl.sl_bind != STB_WEAK) {
						if (elf_reloc_error(lmp, name,
						    rel, binfo))
							continue;

						ret = 0;
						break;

					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_386_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_386_TLS_DTPMOD32) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if (rtype == R_386_TLS_TPOFF) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_386_TLS_DTPMOD32) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;
			name = (char *)0;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));

		/*
		 * If this object has relocations in the text segment, turn
		 * off the write protect.
		 */
		if ((roffset < etext) && (textrel == 0)) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
				ret = 0;
				break;
			}
			textrel = 1;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_386_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_386_JMP_SLOT:
			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Word *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint_t	*_got;
	/* LINTED */
	Rt_map	*lmp = (Rt_map *)l;

	_got = (uint_t *)got + M_GOT_XLINKMAP;
	*_got = (uint_t)lmp;
	_got = (uint_t *)got + M_GOT_XRTLD;
	*_got = (uint_t)elf_rtbndr;
}

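/*
 * For reference:  the i386 ABI reserves the first .got entries for the
 * runtime linker.  _GLOBAL_OFFSET_TABLE_[0] holds the address of the
 * .dynamic section, while the two words indexed by M_GOT_XLINKMAP and
 * M_GOT_XRTLD are filled in above with the object's link-map address and
 * the address of elf_rtbndr().  The reserved first plt entry pushes the
 * former and jumps through the latter, which is how an unresolved plt
 * call reaches the assembly glue and, from there, elf_bndr() above.
 */
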
/*
 * For SVR4 Intel compatibility.  USL uses /usr/lib/libc.so.1 as the run-time
 * linker, so the interpreter's address will differ from /usr/lib/ld.so.1.
 * Further, USL has special _iob[] and _ctype[] processing that makes up for
 * the fact that these arrays do not have associated copy relocations.  So we
 * try and make up for that here.  Any relocations found will be added to the
 * global copy relocation list and will be processed in setup().
 */
static int
_elf_copy_reloc(const char *name, Rt_map *rlmp, Rt_map *dlmp)
{
	Sym		*symref, *symdef;
	caddr_t		ref, def;
	Rt_map		*_lmp;
	Rel		rel;
	Slookup		sl;
	uint_t		binfo;

	/*
	 * Determine if the special symbol exists as a reference in the dynamic
	 * executable, and that an associated definition exists in libc.so.1.
	 *
	 * Initialize the symbol lookup data structure.
	 */
	SLOOKUP_INIT(sl, name, rlmp, rlmp, ld_entry_cnt, 0, 0, 0, 0,
	    LKUP_FIRST);

	if ((symref = lookup_sym(&sl, &_lmp, &binfo, NULL)) == 0)
		return (1);

	sl.sl_imap = dlmp;
	sl.sl_flags = LKUP_DEFT;

	if ((symdef = lookup_sym(&sl, &_lmp, &binfo, NULL)) == 0)
		return (1);
	if (strcmp(NAME(_lmp), MSG_ORIG(MSG_PTH_LIBC)))
		return (1);

	/*
	 * Determine the reference and definition addresses.
	 */
	ref = (void *)(symref->st_value);
	if (!(FLAGS(rlmp) & FLG_RT_FIXED))
		ref += ADDR(rlmp);
	def = (void *)(symdef->st_value);
	if (!(FLAGS(_lmp) & FLG_RT_FIXED))
		def += ADDR(_lmp);

	/*
	 * Set up a relocation entry for debugging and call the generic copy
	 * relocation function to provide symbol size error checking and to
	 * record the copy relocation that must be performed.
	 */
	rel.r_offset = (Addr)ref;
	rel.r_info = (Word)R_386_COPY;
	DBG_CALL(Dbg_reloc_in(LIST(rlmp), ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE,
	    &rel, NULL, name));

	return (elf_copy_reloc((char *)name, symref, rlmp, (void *)ref, symdef,
	    _lmp, (void *)def));
}

int
elf_copy_gen(Rt_map *lmp)
{
	if (interp && ((ulong_t)interp->i_faddr !=
	    r_debug.rtd_rdebug.r_ldbase) &&
	    !(strcmp(interp->i_name, MSG_ORIG(MSG_PTH_LIBC)))) {

		DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, 0,
		    DBG_REL_START));

		if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_CTYPE), lmp,
		    (Rt_map *)NEXT(lmp)) == 0)
			return (0);
		if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_IOB), lmp,
		    (Rt_map *)NEXT(lmp)) == 0)
			return (0);
	}
	return (1);
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rel		*rel = (Rel *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_386_type(rel, 0, &inv_buf));
}