/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_amd64.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"

extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However, we may map these dependencies in later
	 * (as if ld.so had dlopen()'ed them), so initialize the plt and the
	 * permission information.
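	 * elf_plt_init() (defined at the end of this file) seeds the two
	 * reserved GOT slots with this link-map and the address of
	 * elf_rtbndr().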
	 */
	if (PLTGOT(lmp))
		elf_plt_init((void *)(PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */	0x48, 0x89, 0xe5,		/* movq %rsp, %rbp */
/* 0x04 */	0x48, 0x83, 0xec, 0x10,		/* subq $0x10, %rsp */
/* 0x08 */	0x4c, 0x8d, 0x1d, 0x00,		/* leaq trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */	0x4c, 0x89, 0x5d, 0xf8,		/* movq %r11, -0x8(%rbp) */
/* 0x13 */	0x49, 0xbb, 0x00, 0x00,		/* movq $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */	0x41, 0xff, 0xe3		/* jmp *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15

/*
 * Each dynamic plt entry consists of the code template above followed by
 * the word-aligned dyn_data block described below, so the entry size must
 * cover both.
 */
int	dyn_plt_ent_size = ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN) +
		sizeof (uintptr_t) +	/* reflmp */
		sizeof (uintptr_t) +	/* deflmp */
		sizeof (uint_t) +	/* symndx */
		sizeof (uint_t) +	/* sb_flags */
		sizeof (Sym);		/* symdef */

/*
 * The dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn_data:
 *	.align  8
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't,
	 * do it now.  Otherwise this function has been called before, but
	 * from a different plt (i.e. from another shared object).  In that
	 * case we just set the plt to point to the new dyn_plt.
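	 *
	 * Once written, an initialized entry in ai_dynplts looks like this
	 * (a sketch of the stores below, assuming M_WORD_ALIGN is 8):
	 *
	 *	dyn_plt+0x00	relocated copy of dyn_plt_template
	 *	dyn_plt+0x20	reflmp		(rlmp)
	 *	dyn_plt+0x28	deflmp		(dlmp)
	 *	dyn_plt+0x30	(sb_flags << 32) | symndx
	 *	dyn_plt+0x38	symdef		(Sym copy, st_value = to)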
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocate:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}

	/*
	 * Point the got entry at the dynamic plt entry.
	 */
	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; passes first through an assembly language
 * interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*sym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter();

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map, or
	 * the relocation offset is invalid, then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if ((lmp == NULL) || (pltndx >=
	    (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
		Conv_inv_buf_t inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * (ulong_t)RELENT(lmp);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
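	 * (The entry for plt index N lives at JMPREL(lmp) + N * RELENT(lmp),
	 * and its ELF_R_SYM() value indexes SYMTAB(lmp).)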
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + sym->st_name);

	/*
	 * Determine the last link-map of this list; this will be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.
	 */
	sl.sl_name = name;
	sl.sl_cmap = lmp;
	sl.sl_imap = lml->lm_head;
	sl.sl_hash = 0;
	sl.sl_rsymndx = rsymndx;
	sl.sl_flags = LKUP_DEFT;

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | FLAGS1(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp,
	    (Addr)symval, nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((lml->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * If the object we've bound to is in the process of being initialized
	 * by another thread, determine whether we should block.
	 */
	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
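	 * (is_dep_init() handles the former; leave() performs the cleanup.)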
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}

/*
 * When the relocation loop realizes that it's dealing with relative
 * relocations in a shared object, it breaks into this tighter loop
 * as an optimization.
 */
ulong_t
elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz,
    ulong_t basebgn, ulong_t etext, ulong_t emap)
{
	ulong_t	roffset = ((Rela *)relbgn)->r_offset;
	char	rtype;

	do {
		roffset += basebgn;

		/*
		 * If this relocation is against an address not mapped in,
		 * then break out of the relative relocation loop, falling
		 * back on the main relocation loop.
		 */
		if (roffset < etext || roffset > emap)
			break;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) = basebgn +
		    ((Rela *)relbgn)->r_addend;

		relbgn += relsiz;

		if (relbgn >= relend)
			break;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		roffset = ((Rela *)relbgn)->r_offset;

	} while (rtype == R_AMD64_RELATIVE);

	return (relbgn);
}

/*
 * This is the tightest loop for RELATIVE relocations for those
 * objects built with the DT_RELACOUNT .dynamic entry.
 */
ulong_t
elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz,
    ulong_t basebgn)
{
	ulong_t	roffset = ((Rela *)relbgn)->r_offset;

	for (; relacount; relacount--) {
		roffset += basebgn;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) = basebgn +
		    ((Rela *)relbgn)->r_addend;

		relbgn += relsiz;

		roffset = ((Rela *)relbgn)->r_offset;
	}

	return (relbgn);
}

/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
	ulong_t	relbgn, relend, relsiz, basebgn;
	ulong_t	pltbgn, pltend, _pltbgn, _pltend;
	ulong_t	roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
	ulong_t	emap, dsymndx;
	uchar_t	rtype;
	long	reladd, value, pvalue;
	Sym	*symref, *psymref, *symdef, *psymdef;
	char	*name, *pname;
	Rt_map	*_lmp, *plmp;
	int	textrel = 0, ret = 1, noplt = 0;
	int	relacount = RELACOUNT(lmp), plthint = 0;
	Rela	*rel;
	uint_t	binfo, pbinfo;
	Alist	*bound = 0;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		if ((ulong_t)PLTGOT(lmp) < etext) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0)
				return (0);
			textrel = 1;
		}
		elf_plt_init((void *)PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);
	emap = ADDR(lmp) + MSIZE(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.
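	 * (Conversely, a got entry that already points outside the real .plt
	 * range has been bound by an earlier call and is skipped below.)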
	 * Determine the range of the real .plt entries using the
	 * _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		sl.sl_name = MSG_ORIG(MSG_SYM_PLT);
		sl.sl_cmap = lmp;
		sl.sl_imap = lmp;
		sl.sl_hash = elf_hash(MSG_ORIG(MSG_SYM_PLT));
		sl.sl_rsymndx = 0;
		sl.sl_flags = LKUP_DEFT;

		if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0)
			return (1);

		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + ((PLTRELSZ(lmp) / relsiz) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is
		 *	the model our link-editor constructs).
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.
		 *	These relocations are specified individually by the
		 *	JMPREL and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table; however, if we're processing a
	 * shared object in lazy mode the .got addresses associated with each
	 * .plt must be relocated to reflect the location of the shared
	 * object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		uint_t	sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).  Only make
		 * the jump if we've been given a hint on the number of
		 * relocations.
		 */
		if ((rtype == R_AMD64_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			/*
			 * It's possible that the relative relocation block
			 * has relocations against the text segment as well
			 * as the data segment.  Since our optimized
			 * relocation engine does not check which segment the
			 * relocation is against - just mprotect it now if
			 * it's been marked as containing TEXTRELs.
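			 * With the text segment writable, DT_RELACOUNT (when
			 * present) lets us use the unchecked
			 * elf_reloc_relacount() loop; otherwise we fall back
			 * on elf_reloc_relative(), which range-checks each
			 * offset against the mapped image.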
			 */
			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
					ret = 0;
					break;
				}
				textrel = 1;
			}
			if (relacount) {
				relbgn = elf_reloc_relacount(relbgn, relacount,
				    relsiz, basebgn);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, etext, emap);
			}

			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_AMD64_JUMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				/*
				 * The PLT relocations (for lazy bindings)
				 * are additive to what's already in the GOT.
				 * This differs from what happens in
				 * elf_reloc_relacount(), and that's why we
				 * just do it inline here.
				 */
				for (roffset = ((Rela *)relbgn)->r_offset;
				    plthint; plthint--) {
					roffset += basebgn;

					/*
					 * Perform the actual relocation.
					 */
					*((ulong_t *)roffset) += basebgn;

					relbgn += relsiz;
					roffset = ((Rela *)relbgn)->r_offset;
				}
				continue;
			}
			roffset += basebgn;
		}

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_AMD64_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
		    MSIZE(lmp)))) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.
		 */
		if (plt) {
			if ((*(ulong_t *)roffset < _pltbgn) ||
			    (*(ulong_t *)roffset > _pltend))
				continue;
		}

		binfo = 0;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base
			 * address (we should have no local relocations in
			 * the executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is the same as that of
				 * the previous relocation we processed, then
				 * reuse the previous values.
				 * (Note that there have been cases where a
				 * relocation exists against a copy relocation
				 * symbol; our ld(1) should optimize this
				 * away, but make sure we don't use the same
				 * symbol information should this case exist.)
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_AMD64_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					uchar_t		bind;

					/*
					 * Lookup the symbol definition.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					sl.sl_name = name;
					sl.sl_cmap = lmp;
					sl.sl_imap = 0;
					sl.sl_hash = 0;
					sl.sl_rsymndx = rsymndx;

					if (rtype == R_AMD64_COPY)
						sl.sl_flags = LKUP_COPY;
					else
						sl.sl_flags = LKUP_DEFT;

					sl.sl_flags |= LKUP_ALLCNTLIST;

					if (rtype != R_AMD64_JUMP_SLOT)
						sl.sl_flags |= LKUP_SPEC;

					/*
					 * Under ldd -w, any unresolved weak
					 * references are diagnosed.  Set the
					 * symbol binding as global to trigger
					 * a relocation error if the symbol can
					 * not be found.
					 */
					if (LIST(lmp)->lm_flags &
					    LML_FLG_TRC_NOUNRESWEAK) {
						bind = STB_GLOBAL;
					} else if ((bind =
					    ELF_ST_BIND(symref->st_info)) ==
					    STB_WEAK) {
						sl.sl_flags |= LKUP_WEAK;
					}

					symdef = lookup_sym(&sl, &_lmp, &binfo);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND)
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    Lm_list *lml = LIST(lmp);

					    if (bind != STB_WEAK) {
						if (lml->lm_flags &
						    LML_FLG_IGNRELERR) {
						    continue;
						} else if (lml->lm_flags &
						    LML_FLG_TRC_WARN) {
						    (void) printf(MSG_INTL(
							MSG_LDD_SYM_NFOUND),
							demangle(name),
							NAME(lmp));
						    continue;
						} else {
						    DBG_CALL(Dbg_reloc_in(lml,
							ELF_DBG_RTLD, M_MACH,
							M_REL_SHT_TYPE, rel,
							NULL, name));
						    eprintf(lml, ERR_FATAL,
							MSG_INTL(MSG_REL_NOSYM),
							NAME(lmp),
							demangle(name));
						    ret = 0;
						    break;
						}
					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (alist_test(&bound, _lmp,
						    sizeof (Rt_map *),
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of
					 * definition; symbol value plus base
					 * address of containing shared object.
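					 * (No base address is added for fixed
					 * objects, absolute symbols, TLS
					 * symbols, or size relocations; see
					 * the tests below.)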
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_AMD64_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_AMD64_DTPMOD64) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;
			name = (char *)0;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));

		/*
		 * If this object has relocations in the text segment, turn
		 * off the write protect.
		 */
		if ((roffset < etext) && (textrel == 0)) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
				ret = 0;
				break;
			}
			textrel = 1;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_AMD64_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_AMD64_JUMP_SLOT:
			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			value += reladd;
			/*
			 * Write the relocation out.
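			 * (do_reloc_rtld() applies the machine-dependent
			 * update for the remaining relocation types, e.g.
			 * R_AMD64_64 and R_AMD64_PC32.)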
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}