/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_amd64.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"
#include	"debug.h"
#include	"reloc.h"
#include	"conv.h"


extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}
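/*
 * For illustration: no processor-specific e_flags values are defined for
 * amd64, so any non-zero e_flags marks the object as incompatible.  A
 * caller is expected to test the ELF header before using the object and
 * report the rejection, roughly:
 *
 *	Rej_desc	rej = { 0 };
 *
 *	if (elf_mach_flags_check(&rej, ehdr) == 0)
 *		reject the object, reporting rej.rej_type and rej.rej_info
 */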
void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((void *)(PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */	0x48, 0x89, 0xe5,		/* movq %rsp, %rbp */
/* 0x04 */	0x48, 0x83, 0xec, 0x10,		/* subq $0x10, %rsp */
/* 0x08 */	0x4c, 0x8d, 0x1d, 0x00,		/* leaq trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */	0x4c, 0x89, 0x5d, 0xf8,		/* movq %r11, -0x8(%rbp) */
/* 0x13 */	0x49, 0xbb, 0x00, 0x00,		/* movq $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */	0x41, 0xff, 0xe3		/* jmp *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15

int	dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn_data:
 *	.align	8
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
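/*
 * For illustration: sizeof (dyn_plt_template) is 0x20 bytes and is already
 * 8-byte aligned, so dyn_data starts at offset 0x20 of each dynamic plt
 * entry.  The R_AMD64_PC32 value patched in at TRCREL1OFF is the usual
 * S + A - P computation, here:
 *
 *	(dyn_plt + 0x20) + (-4) - (dyn_plt + TRCREL1OFF)
 *
 * which matches the (dyndata - &dyn_plt[TRCREL1OFF] - 4) expression used
 * in elf_plt_trace_write() below.
 */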
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;


	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't,
	 * do it now.  Otherwise this function has been called before, but
	 * from a different plt (i.e. from another shared object).  In that
	 * case we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc(R_AMD64_PC32, &dyn_plt[TRCREL1OFF], &symvalue,
		    MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc(R_AMD64_64, &dyn_plt[TRCREL2OFF], &symvalue,
		    MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}

	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}
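/*
 * For illustration, once elf_plt_trace_write() has run, a traced binding
 * no longer routes the .plt's got entry straight at the destination.
 * Instead:
 *
 *	caller -> .plt -> got entry -> dyn_plt glue -> elf_plt_trace()
 *	    -> destination recorded in dyn_data's symdef.st_value
 *
 * which gives the auditing support a hook on every call through the plt.
 */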
/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; the call passes first through an assembly
 * language interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*sym, *nsym;
	uint_t		binfo, sb_flags = 0;
	Slookup		sl;
	int		entry, dbg_save, lmflags;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter();

	if ((lmflags = LIST(lmp)->lm_flags) & LML_FLG_RTLDLM) {
		dbg_save = dbg_mask;
		dbg_mask = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map or
	 * the relocation offset is invalid then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if ((!lmp) || (pltndx >=
	    (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
		eprintf(ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type_str(R_AMD64_JUMP_SLOT),
		    EC_ADDR(lmp), EC_XWORD(pltndx), EC_ADDR(from));
		rtldexit(LIST(lmp), 1);
	}
	reloff = pltndx * (ulong_t)RELENT(lmp);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + sym->st_name);

	/*
	 * Determine the last link-map of this list; this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = LIST(lmp)->lm_tail;

	/*
	 * Find definition for symbol.
	 */
	sl.sl_name = name;
	sl.sl_cmap = lmp;
	sl.sl_imap = LIST(lmp)->lm_head;
	sl.sl_hash = 0;
	sl.sl_rsymndx = rsymndx;
	sl.sl_flags = LKUP_DEFT;

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
		eprintf(ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(LIST(lmp), 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(LIST(lmp), 1);
	}

	if ((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(LIST(lmp), 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(NAME(lmp), from, from - ADDR(lmp),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, NAME(nlmp),
	    (caddr_t)symval, (caddr_t)nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp, lmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp, 0);

	/*
	 * If the object we've bound to is in the process of being initialized
	 * by another thread, determine whether we should block.
	 */
	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(LIST(lmp));
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_mask = dbg_save;

	return (symval);
}
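/*
 * For illustration: an R_AMD64_RELATIVE entry names no symbol; the value
 * stored is simply the base address plus the addend (B + A):
 *
 *	*(ulong_t *)(basebgn + rel->r_offset) = basebgn + rel->r_addend;
 *
 * which is exactly what the two optimized loops below compute.
 */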
/*
 * When the relocation loop realizes that it's dealing with relative
 * relocations in a shared object, it breaks into this tighter loop
 * as an optimization.
 */
ulong_t
elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz,
    ulong_t basebgn, ulong_t etext, ulong_t emap)
{
	ulong_t	roffset = ((Rela *)relbgn)->r_offset;
	char	rtype;

	do {
		roffset += basebgn;

		/*
		 * If this relocation is against an address not mapped in,
		 * then break out of the relative relocation loop, falling
		 * back on the main relocation loop.
		 */
		if (roffset < etext || roffset > emap)
			break;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) = basebgn +
		    ((Rela *)relbgn)->r_addend;

		relbgn += relsiz;

		if (relbgn >= relend)
			break;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		roffset = ((Rela *)relbgn)->r_offset;

	} while (rtype == R_AMD64_RELATIVE);

	return (relbgn);
}

/*
 * This is the tightest loop for RELATIVE relocations for those
 * objects built with the DT_RELACOUNT .dynamic entry.
 */
ulong_t
elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz,
    ulong_t basebgn)
{
	ulong_t	roffset = ((Rela *)relbgn)->r_offset;

	for (; relacount; relacount--) {
		roffset += basebgn;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) = basebgn +
		    ((Rela *)relbgn)->r_addend;

		relbgn += relsiz;

		roffset = ((Rela *)relbgn)->r_offset;
	}

	return (relbgn);
}
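/*
 * Note the trade-off between the two loops above: elf_reloc_relacount()
 * trusts the DT_RELACOUNT value and applies entries with no per-entry
 * checking, while elf_reloc_relative() verifies the target address and
 * the relocation type on every iteration and falls back to the general
 * loop as soon as either check fails.
 */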
/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
	ulong_t		emap, dsymndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		textrel = 0, ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rela		*rel;
	uint_t		binfo, pbinfo;
	Alist		*bound = 0;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		if ((ulong_t)PLTGOT(lmp) < etext) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0)
				return (0);
			textrel = 1;
		}
		elf_plt_init((void *)PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));


	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);
	emap = ADDR(lmp) + MSIZE(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real .plt
	 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		sl.sl_name = MSG_ORIG(MSG_SYM_PLT);
		sl.sl_cmap = lmp;
		sl.sl_imap = lmp;
		sl.sl_hash = 0;
		sl.sl_rsymndx = 0;
		sl.sl_flags = LKUP_DEFT;

		if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0)
			return (1);

		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL
		 *	and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(NAME(lmp), 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(NAME(lmp), M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table; however, if we're processing a
	 * shared object in lazy mode the .got addresses associated with each
	 * .plt must be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;
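	/*
	 * Overview of the loop below: for each entry we resolve the symbol
	 * (if one is referenced), apply the addend, adjust PC-relative and
	 * TLS values as required, and write the result - inline for the
	 * JUMP_SLOT and COPY cases, or through do_reloc() for everything
	 * else.
	 */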
	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		uint_t	sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).  Only make the
		 * jump if we've been given a hint on the number of
		 * relocations.
		 */
		if ((rtype == R_AMD64_RELATIVE) &&
		    !(FLAGS(lmp) & FLG_RT_FIXED) && !dbg_mask) {
			/*
			 * It's possible that the relative relocation block
			 * has relocations against the text segment as well
			 * as the data segment.  Since our optimized relocation
			 * engine does not check which segment the relocation
			 * is against - just mprotect it now if it's been
			 * marked as containing TEXTRELs.
			 */
			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
					ret = 0;
					break;
				}
				textrel = 1;
			}
			if (relacount) {
				relbgn = elf_reloc_relacount(relbgn, relacount,
				    relsiz, basebgn);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, etext, emap);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {


			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_AMD64_JUMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				/*
				 * The PLT relocations (for lazy bindings)
				 * are additive to what's already in the GOT.
				 * This differs from what happens in
				 * elf_reloc_relacount() and that's why we
				 * just do it inline here.
				 */
				for (roffset = ((Rela *)relbgn)->r_offset;
				    plthint; plthint--) {
					roffset += basebgn;

					/*
					 * Perform the actual relocation.
					 */
					*((ulong_t *)roffset) += basebgn;

					relbgn += relsiz;
					roffset = ((Rela *)relbgn)->r_offset;
				}
				continue;
			}
			roffset += basebgn;
		}

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_AMD64_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
		    MSIZE(lmp)))) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		/*
		 * If we're promoting plts determine if this one has already
		 * been written.
		 */
		if (plt) {
			if ((*(ulong_t *)roffset < _pltbgn) ||
			    (*(ulong_t *)roffset > _pltend))
				continue;
		}
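		/*
		 * Symbol resolution below is cached: a run of relocations
		 * against the same symbol index reuses the previously
		 * resolved value, name, defining object and binding info
		 * (the psym* variables) instead of repeating the lookup.
		 */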
		binfo = 0;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address.
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * TLS relocation - value for DTPMOD64
				 * relocation is the TLS modid.
				 */
				if (rtype == R_AMD64_DTPMOD64)
					value = TLSMODID(lmp);
			} else {
				/*
				 * If the symbol index is equal to the previous
				 * symbol index relocation we processed then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol; our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_AMD64_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(
						    NAME(lmp), (caddr_t)roffset,
						    (caddr_t)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					uchar_t		bind;

					/*
					 * Lookup the symbol definition.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					sl.sl_name = name;
					sl.sl_cmap = lmp;
					sl.sl_imap = 0;
					sl.sl_hash = 0;
					sl.sl_rsymndx = rsymndx;

					if (rtype == R_AMD64_COPY)
						sl.sl_flags = LKUP_COPY;
					else
						sl.sl_flags = LKUP_DEFT;

					sl.sl_flags |= LKUP_ALLCNTLIST;

					if (rtype != R_AMD64_JUMP_SLOT)
						sl.sl_flags |= LKUP_SPEC;

					bind = ELF_ST_BIND(symref->st_info);
					if (bind == STB_WEAK)
						sl.sl_flags |= LKUP_WEAK;

					symdef = lookup_sym(&sl, &_lmp, &binfo);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND)
					 */
					if (symdef == 0) {
						if (bind != STB_WEAK) {
							if (LIST(lmp)->lm_flags &
							    LML_FLG_IGNRELERR) {
								continue;
							} else if (LIST(lmp)->lm_flags &
							    LML_FLG_TRC_WARN) {
								(void) printf(MSG_INTL(
								    MSG_LDD_SYM_NFOUND),
								    demangle(name),
								    NAME(lmp));
								continue;
							} else {
								eprintf(ERR_FATAL,
								    MSG_INTL(MSG_REL_NOSYM),
								    NAME(lmp),
								    demangle(name));
								ret = 0;
								break;
							}
						} else {
							psymndx = rsymndx;
							psymdef = 0;

							DBG_CALL(Dbg_bind_weak(
							    NAME(lmp), (caddr_t)roffset,
							    (caddr_t)
							    (roffset - basebgn), name));
							continue;
						}
					}

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (alist_test(&bound, _lmp,
						    sizeof (Rt_map *),
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					value = symdef->st_value;
					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_AMD64_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;
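				/*
				 * For illustration: combined with the addend
				 * added in the default case of the switch
				 * below, a PC-relative relocation ends up
				 * storing S + A - P, with P being roffset.
				 */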
				/*
				 * TLS relocation - value for DTPMOD64
				 * relocation is the TLS modid.
				 */
				if (rtype == R_AMD64_DTPMOD64)
					value = TLSMODID(_lmp);
				else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32))
					value = -(TLSSTATOFF(_lmp) - value);
			}
		} else {
			/*
			 * Special case:
			 *
			 * A DTPMOD64 relocation is a local binding to a TLS
			 * symbol.  Fill in the TLSMODID for the current object.
			 */
			if (rtype == R_AMD64_DTPMOD64)
				value = TLSMODID(lmp);
			else
				value = basebgn;
			name = (char *)0;
		}

		/*
		 * If this object has relocations in the text segment, turn
		 * off the write protect.
		 */
		if ((roffset < etext) && (textrel == 0)) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
				ret = 0;
				break;
			}
			textrel = 1;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		DBG_CALL(Dbg_reloc_in(M_MACH, M_REL_SHT_TYPE, rel, name, NULL));

		switch (rtype) {
		case R_AMD64_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_AMD64_JUMP_SLOT:
			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) /
				    SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply((Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			value += reladd;
			/*
			 * Write the relocation out.
			 */
			if (do_reloc(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply((Xword)roffset,
			    (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(NAME(lmp), (caddr_t)roffset,
			    (caddr_t)(roffset - basebgn), (Xword)(-1),
			    PLT_T_FULL, NAME(_lmp), (caddr_t)value,
			    (caddr_t)symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}
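/*
 * For illustration, elf_plt_write() performs the same store as the
 * non-auditing R_AMD64_JUMP_SLOT case in elf_reloc() above: the got entry
 * at (addr + rel->r_offset) receives symval + rel->r_addend, i.e. S + A.
 */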
/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type_str(uint_t rel)
{
	return (conv_reloc_amd64_type_str(rel));
}