/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include "_synonyms.h"

#include <stdio.h>
#include <sys/elf.h>
#include <sys/elf_amd64.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <synch.h>
#include <string.h>
#include <debug.h>
#include <reloc.h>
#include <conv.h>
#include "_rtld.h"
#include "_audit.h"
#include "_elf.h"
#include "msg.h"

extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}
void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However, we may map these dependencies in later
	 * (as if ld.so had dlopen()ed them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((void *)(PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */	0x48, 0x89, 0xe5,		/* movq %rsp, %rbp */
/* 0x04 */	0x48, 0x83, 0xec, 0x10,		/* subq $0x10, %rsp */
/* 0x08 */	0x4c, 0x8d, 0x1d, 0x00,		/* leaq trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */	0x4c, 0x89, 0x5d, 0xf8,		/* movq %r11, -0x8(%rbp) */
/* 0x13 */	0x49, 0xbb, 0x00, 0x00,		/* movq $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */	0x41, 0xff, 0xe3		/* jmp *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15

int	dyn_plt_ent_size = sizeof (dyn_plt_template);
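/*
 * For illustration, the arithmetic behind the two patches applied in
 * elf_plt_trace_write() below: the R_AMD64_PC32 relocation computes
 * S + A - P (symbol + addend - place).  With S the address of the dyn_data
 * area, A = -4, and P the address of the displacement field at offset 0x0b,
 * the result S - P - 4 is exactly the %rip-relative displacement the leaq
 * needs, since %rip points 4 bytes past the field when the instruction
 * executes.  The R_AMD64_64 relocation simply deposits the 64-bit absolute
 * address of elf_plt_trace() into the movq immediate at offset 0x15.
 */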
/*
 * the dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn_data:
 *	.align	8
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't, do
	 * it now.  Otherwise this function has been called before, but from
	 * a different plt (i.e., from another shared object).  In that case
	 * we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}

	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}
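/*
 * A minimal sketch, for reference only (no such type is defined here): the
 * dyn_data area populated above is laid out as if it were
 *
 *	typedef struct {
 *		uintptr_t	dd_reflmp;	(referencing link-map)
 *		uintptr_t	dd_deflmp;	(defining link-map)
 *		uint64_t	dd_info;	((sb_flags << 32) | symndx)
 *		Sym		dd_sym;		(symbol copy, with st_value
 *						 reset to the resolved address)
 *	} Dyn_data;
 *
 * elf_plt_trace() recovers this block through the pointer that the patched
 * leaq stores at -0x8(%rbp).
 */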
/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; passes first through an assembly language
 * interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes the process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter();

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map or
	 * the relocation offset is invalid, then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if ((!lmp) || (pltndx >=
	    (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
		Conv_inv_buf_t inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * (ulong_t)RELENT(lmp);

	/*
	 * Use the relocation entry to get the symbol table entry and symbol
	 * name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list; this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find the definition for the symbol.
	 */
	sl.sl_name = name;
	sl.sl_cmap = lmp;
	sl.sl_imap = lml->lm_head;
	sl.sl_hash = 0;
	sl.sl_rsymndx = rsymndx;
	sl.sl_rsym = rsym;
	sl.sl_flags = LKUP_DEFT;

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | FLAGS1(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp,
	    (Addr)symval, nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily
	 * as we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((lml->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * If the object we've bound to is in the process of being initialized
	 * by another thread, determine whether we should block.
	 */
	is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Clean up before returning to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}
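/*
 * Both relative-relocation loops below apply the same fixup: for an
 * R_AMD64_RELATIVE entry the value stored at (base + r_offset) is simply
 * B + A (base address plus addend).  As a hypothetical example, an entry
 * with r_offset 0x1028 and r_addend 0x520 in an object mapped at
 * 0x7f0000000000 stores 0x7f0000000520 at address 0x7f0000001028.
 */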
/*
 * When the relocation loop realizes that it's dealing with relative
 * relocations in a shared object, it breaks into this tighter loop
 * as an optimization.
 */
ulong_t
elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz,
    ulong_t basebgn, ulong_t etext, ulong_t emap)
{
	ulong_t	roffset = ((Rela *)relbgn)->r_offset;
	char	rtype;

	do {
		roffset += basebgn;

		/*
		 * If this relocation is against an address not mapped in,
		 * then break out of the relative relocation loop, falling
		 * back on the main relocation loop.
		 */
		if (roffset < etext || roffset > emap)
			break;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) = basebgn +
		    ((Rela *)relbgn)->r_addend;

		relbgn += relsiz;

		if (relbgn >= relend)
			break;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		roffset = ((Rela *)relbgn)->r_offset;

	} while (rtype == R_AMD64_RELATIVE);

	return (relbgn);
}

/*
 * This is the tightest loop for RELATIVE relocations for those
 * objects built with the DT_RELACOUNT .dynamic entry.
 */
ulong_t
elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz,
    ulong_t basebgn)
{
	ulong_t	roffset = ((Rela *)relbgn)->r_offset;

	for (; relacount; relacount--) {
		roffset += basebgn;

		/*
		 * Perform the actual relocation.
		 */
		*((ulong_t *)roffset) = basebgn +
		    ((Rela *)relbgn)->r_addend;

		relbgn += relsiz;

		roffset = ((Rela *)relbgn)->r_offset;
	}

	return (relbgn);
}
/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp);
	ulong_t		emap, dsymndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		textrel = 0, ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rela		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		if ((ulong_t)PLTGOT(lmp) < etext) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0)
				return (0);
			textrel = 1;
		}
		elf_plt_init((void *)PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);
	emap = ADDR(lmp) + MSIZE(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to
	 * RTLD_NOW, then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real
	 * .plt entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		sl.sl_name = MSG_ORIG(MSG_SYM_PLT);
		sl.sl_cmap = lmp;
		sl.sl_imap = lmp;
		sl.sl_hash = elf_hash(MSG_ORIG(MSG_SYM_PLT));
		sl.sl_rsymndx = 0;
		sl.sl_flags = LKUP_DEFT;

		if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0)
			return (1);

		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is
		 *	the model our link-editor constructs).
		 *
		 *  o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.
		 *	These relocations are specified individually by the
		 *	JMPREL and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
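	/*
	 * Illustrative example (hypothetical addresses): given a .rela.dyn
	 * of [0x1000, 0x1800) and a .rela.plt of [0x1800, 0x1a00), the first
	 * ABI model above yields REL/RELSZ spanning [0x1000, 0x1a00)
	 * already, while the second yields [0x1000, 0x1800); the checks
	 * above widen the latter so the single table also covers the .plt
	 * relocations.
	 */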
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table; however, if we're processing a
	 * shared object in lazy mode the .got addresses associated with each
	 * .plt must be relocated to reflect the location of the shared
	 * object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		uint_t	sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into
		 * a tighter relocation loop (elf_reloc_relative).  Only make
		 * the jump if we've been given a hint on the number of
		 * relocations.
		 */
		if ((rtype == R_AMD64_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) &&
		    (DBG_ENABLED == 0)) {
			/*
			 * It's possible that the relative relocation block
			 * has relocations against the text segment as well
			 * as the data segment.  Since our optimized
			 * relocation engine does not check which segment the
			 * relocation is against, just mprotect it now if
			 * it's been marked as containing TEXTRELs.
			 */
			if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) {
				if (elf_set_prot(lmp, PROT_WRITE) == 0) {
					ret = 0;
					break;
				}
				textrel = 1;
			}
			if (relacount) {
				relbgn = elf_reloc_relacount(relbgn, relacount,
				    relsiz, basebgn);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, etext, emap);
			}

			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {

			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_AMD64_JUMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				/*
				 * The PLT relocations (for lazy bindings)
				 * are additive to what's already in the GOT.
				 * This differs from what happens in
				 * elf_reloc_relacount() and that's why we
				 * just do it inline here.
				 */
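				/*
				 * Hypothetical example: for an object linked
				 * at address 0 and mapped at 0x7f0000000000,
				 * a got entry that statically contains the
				 * address of its own plt entry's push
				 * instruction (say, 0x11b6) becomes
				 * 0x7f00000011b6, so an unbound call still
				 * funnels through plt0 at its relocated
				 * address.
				 */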
				for (roffset = ((Rela *)relbgn)->r_offset;
				    plthint; plthint--) {
					roffset += basebgn;

					/*
					 * Perform the actual relocation.
					 */
					*((ulong_t *)roffset) += basebgn;

					relbgn += relsiz;
					roffset = ((Rela *)relbgn)->r_offset;
				}
				continue;
			}
			roffset += basebgn;
		}

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_AMD64_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) +
		    MSIZE(lmp)))) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.
		 */
		if (plt) {
			if ((*(ulong_t *)roffset < _pltbgn) ||
			    (*(ulong_t *)roffset > _pltend))
				continue;
		}

		binfo = 0;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base
			 * address.  (We should have no local relocations in
			 * the executable.)
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is equal to the
				 * previous symbol index relocation we
				 * processed, then reuse the previous values.
				 * (Note that there have been cases where a
				 * relocation exists against a copy relocation
				 * symbol; our ld(1) should optimize this
				 * away, but make sure we don't use the same
				 * symbol information should this case exist.)
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_AMD64_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;

					/*
					 * Lookup the symbol definition.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					sl.sl_name = name;
					sl.sl_cmap = lmp;
					sl.sl_imap = 0;
					sl.sl_hash = 0;
					sl.sl_rsymndx = rsymndx;
					sl.sl_rsym = symref;
					sl.sl_rtype = rtype;
					sl.sl_flags = LKUP_STDRELOC;

					symdef = lookup_sym(&sl, &_lmp, &binfo);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND)
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    Lm_list	*lml = LIST(lmp);

					    if (sl.sl_bind != STB_WEAK) {
						if (lml->lm_flags &
						    LML_FLG_IGNRELERR) {
						    continue;
						} else if (lml->lm_flags &
						    LML_FLG_TRC_WARN) {
						    (void) printf(MSG_INTL(
							MSG_LDD_SYM_NFOUND),
							demangle(name),
							NAME(lmp));
						    continue;
						} else {
						    DBG_CALL(Dbg_reloc_in(lml,
							ELF_DBG_RTLD, M_MACH,
							M_REL_SHT_TYPE, rel,
							NULL, name));
						    eprintf(lml, ERR_FATAL,
							MSG_INTL(MSG_REL_NOSYM),
							NAME(lmp),
							demangle(name));
						    ret = 0;
						    break;
						}
					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of
					 * definition; symbol value plus base
					 * address of containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for
					 * the subsequent relocations.
					 */
					if (rtype != R_AMD64_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    FLAGS1(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_AMD64_DTPMOD64) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;
			name = (char *)0;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));

		/*
		 * If this object has relocations in the text segment, turn
		 * off the write protect.
		 */
		if ((roffset < etext) && (textrel == 0)) {
			if (elf_set_prot(lmp, PROT_WRITE) == 0) {
				ret = 0;
				break;
			}
			textrel = 1;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_AMD64_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_AMD64_JUMP_SLOT:
			if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			value += reladd;
			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, textrel, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}
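/*
 * For context (standard AMD64 ABI behavior, not anything this file emits,
 * and assuming the usual GOT_XLINKMAP/GOT_XRTLD slots of 1 and 2): the
 * link-editor builds the first plt entry, plt0, as
 *
 *	pushq	GOT+8(%rip)		(pushes GOT[GOT_XLINKMAP])
 *	jmpq	*GOT+16(%rip)		(jumps through GOT[GOT_XRTLD])
 *
 * so the first call through any plt entry pushes the link map stored above
 * and lands in elf_rtbndr(), which in turn calls elf_bndr() to resolve the
 * symbol and rewrite the got entry.
 */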
/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By
 * calling the machine specific version, rather than the generic version,
 * we ensure that the data tables/strings for all known machine versions
 * aren't dragged into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}