/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"@(#)amd64_elf.c	1.25	08/07/30 SMI"

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_amd64.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"_inline.h"
#include	"msg.h"

extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}
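/*
 * The amd64 psABI currently defines no processor-specific e_flags values,
 * so any non-zero value is treated above as evidence of an incompatible
 * object.  A sketch of the rejection path, with a hypothetical flag value:
 *
 *	ehdr.e_flags = 0x1;			(unknown vendor flag)
 *	elf_mach_flags_check(&rej, &ehdr);	(returns 0)
 *	rej.rej_type == SGS_REJ_BADFLAG, rej.rej_info == 0x1
 */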
void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However, we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */	0x48, 0x89, 0xe5,		/* movq %rsp, %rbp */
/* 0x04 */	0x48, 0x83, 0xec, 0x10,		/* subq $0x10, %rsp */
/* 0x08 */	0x4c, 0x8d, 0x1d, 0x00,		/* leaq trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */	0x4c, 0x89, 0x5d, 0xf8,		/* movq %r11, -0x8(%rbp) */
/* 0x13 */	0x49, 0xbb, 0x00, 0x00,		/* movq $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */	0x41, 0xff, 0xe3		/* jmp *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15

int	dyn_plt_ent_size = sizeof (dyn_plt_template);
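/*
 * For illustration, the two fix-ups work out as follows (assuming
 * M_WORD_ALIGN is 8, so dyn_data starts at offset 0x20, immediately after
 * the 0x20-byte template):
 *
 *	R_AMD64_PC32 at 0x0b:	dyn_data - (dyn_plt + 0x0b) - 4 = 0x11.
 *				The leaq ends at 0x0f, so at run-time
 *				%rip + 0x11 = dyn_plt + 0x20 = dyn_data.
 *	R_AMD64_64 at 0x15:	the absolute address of elf_plt_trace is
 *				patched into the movq immediate.
 */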
/*
 * the dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn_data:
 *	.align	8
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't, do
	 * it now.  Otherwise this function has been called before, but from
	 * a different plt (i.e., from another shared object).  In that case
	 * we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}

	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}
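/*
 * The overall lazy-binding flow, sketched for a first call to foo():
 *
 *	call foo@plt  ->  jmp *GOT[n]	(GOT[n] initially points back into
 *					the plt, so control falls through
 *					to plt0)
 *	plt0  ->  elf_rtbndr  ->  elf_bndr()
 *
 * elf_bndr() resolves the symbol and rewrites GOT[n], either with the
 * target address itself or, when PLTENTER/PLTEXIT auditors are present,
 * with the dyn_plt glue built above.  Subsequent calls then jump directly
 * through GOT[n] without re-entering ld.so.1.
 */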
/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter(0);

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map or
	 * the relocation offset is invalid then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if ((!lmp) || (pltndx >=
	    (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
		Conv_inv_buf_t	inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * (ulong_t)RELENT(lmp);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.  Initialize the symbol lookup data
	 * structure.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo, NULL)) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | AFLAGS(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | AFLAGS(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
	    nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map, make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml, 0);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}
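/*
 * For reference, each Rela entry consumed below carries:
 *
 *	r_offset	location to patch (biased by the base address for
 *			non-fixed objects)
 *	r_info		ELF_R_SYM(): .dynsym index of the reference;
 *			ELF_R_TYPE(): relocation type (R_AMD64_*)
 *	r_addend	constant addend
 *
 * so a simple absolute relocation, e.g. R_AMD64_64 against symbol bar,
 * reduces to:
 *
 *	*(Xword *)(base + r_offset) = value(bar) + r_addend;
 *
 * with PC-relative types additionally subtracting the patch address.
 */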
/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0;
	ulong_t		dsymndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rela		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		mmapobj_result_t	*mpp;

		/*
		 * Make sure the segment is writable.
		 */
		if ((((mpp =
		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real .plt
	 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		/*
		 * Initialize the symbol lookup data structure.
		 */
		SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
		    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);

		if ((symdef = elf_find_sym(&sl, &_lmp, &binfo, NULL)) == 0)
			return (1);

		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and end
		 * of this table.  There are two different interpretations of
		 * the ABI at this point:
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL and
		 *	PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations, the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
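	/*
	 * For example (hypothetical addresses): with .rela.dyn spanning
	 * [0x400, 0x700) and .rela.plt spanning [0x700, 0x880), the merged
	 * range becomes [0x400, 0x880) regardless of which ABI
	 * interpretation the link-editor followed; under the first
	 * interpretation the checks above simply leave relbgn and relend
	 * unchanged.
	 */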
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table, however if we're processing a shared
	 * object in lazy mode the .got addresses associated to each .plt must
	 * be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		mmapobj_result_t	*mpp;
		uint_t			sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).
		 */
		if ((rtype == R_AMD64_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			if (relacount) {
				relbgn = elf_reloc_relative_count(relbgn,
				    relacount, relsiz, basebgn, lmp, textrel);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, lmp, textrel);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_AMD64_JUMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				/*
				 * The PLT relocations (for lazy bindings)
				 * are additive to what's already in the GOT.
				 * This differs from what happens in
				 * elf_reloc_relacount() and that's why we
				 * just do it inline here.
				 */
				for (roffset = ((Rela *)relbgn)->r_offset;
				    plthint; plthint--) {
					roffset += basebgn;

					/*
					 * Perform the actual relocation.
					 */
					*((ulong_t *)roffset) += basebgn;

					relbgn += relsiz;
					roffset = ((Rela *)relbgn)->r_offset;
				}
				continue;
			}
			roffset += basebgn;
		}
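		/*
		 * To illustrate the lazy pass above with hypothetical
		 * values: a got entry link-edited to hold 0x1016 (the
		 * return point inside its own plt entry), in an object
		 * loaded at base 0x2a000, is rewritten to hold 0x2b016,
		 * so the first call still funnels through plt0 into
		 * elf_rtbndr() at the relocated address.
		 */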
		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_AMD64_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.
		 */
		if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
		    (*(ulong_t *)roffset > _pltend)))
			continue;

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		binfo = 0;

		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address.
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = (char *)0;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is the same as that of
				 * the previous relocation we processed, then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol, our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_AMD64_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup data
					 * structure.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);

					symdef = lookup_sym(&sl, &_lmp,
					    &binfo, in_nfavl);

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    if (sl.sl_bind != STB_WEAK) {
						if (elf_reloc_error(lmp, name,
						    rel, binfo))
							continue;

						ret = 0;
						break;

					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_AMD64_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_AMD64_DTPMOD64) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;
			name = (char *)0;
		}
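		/*
		 * Note on the TLS cases above: DTPMOD64 stores the module
		 * id consumed by __tls_get_addr(), while the TPOFF types
		 * store a static offset from the thread pointer.  On amd64
		 * (TLS variant II) static TLS sits below the thread pointer,
		 * so, roughly, a TPOFF-relocated access compiles down to:
		 *
		 *	movq	%fs:0, %rax
		 *	movq	tpoff(%rax), %rdx
		 *
		 * with tpoff being the (negative) value computed by
		 * elf_static_tls().
		 */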
		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));

		/*
		 * Make sure the segment is writable.
		 */
		if (((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
			ret = 0;
			break;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_AMD64_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_AMD64_JUMP_SLOT:
			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			value += reladd;

			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, ret));
}
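/*
 * A sketch of the first plt entry (plt0) this supports, following the
 * usual amd64 convention laid down by the link-editor:
 *
 *	plt0:	pushq	GOT+8(%rip)	(the link map, GOT[GOT_XLINKMAP])
 *		jmp	*GOT+16(%rip)	(elf_rtbndr, GOT[GOT_XRTLD])
 *
 * so a first call through any plt entry lands in elf_rtbndr() with the
 * caller's link map already on the stack.
 */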
/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}