/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M% %I% %E% SMI"

#include	<string.h>
#include	<stdio.h>
#include	<strings.h>
#include	<sys/elf_amd64.h>
#include	<debug.h>
#include	<reloc.h>
#include	"msg.h"
#include	"_libld.h"

Word
ld_init_rel(Rel_desc *reld, void *reloc)
{
	Rela	*rel = (Rela *)reloc;

	/* LINTED */
	reld->rel_rtype = (Word)ELF_R_TYPE(rel->r_info);
	reld->rel_roffset = rel->r_offset;
	reld->rel_raddend = rel->r_addend;
	reld->rel_typedata = 0;

	reld->rel_flags |= FLG_REL_RELA;

	return ((Word)ELF_R_SYM(rel->r_info));
}

void
ld_mach_eflags(Ehdr *ehdr, Ofl_desc *ofl)
{
	ofl->ofl_dehdr->e_flags |= ehdr->e_flags;
}

void
ld_mach_make_dynamic(Ofl_desc *ofl, size_t *cnt)
{
	if (!(ofl->ofl_flags & FLG_OF_RELOBJ)) {
		/*
		 * Create this entry if we are going to create a PLT table.
		 */
		if (ofl->ofl_pltcnt)
			(*cnt)++;		/* DT_PLTGOT */
	}
}

void
ld_mach_update_odynamic(Ofl_desc *ofl, Dyn **dyn)
{
	if (((ofl->ofl_flags & FLG_OF_RELOBJ) == 0) && ofl->ofl_pltcnt) {
		(*dyn)->d_tag = DT_PLTGOT;
		if (ofl->ofl_osgot)
			(*dyn)->d_un.d_ptr = ofl->ofl_osgot->os_shdr->sh_addr;
		else
			(*dyn)->d_un.d_ptr = 0;
		(*dyn)++;
	}
}

Xword
ld_calc_plt_addr(Sym_desc *sdp, Ofl_desc *ofl)
{
	Xword	value;

	value = (Xword)(ofl->ofl_osplt->os_shdr->sh_addr) +
	    M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) * M_PLT_ENTSIZE);
	return (value);
}

/*
 * Build a single plt entry - code is:
 *	JMP	*name1@GOTPCREL(%rip)
 *	PUSHQ	$index
 *	JMP	.PLT0
 */
static uchar_t pltn_entry[M_PLT_ENTSIZE] = {
/* 0x00 jmpq *name1@GOTPCREL(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
/* 0x06 pushq $index */			0x68, 0x00, 0x00, 0x00, 0x00,
/* 0x0b jmpq  .plt0(%rip) */		0xe9, 0x00, 0x00, 0x00, 0x00
/* 0x10 */
};

static uintptr_t
plt_entry(Ofl_desc * ofl, Sym_desc * sdp)
{
	uchar_t		*plt0, *pltent, *gotent;
	Sword		plt_off;
	Word		got_off;
	Xword		val1;
	Word		flags = ofl->ofl_flags;
	Word		dtflags1 = ofl->ofl_dtflags_1;

	got_off = sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
	plt_off = M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) *
	    M_PLT_ENTSIZE);
	plt0 = (uchar_t *)(ofl->ofl_osplt->os_outdata->d_buf);
	pltent = plt0 + plt_off;
	gotent = (uchar_t *)(ofl->ofl_osgot->os_outdata->d_buf) + got_off;

	bcopy(pltn_entry, pltent, sizeof (pltn_entry));
	/*
	 * Fill in the got entry with the address of the next instruction.
	 */
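	/*
	 * That address is the pushq following the initial jmpq of this PLT
	 * slot, so the first call through the slot falls through to .PLT0
	 * and into the runtime linker, which performs the lazy binding and
	 * then rewrites the GOT entry with the resolved address.
	 */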
	/* LINTED */
	*(Word *)gotent = ofl->ofl_osplt->os_shdr->sh_addr + plt_off +
	    M_PLT_INSSIZE;

	/*
	 * patchup:
	 *	jmpq	*name1@gotpcrel(%rip)
	 *
	 * NOTE: 0x06 represents next instruction.
	 */
	val1 = (ofl->ofl_osgot->os_shdr->sh_addr + got_off) -
	    (ofl->ofl_osplt->os_shdr->sh_addr + plt_off) - 0x06;

	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x02],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}

	/*
	 * patchup:
	 *	pushq	$pltndx
	 */
	val1 = (Xword)(sdp->sd_aux->sa_PLTndx - 1);

	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_32, &pltent[0x07],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}

	/*
	 * patchup:
	 *	jmpq	.plt0(%rip)
	 * NOTE: 0x10 represents next instruction.  The rather complex series
	 * of casts is necessary to sign extend an offset into a 64-bit value
	 * while satisfying various compiler error checks.  Handle with care.
	 */
	val1 = (Xword)((intptr_t)((uintptr_t)plt0 -
	    (uintptr_t)(&pltent[0x10])));

	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_PC32, &pltent[0x0c],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}
	return (1);
}

uintptr_t
ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl)
{
	Os_desc *	relosp, * osp = 0;
	Word		ndx;
	Xword		roffset, value;
	Sxword		raddend;
	Rela		rea;
	char		*relbits;
	Sym_desc *	sdp, * psym = (Sym_desc *)0;
	int		sectmoved = 0;

	raddend = orsp->rel_raddend;
	sdp = orsp->rel_sym;

	/*
	 * If the section this relocation is against has been discarded
	 * (-zignore), then also discard (skip) the relocation itself.
	 */
	if (orsp->rel_isdesc && ((orsp->rel_flags &
	    (FLG_REL_GOT | FLG_REL_BSS | FLG_REL_PLT | FLG_REL_NOINFO)) == 0) &&
	    (orsp->rel_isdesc->is_flags & FLG_IS_DISCARD)) {
		DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml, M_MACH, orsp));
		return (1);
	}

	/*
	 * If this is a relocation against a move table, or expanded move
	 * table, adjust the relocation entries.
	 */
	if (orsp->rel_move)
		ld_adj_movereloc(ofl, orsp);

	/*
	 * If this is a relocation against a section then we need to adjust the
	 * raddend field to compensate for the new position of the input section
	 * within the new output section.
	 */
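	/*
	 * In other words, for a section symbol that has not been moved the
	 * adjustment below is simply:
	 *
	 *	raddend += offset of the input section within its output
	 *	    section (plus the output section's virtual address when
	 *	    the section is allocatable).
	 */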
	if (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION) {
		if (ofl->ofl_parsym.head &&
		    (sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
		    /* LINTED */
		    (psym = ld_am_I_partial(orsp, orsp->rel_raddend))) {
			DBG_CALL(Dbg_move_outsctadj(ofl->ofl_lml, psym));
			sectmoved = 1;
			if (ofl->ofl_flags & FLG_OF_RELOBJ)
				raddend = psym->sd_sym->st_value;
			else
				raddend = psym->sd_sym->st_value -
				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
			/* LINTED */
			raddend += (Off)_elf_getxoff(psym->sd_isc->is_indata);
			if (psym->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
				raddend +=
				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
		} else {
			/* LINTED */
			raddend += (Off)_elf_getxoff(sdp->sd_isc->is_indata);
			if (sdp->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
				raddend +=
				    sdp->sd_isc->is_osdesc->os_shdr->sh_addr;
		}
	}

	value = sdp->sd_sym->st_value;

	if (orsp->rel_flags & FLG_REL_GOT) {
		/*
		 * Note: for GOT relative relocations on amd64
		 *	 we discard the addend.  It was relevant
		 *	 to the reference - not to the data item
		 *	 being referenced (ie: that -4 thing).
		 */
		raddend = 0;
		osp = ofl->ofl_osgot;
		roffset = ld_calc_got_offset(orsp, ofl);

	} else if (orsp->rel_flags & FLG_REL_PLT) {
		/*
		 * Note that relocations for PLTs actually
		 * cause a relocation against the GOT.
		 */
		osp = ofl->ofl_osplt;
		roffset = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
		raddend = 0;
		if (plt_entry(ofl, sdp) == S_ERROR)
			return (S_ERROR);

	} else if (orsp->rel_flags & FLG_REL_BSS) {
		/*
		 * This must be a R_AMD64_COPY.  For these set the roffset to
		 * point to the new symbol's location.
		 */
		osp = ofl->ofl_isbss->is_osdesc;
		roffset = value;

		/*
		 * The raddend doesn't mean anything in a R_AMD64_COPY
		 * relocation.  Null it out because it can confuse people.
		 */
		raddend = 0;
	} else {
		osp = orsp->rel_osdesc;

		/*
		 * Calculate virtual offset of reference point; equals offset
		 * into section + vaddr of section for loadable sections, or
		 * offset plus section displacement for nonloadable sections.
		 */
		roffset = orsp->rel_roffset +
		    (Off)_elf_getxoff(orsp->rel_isdesc->is_indata);
		if (!(ofl->ofl_flags & FLG_OF_RELOBJ))
			roffset += orsp->rel_isdesc->is_osdesc->
			    os_shdr->sh_addr;
	}

	if ((osp == 0) || ((relosp = osp->os_relosdesc) == 0))
		relosp = ofl->ofl_osrel;

	/*
	 * Assign the symbol's index for the output relocation.  If the
	 * relocation refers to a SECTION symbol then its index is based upon
	 * the output section's symbol index.  Otherwise the index can be
	 * derived from the symbol's index itself.
	 */
	if (orsp->rel_rtype == R_AMD64_RELATIVE)
		ndx = STN_UNDEF;
	else if ((orsp->rel_flags & FLG_REL_SCNNDX) ||
	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION)) {
		if (sectmoved == 0) {
			/*
			 * Check for a null input section.  This can
			 * occur if this relocation references a symbol
			 * generated by sym_add_sym().
			 */
			if ((sdp->sd_isc != 0) &&
			    (sdp->sd_isc->is_osdesc != 0))
				ndx = sdp->sd_isc->is_osdesc->os_scnsymndx;
			else
				ndx = sdp->sd_shndx;
		} else
			ndx = ofl->ofl_sunwdata1ndx;
	} else
		ndx = sdp->sd_symndx;

	/*
	 * Add the symbol's 'value' to the addend field.
	 */
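	/*
	 * FLG_REL_ADVAL is set when the output relocation being generated
	 * (an R_AMD64_RELATIVE, for example) carries its final value in the
	 * addend, so the symbol value is folded in here.
	 */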
	if (orsp->rel_flags & FLG_REL_ADVAL)
		raddend += value;

	/*
	 * The addend field for R_AMD64_DTPMOD64 means nothing.  The addend
	 * is propagated in the corresponding R_AMD64_DTPOFF64 relocation.
	 */
	if (orsp->rel_rtype == R_AMD64_DTPMOD64)
		raddend = 0;

	relbits = (char *)relosp->os_outdata->d_buf;

	rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
	rea.r_offset = roffset;
	rea.r_addend = raddend;
	DBG_CALL(Dbg_reloc_out(ofl, ELF_DBG_LD, SHT_RELA, &rea, relosp->os_name,
	    orsp->rel_sname));

	/*
	 * Assert we haven't walked off the end of our relocation table.
	 */
	assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);

	(void) memcpy((relbits + relosp->os_szoutrels),
	    (char *)&rea, sizeof (Rela));
	relosp->os_szoutrels += (Xword)sizeof (Rela);

	/*
	 * Determine if this relocation is against a non-writable, allocatable
	 * section.  If so we may need to provide a text relocation diagnostic.
	 * Note that relocations against the .plt (R_AMD64_JUMP_SLOT) actually
	 * result in modifications to the .got.
	 */
	if (orsp->rel_rtype == R_AMD64_JUMP_SLOT)
		osp = ofl->ofl_osgot;

	ld_reloc_remain_entry(orsp, osp, ofl);
	return (1);
}

/*
 * amd64 Instructions for TLS processing
 */
static uchar_t tlsinstr_gd_ie[] = {
	/*
	 *	0x00 movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00,
	/*
	 *	0x09 addq x@gottpoff(%rip), %rax
	 */
	0x48, 0x03, 0x05, 0x00, 0x00,
	0x00, 0x00
};

static uchar_t tlsinstr_gd_le[] = {
	/*
	 *	0x00 movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00,
	/*
	 *	0x09 leaq x@tpoff(%rax), %rax
	 */
	0x48, 0x8d, 0x80, 0x00, 0x00,
	0x00, 0x00
};

static uchar_t tlsinstr_ld_le[] = {
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00
};


static Fixupret
tls_fixups(Ofl_desc *ofl, Rel_desc *arsp)
{
	Sym_desc	*sdp = arsp->rel_sym;
	Word		rtype = arsp->rel_rtype;
	uchar_t		*offset;

	offset = (uchar_t *)((uintptr_t)arsp->rel_roffset +
	    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->is_indata) +
	    (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf);

	if (sdp->sd_ref == REF_DYN_NEED) {
		/*
		 * IE reference model
		 */
		switch (rtype) {
		case R_AMD64_TLSGD:
			/*
			 * GD -> IE
			 *
			 * Transition:
			 *	0x00 .byte 0x66
			 *	0x01 leaq x@tlsgd(%rip), %rdi
			 *	0x08 .word 0x6666
			 *	0x0a rex64
			 *	0x0b call __tls_get_addr@plt
			 *	0x10
			 * To:
			 *	0x00 movq %fs:0, %rax
			 *	0x09 addq x@gottpoff(%rip), %rax
			 *	0x10
			 */
			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
			    R_AMD64_GOTTPOFF, arsp));
			arsp->rel_rtype = R_AMD64_GOTTPOFF;
			arsp->rel_roffset += 8;
			arsp->rel_raddend = (Sxword)-4;

			/*
			 * Adjust 'offset' to beginning of instruction
			 * sequence.
			 */
			offset -= 4;
			(void) memcpy(offset, tlsinstr_gd_ie,
			    sizeof (tlsinstr_gd_ie));
			return (FIX_RELOC);

		case R_AMD64_PLT32:
			/*
			 * Fixup done via the TLS_GD relocation.
			 */
			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
			    R_AMD64_NONE, arsp));
			return (FIX_DONE);
		}
	}

	/*
	 * LE reference model
	 */
	switch (rtype) {
	case R_AMD64_TLSGD:
		/*
		 * GD -> LE
		 *
		 * Transition:
		 *	0x00 .byte 0x66
		 *	0x01 leaq x@tlsgd(%rip), %rdi
		 *	0x08 .word 0x6666
		 *	0x0a rex64
		 *	0x0b call __tls_get_addr@plt
		 *	0x10
		 * To:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 leaq x@tpoff(%rax), %rax
		 *	0x10
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_TPOFF32, arsp));
		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_roffset += 8;
		arsp->rel_raddend = 0;

		/*
		 * Adjust 'offset' to beginning of instruction sequence.
		 */
		offset -= 4;
		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
		return (FIX_RELOC);

	case R_AMD64_GOTTPOFF:
		/*
		 * IE -> LE
		 *
		 * Transition:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 addq x@gottpoff(%rip), %rax
		 *	0x10
		 * To:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 leaq x@tpoff(%rax), %rax
		 *	0x10
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_TPOFF32, arsp));
		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_raddend = 0;

		/*
		 * Adjust 'offset' to beginning of instruction sequence.
		 */
		offset -= 12;

		/*
		 * Same code sequence used in the GD -> LE transition.
		 */
		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
		return (FIX_RELOC);

	case R_AMD64_TLSLD:
		/*
		 * LD -> LE
		 *
		 * Transition:
		 *	0x00 leaq x1@tlsld(%rip), %rdi
		 *	0x07 call __tls_get_addr@plt
		 *	0x0c
		 * To:
		 *	0x00 .byte 0x66
		 *	0x01 .byte 0x66
		 *	0x02 .byte 0x66
		 *	0x03 movq %fs:0, %rax
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_NONE, arsp));
		offset -= 3;
		(void) memcpy(offset, tlsinstr_ld_le, sizeof (tlsinstr_ld_le));
		return (FIX_DONE);

	case R_AMD64_DTPOFF32:
		/*
		 * LD -> LE
		 *
		 * Transition:
		 *	0x00 leaq x1@dtpoff(%rax), %rcx
		 * To:
		 *	0x00 leaq x1@tpoff(%rax), %rcx
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_TPOFF32, arsp));
		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_raddend = 0;
		return (FIX_RELOC);
	}

	return (FIX_RELOC);
}

uintptr_t
ld_do_activerelocs(Ofl_desc *ofl)
{
	Rel_desc	*arsp;
	Rel_cache	*rcp;
	Listnode	*lnp;
	uintptr_t	return_code = 1;
	Word		flags = ofl->ofl_flags;
	Word		dtflags1 = ofl->ofl_dtflags_1;

	if (ofl->ofl_actrels.head)
		DBG_CALL(Dbg_reloc_doact_title(ofl->ofl_lml));

	/*
	 * Process active relocations.
	 */
	for (LIST_TRAVERSE(&ofl->ofl_actrels, lnp, rcp)) {
		/* LINTED */
		for (arsp = (Rel_desc *)(rcp + 1);
		    arsp < rcp->rc_free; arsp++) {
			uchar_t		*addr;
			Xword		value;
			Sym_desc	*sdp;
			const char	*ifl_name;
			Xword		refaddr;
			int		moved = 0;
			Gotref		gref;

			/*
			 * If the section this relocation is against has been
			 * discarded (-zignore), then discard (skip) the
			 * relocation itself.
			 */
			if ((arsp->rel_isdesc->is_flags & FLG_IS_DISCARD) &&
			    ((arsp->rel_flags &
			    (FLG_REL_GOT | FLG_REL_BSS |
			    FLG_REL_PLT | FLG_REL_NOINFO)) == 0)) {
				DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml,
				    M_MACH, arsp));
				continue;
			}

			/*
			 * We determine what the 'got reference' model
			 * (if required) is at this point.  This needs to be
			 * done before tls_fixups() since it may 'transition'
			 * our instructions.
			 *
			 * The got table entries have already been assigned,
			 * and we bind to those initial entries.
			 */
			if (arsp->rel_flags & FLG_REL_DTLS)
				gref = GOT_REF_TLSGD;
			else if (arsp->rel_flags & FLG_REL_MTLS)
				gref = GOT_REF_TLSLD;
			else if (arsp->rel_flags & FLG_REL_STLS)
				gref = GOT_REF_TLSIE;
			else
				gref = GOT_REF_GENERIC;

			/*
			 * Perform any required TLS fixups.
			 */
			if (arsp->rel_flags & FLG_REL_TLSFIX) {
				Fixupret	ret;

				if ((ret = tls_fixups(ofl, arsp)) == FIX_ERROR)
					return (S_ERROR);
				if (ret == FIX_DONE)
					continue;
			}

			/*
			 * If this is a relocation against a move table, or
			 * expanded move table, adjust the relocation entries.
			 */
			if (arsp->rel_move)
				ld_adj_movereloc(ofl, arsp);

			sdp = arsp->rel_sym;
			refaddr = arsp->rel_roffset +
			    (Off)_elf_getxoff(arsp->rel_isdesc->is_indata);

			if ((arsp->rel_flags & FLG_REL_CLVAL) ||
			    (arsp->rel_flags & FLG_REL_GOTCL))
				value = 0;
			else if (ELF_ST_TYPE(sdp->sd_sym->st_info) ==
			    STT_SECTION) {
				Sym_desc	*sym;

				/*
				 * The value for a symbol pointing to a SECTION
				 * is based off of that section's position.
				 *
				 * The second argument to ld_am_I_partial() is
				 * the value stored at the target address the
				 * relocation is going to be applied to.
				 */
				if ((sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
				    /* LINTED */
				    (sym = ld_am_I_partial(arsp, *(Xword *)
				    ((uchar_t *)
				    arsp->rel_isdesc->is_indata->d_buf +
				    arsp->rel_roffset)))) {
					/*
					 * If the symbol is moved,
					 * adjust the value
					 */
					value = sym->sd_sym->st_value;
					moved = 1;
				} else {
					value = _elf_getxoff(
					    sdp->sd_isc->is_indata);
					if (sdp->sd_isc->is_shdr->sh_flags &
					    SHF_ALLOC)
						value += sdp->sd_isc->is_osdesc->
						    os_shdr->sh_addr;
				}
				if (sdp->sd_isc->is_shdr->sh_flags & SHF_TLS)
					value -= ofl->ofl_tlsphdr->p_vaddr;

			} else if (IS_SIZE(arsp->rel_rtype)) {
				/*
				 * Size relocations require the symbol's size.
				 */
				value = sdp->sd_sym->st_size;
			} else {
				/*
				 * Else the value is the symbol's value.
				 */
				value = sdp->sd_sym->st_value;
			}

			/*
			 * Relocation against the GLOBAL_OFFSET_TABLE.
			 */
			if (arsp->rel_flags & FLG_REL_GOT)
				arsp->rel_osdesc = ofl->ofl_osgot;

			/*
			 * If loadable and not producing a relocatable object
			 * add the section's virtual address to the reference
			 * address.
			 */
			if ((arsp->rel_flags & FLG_REL_LOAD) &&
			    ((flags & FLG_OF_RELOBJ) == 0))
				refaddr += arsp->rel_isdesc->is_osdesc->
				    os_shdr->sh_addr;

			/*
			 * If this entry has a PLT assigned to it, its
			 * value is actually the address of the PLT (and
			 * not the address of the function).
			 */
			if (IS_PLT(arsp->rel_rtype)) {
				if (sdp->sd_aux && sdp->sd_aux->sa_PLTndx)
					value = ld_calc_plt_addr(sdp, ofl);
			}

			/*
			 * Add the relocation's addend to the value.  Add extra
			 * relocation addend if needed.
			 *
			 * Note: for GOT relative relocations on amd64
			 *	 we discard the addend.  It was relevant
			 *	 to the reference - not to the data item
			 *	 being referenced (ie: that -4 thing).
			 */
			if ((arsp->rel_flags & FLG_REL_GOT) == 0)
				value += arsp->rel_raddend;

			/*
			 * Determine whether the value needs further adjustment.
			 * Filter through the attributes of the relocation to
			 * determine what adjustment is required.  Note, many
			 * of the following cases are only applicable when a
			 * .got is present.  As a .got is not generated when a
			 * relocatable object is being built, any adjustments
			 * that require a .got need to be skipped.
			 */
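			/*
			 * The cases below, roughly in order: store the value
			 * directly into the GOT, GOT-base relative values,
			 * GOTPCREL (G + GOT + A - P), GOT-relative PC values,
			 * simple PC-relative values, TLS IE GOT offsets, and
			 * finally static (LE) TLS offsets from the thread
			 * pointer.
			 */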
			if ((arsp->rel_flags & FLG_REL_GOT) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Xword		R1addr;
				uintptr_t	R2addr;
				Word		gotndx;
				Gotndx		*gnp;

				/*
				 * Perform relocation against GOT table.  Since
				 * this doesn't fit exactly into a relocation
				 * we place the appropriate value in the GOT
				 * directly.
				 *
				 * Calculate offset into GOT at which to apply
				 * the relocation.
				 */
				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
				    ofl, arsp);
				assert(gnp);

				if (arsp->rel_rtype == R_AMD64_DTPOFF64)
					gotndx = gnp->gn_gotndx + 1;
				else
					gotndx = gnp->gn_gotndx;

				R1addr = (Xword)(gotndx * M_GOT_ENTSIZE);

				/*
				 * Add the GOT data's offset.
				 */
				R2addr = R1addr + (uintptr_t)
				    arsp->rel_osdesc->os_outdata->d_buf;

				DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml,
				    ELF_DBG_LD, M_MACH, SHT_RELA,
				    arsp->rel_rtype, R1addr, value,
				    arsp->rel_sname, arsp->rel_osdesc));

				/*
				 * And do it.
				 */
				*(Xword *)R2addr = value;
				continue;

			} else if (IS_GOT_BASED(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				value -= ofl->ofl_osgot->os_shdr->sh_addr;

			} else if (IS_GOTPCREL(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				/*
				 * Calculation:
				 *	G + GOT + A - P
				 */
				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
				    gref, ofl, arsp);
				assert(gnp);
				value = (Xword)(ofl->ofl_osgot->os_shdr->
				    sh_addr) + ((Xword)gnp->gn_gotndx *
				    M_GOT_ENTSIZE) + arsp->rel_raddend -
				    refaddr;

			} else if (IS_GOT_PC(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				value = (Xword)(ofl->ofl_osgot->os_shdr->
				    sh_addr) - refaddr + arsp->rel_raddend;

			} else if ((IS_PC_RELATIVE(arsp->rel_rtype)) &&
			    (((flags & FLG_OF_RELOBJ) == 0) ||
			    (arsp->rel_osdesc == sdp->sd_isc->is_osdesc))) {
				value -= refaddr;

			} else if (IS_TLS_INS(arsp->rel_rtype) &&
			    IS_GOT_RELATIVE(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
				    ofl, arsp);
				assert(gnp);
				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;

			} else if (IS_GOT_RELATIVE(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
				    gref, ofl, arsp);
				assert(gnp);
				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;

			} else if ((arsp->rel_flags & FLG_REL_STLS) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Xword	tlsstatsize;

				/*
				 * This is the LE TLS reference model.  Static
				 * offset is hard-coded.
				 */
				tlsstatsize =
				    S_ROUND(ofl->ofl_tlsphdr->p_memsz,
				    M_TLSSTATALIGN);
				value = tlsstatsize - value;

				/*
				 * Since this code is fixed up, it assumes a
				 * negative offset that can be added to the
				 * thread pointer.
				 */
				if (arsp->rel_rtype == R_AMD64_TPOFF32)
					value = -value;
			}

			if (arsp->rel_isdesc->is_file)
				ifl_name = arsp->rel_isdesc->is_file->ifl_name;
			else
				ifl_name = MSG_INTL(MSG_STR_NULL);

			/*
			 * Make sure we have data to relocate.  Compiler and
			 * assembler developers have been known to generate
			 * relocations against invalid sections (normally .bss),
			 * so for their benefit give them sufficient information
			 * to help analyze the problem.  End users should never
			 * see this.
			 */
			if (arsp->rel_isdesc->is_indata->d_buf == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_REL_EMPTYSEC),
				    conv_reloc_amd64_type(arsp->rel_rtype, 0),
				    ifl_name, demangle(arsp->rel_sname),
				    arsp->rel_isdesc->is_name);
				return (S_ERROR);
			}

			/*
			 * Get the address of the data item we need to modify.
			 */
			addr = (uchar_t *)((uintptr_t)arsp->rel_roffset +
			    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->
			    is_indata));

			DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml, ELF_DBG_LD,
			    M_MACH, SHT_RELA, arsp->rel_rtype, EC_NATPTR(addr),
			    value, arsp->rel_sname, arsp->rel_osdesc));
			addr += (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf;

			if ((((uintptr_t)addr - (uintptr_t)ofl->ofl_nehdr) >
			    ofl->ofl_size) || (arsp->rel_roffset >
			    arsp->rel_osdesc->os_shdr->sh_size)) {
				int	class;

				if (((uintptr_t)addr -
				    (uintptr_t)ofl->ofl_nehdr) > ofl->ofl_size)
					class = ERR_FATAL;
				else
					class = ERR_WARNING;

				eprintf(ofl->ofl_lml, class,
				    MSG_INTL(MSG_REL_INVALOFFSET),
				    conv_reloc_amd64_type(arsp->rel_rtype, 0),
				    ifl_name, arsp->rel_isdesc->is_name,
				    demangle(arsp->rel_sname),
				    EC_ADDR((uintptr_t)addr -
				    (uintptr_t)ofl->ofl_nehdr));

				if (class == ERR_FATAL) {
					return_code = S_ERROR;
					continue;
				}
			}

			/*
			 * The relocation is additive.  Ignore the previous
			 * symbol value if this local partial symbol is
			 * expanded.
			 */
			if (moved)
				value -= *addr;

			/*
			 * If '-z noreloc' is specified - skip the do_reloc
			 * stage.
			 */
			if ((flags & FLG_OF_RELOBJ) ||
			    !(dtflags1 & DF_1_NORELOC)) {
				if (do_reloc((uchar_t)arsp->rel_rtype,
				    addr, &value, arsp->rel_sname, ifl_name,
				    ofl->ofl_lml) == 0)
					return_code = S_ERROR;
			}
		}
	}
	return (return_code);
}

uintptr_t
ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
{
	Rel_desc	*orsp;
	Rel_cache	*rcp;
	Sym_desc	*sdp = rsp->rel_sym;

	/*
	 * Static executables *do not* want any relocations against them.
	 * Since our engine still creates relocations against a WEAK UNDEFINED
	 * symbol in a static executable, it's best to disable them here
	 * instead of throughout the relocation code.
	 */
	if ((ofl->ofl_flags & (FLG_OF_STATIC | FLG_OF_EXEC)) ==
	    (FLG_OF_STATIC | FLG_OF_EXEC))
		return (1);

	/*
	 * If no relocation cache structures are available allocate
	 * a new one and link it into the cache list.
	 */
	if ((ofl->ofl_outrels.tail == 0) ||
	    ((rcp = (Rel_cache *)ofl->ofl_outrels.tail->data) == 0) ||
	    ((orsp = rcp->rc_free) == rcp->rc_end)) {
		static size_t	nextsize = 0;
		size_t		size;

		/*
		 * Output relocation numbers can vary considerably between
		 * building executables or shared objects (pic vs. non-pic),
		 * etc.  But they typically aren't very large, so for these
		 * objects use a standard bucket size.  For building
		 * relocatable objects, typically there will be an output
		 * relocation for every input relocation.
		 */
		if (nextsize == 0) {
			if (ofl->ofl_flags & FLG_OF_RELOBJ) {
				if ((size = ofl->ofl_relocincnt) == 0)
					size = REL_LOIDESCNO;
				if (size > REL_HOIDESCNO)
					nextsize = REL_HOIDESCNO;
				else
					nextsize = REL_LOIDESCNO;
			} else
				nextsize = size = REL_HOIDESCNO;
		} else
			size = nextsize;

		size = size * sizeof (Rel_desc);

		if (((rcp = libld_malloc(sizeof (Rel_cache) + size)) == 0) ||
		    (list_appendc(&ofl->ofl_outrels, rcp) == 0))
			return (S_ERROR);

		/* LINTED */
		rcp->rc_free = orsp = (Rel_desc *)(rcp + 1);
		/* LINTED */
		rcp->rc_end = (Rel_desc *)((char *)rcp->rc_free + size);
	}

	/*
	 * If we are adding an output relocation against a section
	 * symbol (non-RELATIVE) then mark that section.  These sections
	 * will be added to the .dynsym symbol table.
	 */
	if (sdp && (rsp->rel_rtype != M_R_RELATIVE) &&
	    ((flags & FLG_REL_SCNNDX) ||
	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION))) {

		/*
		 * If this is a COMMON symbol - no output section
		 * exists yet - (it's created as part of sym_validate()).
		 * So - we mark here that when it's created it should
		 * be tagged with the FLG_OS_OUTREL flag.
		 */
		if ((sdp->sd_flags & FLG_SY_SPECSEC) &&
		    (sdp->sd_sym->st_shndx == SHN_COMMON)) {
			if (ELF_ST_TYPE(sdp->sd_sym->st_info) != STT_TLS)
				ofl->ofl_flags1 |= FLG_OF1_BSSOREL;
			else
				ofl->ofl_flags1 |= FLG_OF1_TLSOREL;
		} else {
			Os_desc	*osp = sdp->sd_isc->is_osdesc;

			if (osp && ((osp->os_flags & FLG_OS_OUTREL) == 0)) {
				ofl->ofl_dynshdrcnt++;
				osp->os_flags |= FLG_OS_OUTREL;
			}
		}
	}

	*orsp = *rsp;
	orsp->rel_flags |= flags;

	rcp->rc_free++;
	ofl->ofl_outrelscnt++;

	if (flags & FLG_REL_GOT)
		ofl->ofl_relocgotsz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_PLT)
		ofl->ofl_relocpltsz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_BSS)
		ofl->ofl_relocbsssz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_NOINFO)
		ofl->ofl_relocrelsz += (Xword)sizeof (Rela);
	else
		orsp->rel_osdesc->os_szoutrels += (Xword)sizeof (Rela);

	if (orsp->rel_rtype == M_R_RELATIVE)
		ofl->ofl_relocrelcnt++;

	/*
	 * We don't perform sorting on PLT relocations because
	 * they have already been assigned a PLT index and if we
	 * were to sort them we would have to re-assign the plt indexes.
	 */
	if (!(flags & FLG_REL_PLT))
		ofl->ofl_reloccnt++;

	/*
	 * Ensure a GLOBAL_OFFSET_TABLE is generated if required.
	 */
	if (IS_GOT_REQUIRED(orsp->rel_rtype))
		ofl->ofl_flags |= FLG_OF_BLDGOT;

	/*
	 * Identify and possibly warn of a displacement relocation.
	 */
	if (orsp->rel_flags & FLG_REL_DISP) {
		ofl->ofl_dtflags_1 |= DF_1_DISPRELPND;

		if (ofl->ofl_flags & FLG_OF_VERBOSE)
			ld_disp_errmsg(MSG_INTL(MSG_REL_DISPREL4), orsp, ofl);
	}
	DBG_CALL(Dbg_reloc_ors_entry(ofl->ofl_lml, ELF_DBG_LD, SHT_RELA,
	    M_MACH, orsp));
	return (1);
}

/*
 * Stub routine since register symbols are not supported on amd64.
 */
/* ARGSUSED */
uintptr_t
ld_reloc_register(Rel_desc * rsp, Is_desc * isp, Ofl_desc * ofl)
{
	eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_REL_NOREG));
	return (S_ERROR);
}

/*
 * Process a relocation for a LOCAL symbol.
 */
uintptr_t
ld_reloc_local(Rel_desc * rsp, Ofl_desc * ofl)
{
	Word		flags = ofl->ofl_flags;
	Sym_desc	*sdp = rsp->rel_sym;
	Word		shndx = sdp->sd_sym->st_shndx;
	Word		ortype = rsp->rel_rtype;

	/*
	 * if ((shared object) and (not pc relative relocation) and
	 *    (not against ABS symbol))
	 * then
	 *	build R_AMD64_RELATIVE
	 * fi
	 */
	if ((flags & FLG_OF_SHAROBJ) && (rsp->rel_flags & FLG_REL_LOAD) &&
	    !(IS_PC_RELATIVE(rsp->rel_rtype)) && !(IS_SIZE(rsp->rel_rtype)) &&
	    !(IS_GOT_BASED(rsp->rel_rtype)) &&
	    !(rsp->rel_isdesc != NULL &&
	    (rsp->rel_isdesc->is_shdr->sh_type == SHT_SUNW_dof)) &&
	    (((sdp->sd_flags & FLG_SY_SPECSEC) == 0) ||
	    (shndx != SHN_ABS) || (sdp->sd_aux && sdp->sd_aux->sa_symspec))) {

		/*
		 * R_AMD64_RELATIVE updates a 64-bit address; if this
		 * relocation isn't a 64-bit binding then we can not
		 * simplify it to a RELATIVE relocation.
		 */
		if (reloc_table[ortype].re_fsize != sizeof (Addr)) {
			return (ld_add_outrel(NULL, rsp, ofl));
		}

		rsp->rel_rtype = R_AMD64_RELATIVE;
		if (ld_add_outrel(FLG_REL_ADVAL, rsp, ofl) == S_ERROR)
			return (S_ERROR);
		rsp->rel_rtype = ortype;
		return (1);
	}

	/*
	 * If the relocation is against a 'non-allocatable' section
	 * and we can not resolve it now - then give a warning
	 * message.
	 *
	 * We can not resolve the symbol if either:
	 *	a) it's undefined
	 *	b) it's defined in a shared library and a
	 *	   COPY relocation hasn't moved it to the executable
	 *
	 * Note: because we process all of the relocations against the
	 *	text segment before any others - we know whether
	 *	or not a copy relocation will be generated before
	 *	we get here (see reloc_init()->reloc_segments()).
	 */
	if (!(rsp->rel_flags & FLG_REL_LOAD) &&
	    ((shndx == SHN_UNDEF) ||
	    ((sdp->sd_ref == REF_DYN_NEED) &&
	    ((sdp->sd_flags & FLG_SY_MVTOCOMM) == 0)))) {
		/*
		 * If the relocation is against a SHT_SUNW_ANNOTATE
		 * section - then silently ignore that the relocation
		 * can not be resolved.
		 */
		if (rsp->rel_osdesc &&
		    (rsp->rel_osdesc->os_shdr->sh_type == SHT_SUNW_ANNOTATE))
			return (0);
		(void) eprintf(ofl->ofl_lml, ERR_WARNING,
		    MSG_INTL(MSG_REL_EXTERNSYM),
		    conv_reloc_amd64_type(rsp->rel_rtype, 0),
		    rsp->rel_isdesc->is_file->ifl_name,
		    demangle(rsp->rel_sname), rsp->rel_osdesc->os_name);
		return (1);
	}

	/*
	 * Perform relocation.
	 */
	return (ld_add_actrel(NULL, rsp, ofl));
}


uintptr_t
/* ARGSUSED */
ld_reloc_GOTOP(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
{
	/*
	 * Stub routine for common code compatibility, we shouldn't
	 * actually get here on amd64.
	 */
	assert(0);
	return (S_ERROR);
}

uintptr_t
ld_reloc_TLS(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
{
	Word		rtype = rsp->rel_rtype;
	Sym_desc	*sdp = rsp->rel_sym;
	Word		flags = ofl->ofl_flags;
	Gotndx		*gnp;

	/*
	 * If we're building an executable - use either the IE or LE access
	 * model.  If we're building a shared object, process any IE model
	 * references.
	 */
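	/*
	 * In short: GD and LD references within an executable are
	 * transitioned to the IE model (for symbols that don't bind
	 * locally) or the LE model (for those that do) via the fixups
	 * requested below, while a shared object retains the dynamic
	 * GD/LD models.  Any IE-style reference forces DF_STATIC_TLS.
	 */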
	if ((flags & FLG_OF_EXEC) || (IS_TLS_IE(rtype))) {
		/*
		 * Set the DF_STATIC_TLS flag.
		 */
		ofl->ofl_dtflags |= DF_STATIC_TLS;

		if (!local || ((flags & FLG_OF_EXEC) == 0)) {
			/*
			 * Assign a GOT entry for static TLS references.
			 */
			if ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
			    GOT_REF_TLSIE, ofl, rsp)) == 0) {

				if (ld_assign_got_TLS(local, rsp, ofl, sdp,
				    gnp, GOT_REF_TLSIE, FLG_REL_STLS,
				    rtype, R_AMD64_TPOFF64, 0) == S_ERROR)
					return (S_ERROR);
			}

			/*
			 * IE access model.
			 */
			if (IS_TLS_IE(rtype))
				return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));

			/*
			 * Fixups are required for other executable models.
			 */
			return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
			    rsp, ofl));
		}

		/*
		 * LE access model.
		 */
		if (IS_TLS_LE(rtype))
			return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));

		return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
		    rsp, ofl));
	}

	/*
	 * Building a shared object.
	 *
	 * Assign a GOT entry for a dynamic TLS reference.
	 */
	if (IS_TLS_LD(rtype) && ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
	    GOT_REF_TLSLD, ofl, rsp)) == 0)) {

		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSLD,
		    FLG_REL_MTLS, rtype, R_AMD64_DTPMOD64, 0) == S_ERROR)
			return (S_ERROR);

	} else if (IS_TLS_GD(rtype) &&
	    ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), GOT_REF_TLSGD,
	    ofl, rsp)) == 0)) {

		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSGD,
		    FLG_REL_DTLS, rtype, R_AMD64_DTPMOD64,
		    R_AMD64_DTPOFF64) == S_ERROR)
			return (S_ERROR);
	}

	if (IS_TLS_LD(rtype))
		return (ld_add_actrel(FLG_REL_MTLS, rsp, ofl));

	return (ld_add_actrel(FLG_REL_DTLS, rsp, ofl));
}

/* ARGSUSED3 */
Gotndx *
ld_find_gotndx(List * lst, Gotref gref, Ofl_desc * ofl, Rel_desc * rdesc)
{
	Listnode *	lnp;
	Gotndx *	gnp;

	assert(rdesc != 0);

	if ((gref == GOT_REF_TLSLD) && ofl->ofl_tlsldgotndx)
		return (ofl->ofl_tlsldgotndx);

	for (LIST_TRAVERSE(lst, lnp, gnp)) {
		if ((rdesc->rel_raddend == gnp->gn_addend) &&
		    (gnp->gn_gotref == gref)) {
			return (gnp);
		}
	}
	return ((Gotndx *)0);
}

Xword
ld_calc_got_offset(Rel_desc * rdesc, Ofl_desc * ofl)
{
	Os_desc		*osp = ofl->ofl_osgot;
	Sym_desc	*sdp = rdesc->rel_sym;
	Xword		gotndx;
	Gotref		gref;
	Gotndx		*gnp;

	if (rdesc->rel_flags & FLG_REL_DTLS)
		gref = GOT_REF_TLSGD;
	else if (rdesc->rel_flags & FLG_REL_MTLS)
		gref = GOT_REF_TLSLD;
	else if (rdesc->rel_flags & FLG_REL_STLS)
		gref = GOT_REF_TLSIE;
	else
		gref = GOT_REF_GENERIC;

	gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref, ofl, rdesc);
	assert(gnp);

	gotndx = (Xword)gnp->gn_gotndx;

	if ((rdesc->rel_flags & FLG_REL_DTLS) &&
	    (rdesc->rel_rtype == R_AMD64_DTPOFF64))
		gotndx++;

	return ((Xword)(osp->os_shdr->sh_addr + (gotndx * M_GOT_ENTSIZE)));
}


/* ARGSUSED5 */
uintptr_t
ld_assign_got_ndx(List * lst, Gotndx * pgnp, Gotref gref, Ofl_desc * ofl,
    Rel_desc * rsp, Sym_desc * sdp)
{
	Xword		raddend;
	Gotndx		*gnp, *_gnp;
	Listnode	*lnp, *plnp;
	uint_t		gotents;

	raddend = rsp->rel_raddend;
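	/*
	 * If a GOT entry with a matching addend and reference model has
	 * already been assigned, simply reuse it.  Otherwise a new entry
	 * is allocated below; note that TLS_GD and TLS_LD references each
	 * reserve a pair of GOT slots (module id plus offset).
	 */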
	if (pgnp && (pgnp->gn_addend == raddend) &&
	    (pgnp->gn_gotref == gref))
		return (1);

	if ((gref == GOT_REF_TLSGD) || (gref == GOT_REF_TLSLD))
		gotents = 2;
	else
		gotents = 1;

	plnp = 0;
	for (LIST_TRAVERSE(lst, lnp, _gnp)) {
		if (_gnp->gn_addend > raddend)
			break;
		plnp = lnp;
	}

	/*
	 * Allocate a new entry.
	 */
	if ((gnp = libld_calloc(sizeof (Gotndx), 1)) == 0)
		return (S_ERROR);
	gnp->gn_addend = raddend;
	gnp->gn_gotndx = ofl->ofl_gotcnt;
	gnp->gn_gotref = gref;

	ofl->ofl_gotcnt += gotents;

	if (gref == GOT_REF_TLSLD) {
		ofl->ofl_tlsldgotndx = gnp;
		return (1);
	}

	if (plnp == 0) {
		/*
		 * Insert at head of list
		 */
		if (list_prependc(lst, (void *)gnp) == 0)
			return (S_ERROR);
	} else if (_gnp->gn_addend > raddend) {
		/*
		 * Insert in middle of list
		 */
		if (list_insertc(lst, (void *)gnp, plnp) == 0)
			return (S_ERROR);
	} else {
		/*
		 * Append to tail of list
		 */
		if (list_appendc(lst, (void *)gnp) == 0)
			return (S_ERROR);
	}
	return (1);
}

void
ld_assign_plt_ndx(Sym_desc * sdp, Ofl_desc *ofl)
{
	sdp->sd_aux->sa_PLTndx = 1 + ofl->ofl_pltcnt++;
	sdp->sd_aux->sa_PLTGOTndx = ofl->ofl_gotcnt++;
	ofl->ofl_flags |= FLG_OF_BLDGOT;
}

static uchar_t plt0_template[M_PLT_ENTSIZE] = {
/* 0x00 PUSHQ GOT+8(%rip) */	0xff, 0x35, 0x00, 0x00, 0x00, 0x00,
/* 0x06 JMP   *GOT+16(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
/* 0x0c NOP */			0x90,
/* 0x0d NOP */			0x90,
/* 0x0e NOP */			0x90,
/* 0x0f NOP */			0x90
};

/*
 * Initializes .got[0] with the _DYNAMIC symbol value.
 */
uintptr_t
ld_fillin_gotplt(Ofl_desc *ofl)
{
	Word	flags = ofl->ofl_flags;
	Word	dtflags1 = ofl->ofl_dtflags_1;

	if (ofl->ofl_osgot) {
		Sym_desc	*sdp;

		if ((sdp = ld_sym_find(MSG_ORIG(MSG_SYM_DYNAMIC_U),
		    SYM_NOHASH, 0, ofl)) != NULL) {
			uchar_t	*genptr;

			genptr = ((uchar_t *)ofl->ofl_osgot->os_outdata->d_buf +
			    (M_GOT_XDYNAMIC * M_GOT_ENTSIZE));
			/* LINTED */
			*(Xword *)genptr = sdp->sd_sym->st_value;
		}
	}

	/*
	 * Fill in the reserved slot in the procedure linkage table; the first
	 * entry is:
	 *	0x00 PUSHQ	GOT+8(%rip)	 # GOT[1]
	 *	0x06 JMP	*GOT+16(%rip)	 # GOT[2]
	 *	0x0c NOP
	 *	0x0d NOP
	 *	0x0e NOP
	 *	0x0f NOP
	 */
	if ((flags & FLG_OF_DYNAMIC) && ofl->ofl_osplt) {
		uchar_t	*pltent;
		Xword	val1;

		pltent = (uchar_t *)ofl->ofl_osplt->os_outdata->d_buf;
		bcopy(plt0_template, pltent, sizeof (plt0_template));

		/*
		 * fill in:
		 *	PUSHQ	GOT + 8(%rip)
		 *
		 * Note: 0x06 below represents the offset to the
		 *	 next instruction - which is what %rip will
		 *	 be pointing at.
		 */
		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    (M_GOT_XLINKMAP * M_GOT_ENTSIZE) -
		    ofl->ofl_osplt->os_shdr->sh_addr - 0x06;

		/*
		 * If '-z noreloc' is specified - skip the do_reloc
		 * stage.
		 */
		if ((flags & FLG_OF_RELOBJ) ||
		    !(dtflags1 & DF_1_NORELOC)) {
			if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x02],
			    &val1, MSG_ORIG(MSG_SYM_PLTENT),
			    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_PLT_PLT0FAIL));
				return (S_ERROR);
			}
		}

		/*
		 * fill in:
		 *	JMP	*GOT+16(%rip)
		 */
		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    (M_GOT_XRTLD * M_GOT_ENTSIZE) -
		    ofl->ofl_osplt->os_shdr->sh_addr - 0x0c;

		/*
		 * If '-z noreloc' is specified - skip the do_reloc
		 * stage.
		 */
		if ((flags & FLG_OF_RELOBJ) ||
		    !(dtflags1 & DF_1_NORELOC)) {
			if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x08],
			    &val1, MSG_ORIG(MSG_SYM_PLTENT),
			    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_PLT_PLT0FAIL));
				return (S_ERROR);
			}
		}
	}
	return (1);
}