/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include	<string.h>
#include	<stdio.h>
#include	<strings.h>
#include	<sys/elf_amd64.h>
#include	<debug.h>
#include	<reloc.h>
#include	"msg.h"
#include	"_libld.h"

Word
ld_init_rel(Rel_desc *reld, void *reloc)
{
	Rela	*rel = (Rela *)reloc;

	/* LINTED */
	reld->rel_rtype = (Word)ELF_R_TYPE(rel->r_info);
	reld->rel_roffset = rel->r_offset;
	reld->rel_raddend = rel->r_addend;
	reld->rel_typedata = 0;

	reld->rel_flags |= FLG_REL_RELA;

	return ((Word)ELF_R_SYM(rel->r_info));
}

void
ld_mach_eflags(Ehdr *ehdr, Ofl_desc *ofl)
{
	ofl->ofl_dehdr->e_flags |= ehdr->e_flags;
}

void
ld_mach_make_dynamic(Ofl_desc *ofl, size_t *cnt)
{
	if (!(ofl->ofl_flags & FLG_OF_RELOBJ)) {
		/*
		 * Create this entry if we are going to create a PLT table.
		 */
		if (ofl->ofl_pltcnt)
			(*cnt)++;		/* DT_PLTGOT */
	}
}

void
ld_mach_update_odynamic(Ofl_desc *ofl, Dyn **dyn)
{
	if (((ofl->ofl_flags & FLG_OF_RELOBJ) == 0) && ofl->ofl_pltcnt) {
		(*dyn)->d_tag = DT_PLTGOT;
		if (ofl->ofl_osgot)
			(*dyn)->d_un.d_ptr = ofl->ofl_osgot->os_shdr->sh_addr;
		else
			(*dyn)->d_un.d_ptr = 0;
		(*dyn)++;
	}
}

Xword
ld_calc_plt_addr(Sym_desc *sdp, Ofl_desc *ofl)
{
	Xword	value;

	value = (Xword)(ofl->ofl_osplt->os_shdr->sh_addr) +
	    M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) * M_PLT_ENTSIZE);
	return (value);
}

/*
 * Build a single plt entry - code is:
 *	JMP	*name1@GOTPCREL(%rip)
 *	PUSHQ	$index
 *	JMP	.PLT0
 */
static uchar_t pltn_entry[M_PLT_ENTSIZE] = {
/* 0x00 jmpq *name1@GOTPCREL(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
/* 0x06 pushq $index */			0x68, 0x00, 0x00, 0x00, 0x00,
/* 0x0b jmpq  .plt0(%rip) */		0xe9, 0x00, 0x00, 0x00, 0x00
/* 0x10 */
};

static uintptr_t
plt_entry(Ofl_desc * ofl, Sym_desc * sdp)
{
	uchar_t		*plt0, *pltent, *gotent;
	Sword		plt_off;
	Word		got_off;
	Xword		val1;
	Word		flags = ofl->ofl_flags;
	Word		dtflags1 = ofl->ofl_dtflags_1;

	got_off = sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
	plt_off = M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) *
	    M_PLT_ENTSIZE);
	plt0 = (uchar_t *)(ofl->ofl_osplt->os_outdata->d_buf);
	pltent = plt0 + plt_off;
	gotent = (uchar_t *)(ofl->ofl_osgot->os_outdata->d_buf) + got_off;

	bcopy(pltn_entry, pltent, sizeof (pltn_entry));
	/*
	 * Fill in the got entry with the address of the next instruction.
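	 * (Explanatory note: this appears to be the usual amd64 lazy-binding
	 * arrangement.  Because the entry's jmpq indirects through this GOT
	 * slot, pointing the slot at the instruction that follows the jmpq -
	 * the pushq of the PLT index - means an unresolved call falls through
	 * to push its index and branch to PLT0, which enters the runtime
	 * linker; once resolved, the slot is overwritten with the symbol's
	 * real address.)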
	 */
	/* LINTED */
	*(Word *)gotent = ofl->ofl_osplt->os_shdr->sh_addr + plt_off +
	    M_PLT_INSSIZE;

	/*
	 * patchup:
	 *	jmpq	*name1@gotpcrel(%rip)
	 *
	 * NOTE: 0x06 represents next instruction.
	 */
	val1 = (ofl->ofl_osgot->os_shdr->sh_addr + got_off) -
	    (ofl->ofl_osplt->os_shdr->sh_addr + plt_off) - 0x06;

	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x02],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}

	/*
	 * patchup:
	 *	pushq	$pltndx
	 */
	val1 = (Xword)(sdp->sd_aux->sa_PLTndx - 1);
	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_32, &pltent[0x07],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}

	/*
	 * patchup:
	 *	jmpq	.plt0(%rip)
	 * NOTE: 0x10 represents next instruction.  The rather complex series
	 * of casts is necessary to sign extend an offset into a 64-bit value
	 * while satisfying various compiler error checks.  Handle with care.
	 */
	val1 = (Xword)((intptr_t)((uintptr_t)plt0 -
	    (uintptr_t)(&pltent[0x10])));

	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_PC32, &pltent[0x0c],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}
	return (1);
}

uintptr_t
ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl)
{
	Os_desc		*relosp, *osp = 0;
	Word		ndx;
	Xword		roffset, value;
	Sxword		raddend;
	Rela		rea;
	char		*relbits;
	Sym_desc	*sdp, *psym = (Sym_desc *)0;
	int		sectmoved = 0;

	raddend = orsp->rel_raddend;
	sdp = orsp->rel_sym;

	/*
	 * If the section this relocation is against has been discarded
	 * (-zignore), then also discard (skip) the relocation itself.
	 */
	if (orsp->rel_isdesc && ((orsp->rel_flags &
	    (FLG_REL_GOT | FLG_REL_BSS | FLG_REL_PLT | FLG_REL_NOINFO)) == 0) &&
	    (orsp->rel_isdesc->is_flags & FLG_IS_DISCARD)) {
		DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml, M_MACH, orsp));
		return (1);
	}

	/*
	 * If this is a relocation against a move table, or expanded move
	 * table, adjust the relocation entries.
	 */
	if (orsp->rel_move)
		ld_adj_movereloc(ofl, orsp);

	/*
	 * If this is a relocation against a section then we need to adjust the
	 * raddend field to compensate for the new position of the input section
	 * within the new output section.
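	 * (In effect the adjusted addend becomes the reference's offset within
	 * the output image: the input section's offset within its output
	 * section is always added, and for allocatable sections the output
	 * section's virtual address is added as well - see the code below.)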
	 */
	if (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION) {
		if (ofl->ofl_parsym.head &&
		    (sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
		    /* LINTED */
		    (psym = ld_am_I_partial(orsp, orsp->rel_raddend))) {
			DBG_CALL(Dbg_move_outsctadj(ofl->ofl_lml, psym));
			sectmoved = 1;
			if (ofl->ofl_flags & FLG_OF_RELOBJ)
				raddend = psym->sd_sym->st_value;
			else
				raddend = psym->sd_sym->st_value -
				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
			/* LINTED */
			raddend += (Off)_elf_getxoff(psym->sd_isc->is_indata);
			if (psym->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
				raddend +=
				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
		} else {
			/* LINTED */
			raddend += (Off)_elf_getxoff(sdp->sd_isc->is_indata);
			if (sdp->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
				raddend +=
				    sdp->sd_isc->is_osdesc->os_shdr->sh_addr;
		}
	}

	value = sdp->sd_sym->st_value;

	if (orsp->rel_flags & FLG_REL_GOT) {
		/*
		 * Note: for GOT relative relocations on amd64
		 *	 we discard the addend.  It was relevant
		 *	 to the reference - not to the data item
		 *	 being referenced (ie: that -4 thing).
		 */
		raddend = 0;
		osp = ofl->ofl_osgot;
		roffset = ld_calc_got_offset(orsp, ofl);

	} else if (orsp->rel_flags & FLG_REL_PLT) {
		/*
		 * Note that relocations for PLTs actually
		 * cause a relocation against the GOT.
		 */
		osp = ofl->ofl_osplt;
		roffset = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
		raddend = 0;
		if (plt_entry(ofl, sdp) == S_ERROR)
			return (S_ERROR);

	} else if (orsp->rel_flags & FLG_REL_BSS) {
		/*
		 * This must be a R_AMD64_COPY.  For these set the roffset to
		 * point to the new symbol's location.
		 */
		osp = ofl->ofl_isbss->is_osdesc;
		roffset = value;

		/*
		 * The raddend doesn't mean anything in a R_AMD64_COPY
		 * relocation.  Null it out because it can confuse people.
		 */
		raddend = 0;
	} else {
		osp = orsp->rel_osdesc;

		/*
		 * Calculate virtual offset of reference point; equals offset
		 * into section + vaddr of section for loadable sections, or
		 * offset plus section displacement for nonloadable sections.
		 */
		roffset = orsp->rel_roffset +
		    (Off)_elf_getxoff(orsp->rel_isdesc->is_indata);
		if (!(ofl->ofl_flags & FLG_OF_RELOBJ))
			roffset += orsp->rel_isdesc->is_osdesc->
			    os_shdr->sh_addr;
	}

	if ((osp == 0) || ((relosp = osp->os_relosdesc) == 0))
		relosp = ofl->ofl_osrel;

	/*
	 * Assign the symbol's index for the output relocation.  If the
	 * relocation refers to a SECTION symbol then its index is based upon
	 * the output section's symbol index.  Otherwise the index can be
	 * derived from the symbol's index itself.
	 */
	if (orsp->rel_rtype == R_AMD64_RELATIVE)
		ndx = STN_UNDEF;
	else if ((orsp->rel_flags & FLG_REL_SCNNDX) ||
	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION)) {
		if (sectmoved == 0) {
			/*
			 * Check for a null input section.  This can
			 * occur if this relocation references a symbol
			 * generated by sym_add_sym().
			 */
			if ((sdp->sd_isc != 0) &&
			    (sdp->sd_isc->is_osdesc != 0))
				ndx = sdp->sd_isc->is_osdesc->os_scnsymndx;
			else
				ndx = sdp->sd_shndx;
		} else
			ndx = ofl->ofl_sunwdata1ndx;
	} else
		ndx = sdp->sd_symndx;

	/*
	 * Add the symbol's 'value' to the addend field.
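	 * (FLG_REL_ADVAL is set, for instance, when ld_reloc_local() rewrites
	 * a local binding as R_AMD64_RELATIVE: folding the symbol value into
	 * the addend lets the runtime linker apply the relocation knowing
	 * only the object's base address.)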
	 */
	if (orsp->rel_flags & FLG_REL_ADVAL)
		raddend += value;

	/*
	 * The addend field for R_AMD64_DTPMOD64 means nothing.  The addend
	 * is propagated in the corresponding R_AMD64_DTPOFF64 relocation.
	 */
	if (orsp->rel_rtype == R_AMD64_DTPMOD64)
		raddend = 0;

	relbits = (char *)relosp->os_outdata->d_buf;

	rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
	rea.r_offset = roffset;
	rea.r_addend = raddend;
	DBG_CALL(Dbg_reloc_out(ofl, ELF_DBG_LD, SHT_RELA, &rea, relosp->os_name,
	    orsp->rel_sname));

	/*
	 * Assert we haven't walked off the end of our relocation table.
	 */
	assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);

	(void) memcpy((relbits + relosp->os_szoutrels),
	    (char *)&rea, sizeof (Rela));
	relosp->os_szoutrels += (Xword)sizeof (Rela);

	/*
	 * Determine if this relocation is against a non-writable, allocatable
	 * section.  If so we may need to provide a text relocation diagnostic.
	 * Note that relocations against the .plt (R_AMD64_JUMP_SLOT) actually
	 * result in modifications to the .got.
	 */
	if (orsp->rel_rtype == R_AMD64_JUMP_SLOT)
		osp = ofl->ofl_osgot;

	ld_reloc_remain_entry(orsp, osp, ofl);
	return (1);
}

/*
 * amd64 instructions for TLS processing
 */
static uchar_t tlsinstr_gd_ie[] = {
	/*
	 *	0x00 movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00,
	/*
	 *	0x09 addq x@gottpoff(%rip), %rax
	 */
	0x48, 0x03, 0x05, 0x00, 0x00,
	0x00, 0x00
};

static uchar_t tlsinstr_gd_le[] = {
	/*
	 *	0x00 movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00,
	/*
	 *	0x09 leaq x@gottpoff(%rip), %rax
	 */
	0x48, 0x8d, 0x80, 0x00, 0x00,
	0x00, 0x00
};

static uchar_t tlsinstr_ld_le[] = {
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00
};


static Fixupret
tls_fixups(Ofl_desc *ofl, Rel_desc *arsp)
{
	Sym_desc	*sdp = arsp->rel_sym;
	Word		rtype = arsp->rel_rtype;
	uchar_t		*offset;

	offset = (uchar_t *)((uintptr_t)arsp->rel_roffset +
	    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->is_indata) +
	    (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf);

	if (sdp->sd_ref == REF_DYN_NEED) {
		/*
		 * IE reference model
		 */
		switch (rtype) {
		case R_AMD64_TLSGD:
			/*
			 * GD -> IE
			 *
			 * Transition:
			 *	0x00 .byte 0x66
			 *	0x01 leaq x@tlsgd(%rip), %rdi
			 *	0x08 .word 0x6666
			 *	0x0a rex64
			 *	0x0b call __tls_get_addr@plt
			 *	0x10
			 * To:
			 *	0x00 movq %fs:0, %rax
			 *	0x09 addq x@gottpoff(%rip), %rax
			 *	0x10
			 */
			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
			    R_AMD64_GOTTPOFF, arsp));
			arsp->rel_rtype = R_AMD64_GOTTPOFF;
			arsp->rel_roffset += 8;
			arsp->rel_raddend = (Sxword)-4;

			/*
			 * Adjust 'offset' to beginning of instruction
			 * sequence.
			 */
			offset -= 4;
			(void) memcpy(offset, tlsinstr_gd_ie,
			    sizeof (tlsinstr_gd_ie));
			return (FIX_RELOC);

		case R_AMD64_PLT32:
			/*
			 * Fixup done via the TLS_GD relocation.
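			 * (The R_AMD64_TLSGD case above already rewrote the
			 * entire GD code sequence, including the call to
			 * __tls_get_addr, so this companion relocation on the
			 * call needs no further patching.)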
			 */
			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
			    R_AMD64_NONE, arsp));
			return (FIX_DONE);
		}
	}

	/*
	 * LE reference model
	 */
	switch (rtype) {
	case R_AMD64_TLSGD:
		/*
		 * GD -> LE
		 *
		 * Transition:
		 *	0x00 .byte 0x66
		 *	0x01 leaq x@tlsgd(%rip), %rdi
		 *	0x08 .word 0x6666
		 *	0x0a rex64
		 *	0x0b call __tls_get_addr@plt
		 *	0x10
		 * To:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 leaq x@tpoff(%rax), %rax
		 *	0x10
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_TPOFF32, arsp));
		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_roffset += 8;
		arsp->rel_raddend = 0;

		/*
		 * Adjust 'offset' to beginning of instruction sequence.
		 */
		offset -= 4;
		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
		return (FIX_RELOC);

	case R_AMD64_GOTTPOFF:
		/*
		 * IE -> LE
		 *
		 * Transition:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 addq x@gottpoff(%rip), %rax
		 *	0x10
		 * To:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 leaq x@tpoff(%rax), %rax
		 *	0x10
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_TPOFF32, arsp));
		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_raddend = 0;

		/*
		 * Adjust 'offset' to beginning of instruction sequence.
		 */
		offset -= 12;

		/*
		 * Same code sequence used in the GD -> LE transition.
		 */
		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
		return (FIX_RELOC);

	case R_AMD64_TLSLD:
		/*
		 * LD -> LE
		 *
		 * Transition:
		 *	0x00 leaq x1@tlsgd(%rip), %rdi
		 *	0x07 call __tls_get_addr@plt
		 *	0x0c
		 * To:
		 *	0x00 .byte 0x66
		 *	0x01 .byte 0x66
		 *	0x02 .byte 0x66
		 *	0x03 movq %fs:0, %rax
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_NONE, arsp));
		offset -= 3;
		(void) memcpy(offset, tlsinstr_ld_le, sizeof (tlsinstr_ld_le));
		return (FIX_DONE);

	case R_AMD64_DTPOFF32:
		/*
		 * LD -> LE
		 *
		 * Transition:
		 *	0x00 leaq x1@dtpoff(%rax), %rcx
		 * To:
		 *	0x00 leaq x1@tpoff(%rax), %rcx
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    R_AMD64_TPOFF32, arsp));
		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_raddend = 0;
		return (FIX_RELOC);
	}

	return (FIX_RELOC);
}

uintptr_t
ld_do_activerelocs(Ofl_desc *ofl)
{
	Rel_desc	*arsp;
	Rel_cache	*rcp;
	Listnode	*lnp;
	uintptr_t	return_code = 1;
	Word		flags = ofl->ofl_flags;
	Word		dtflags1 = ofl->ofl_dtflags_1;

	if (ofl->ofl_actrels.head)
		DBG_CALL(Dbg_reloc_doact_title(ofl->ofl_lml));

	/*
	 * Process active relocations.
	 */
	for (LIST_TRAVERSE(&ofl->ofl_actrels, lnp, rcp)) {
		/* LINTED */
		for (arsp = (Rel_desc *)(rcp + 1);
		    arsp < rcp->rc_free; arsp++) {
			uchar_t		*addr;
			Xword		value;
			Sym_desc	*sdp;
			const char	*ifl_name;
			Xword		refaddr;
			int		moved = 0;
			Gotref		gref;

			/*
			 * If the section this relocation is against has been
			 * discarded (-zignore), then discard (skip) the
			 * relocation itself.
			 */
			if ((arsp->rel_isdesc->is_flags & FLG_IS_DISCARD) &&
			    ((arsp->rel_flags &
			    (FLG_REL_GOT | FLG_REL_BSS |
			    FLG_REL_PLT | FLG_REL_NOINFO)) == 0)) {
				DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml,
				    M_MACH, arsp));
				continue;
			}

			/*
			 * We determine what the 'got reference'
			 * model (if required) is at this point.
			 * This needs to be done before tls_fixup() since
			 * it may 'transition' our instructions.
			 *
			 * The got table entries have already been assigned,
			 * and we bind to those initial entries.
			 */
			if (arsp->rel_flags & FLG_REL_DTLS)
				gref = GOT_REF_TLSGD;
			else if (arsp->rel_flags & FLG_REL_MTLS)
				gref = GOT_REF_TLSLD;
			else if (arsp->rel_flags & FLG_REL_STLS)
				gref = GOT_REF_TLSIE;
			else
				gref = GOT_REF_GENERIC;

			/*
			 * Perform any required TLS fixups.
			 */
			if (arsp->rel_flags & FLG_REL_TLSFIX) {
				Fixupret	ret;

				if ((ret = tls_fixups(ofl, arsp)) == FIX_ERROR)
					return (S_ERROR);
				if (ret == FIX_DONE)
					continue;
			}

			/*
			 * If this is a relocation against a move table, or
			 * expanded move table, adjust the relocation entries.
			 */
			if (arsp->rel_move)
				ld_adj_movereloc(ofl, arsp);

			sdp = arsp->rel_sym;
			refaddr = arsp->rel_roffset +
			    (Off)_elf_getxoff(arsp->rel_isdesc->is_indata);

			if ((arsp->rel_flags & FLG_REL_CLVAL) ||
			    (arsp->rel_flags & FLG_REL_GOTCL))
				value = 0;
			else if (ELF_ST_TYPE(sdp->sd_sym->st_info) ==
			    STT_SECTION) {
				Sym_desc	*sym;

				/*
				 * The value for a symbol pointing to a SECTION
				 * is based off of that section's position.
				 *
				 * The second argument of ld_am_I_partial() is
				 * the value stored at the target address to
				 * which the relocation is going to be applied.
				 */
				if ((sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
				    /* LINTED */
				    (sym = ld_am_I_partial(arsp, *(Xword *)
				    ((uchar_t *)
				    arsp->rel_isdesc->is_indata->d_buf +
				    arsp->rel_roffset)))) {
					/*
					 * If the symbol is moved,
					 * adjust the value.
					 */
					value = sym->sd_sym->st_value;
					moved = 1;
				} else {
					value = _elf_getxoff(
					    sdp->sd_isc->is_indata);
					if (sdp->sd_isc->is_shdr->sh_flags &
					    SHF_ALLOC)
						value +=
						    sdp->sd_isc->is_osdesc->
						    os_shdr->sh_addr;
				}
				if (sdp->sd_isc->is_shdr->sh_flags & SHF_TLS)
					value -= ofl->ofl_tlsphdr->p_vaddr;

			} else if (IS_SIZE(arsp->rel_rtype)) {
				/*
				 * Size relocations require the symbol's size.
				 */
				value = sdp->sd_sym->st_size;
			} else {
				/*
				 * Else the value is the symbol's value.
				 */
				value = sdp->sd_sym->st_value;
			}

			/*
			 * Relocation against the GLOBAL_OFFSET_TABLE.
			 */
			if (arsp->rel_flags & FLG_REL_GOT)
				arsp->rel_osdesc = ofl->ofl_osgot;

			/*
			 * If loadable and not producing a relocatable object,
			 * add the section's virtual address to the reference
			 * address.
			 */
			if ((arsp->rel_flags & FLG_REL_LOAD) &&
			    ((flags & FLG_OF_RELOBJ) == 0))
				refaddr += arsp->rel_isdesc->is_osdesc->
				    os_shdr->sh_addr;

			/*
			 * If this entry has a PLT assigned to it, its
			 * value is actually the address of the PLT (and
			 * not the address of the function).
			 */
			if (IS_PLT(arsp->rel_rtype)) {
				if (sdp->sd_aux && sdp->sd_aux->sa_PLTndx)
					value = ld_calc_plt_addr(sdp, ofl);
			}

			/*
			 * Add the relocation's addend to the value.  Add the
			 * extra relocation addend if needed.
			 *
			 * Note: for GOT relative relocations on amd64
			 *	 we discard the addend.  It was relevant
			 *	 to the reference - not to the data item
			 *	 being referenced (ie: that -4 thing).
			 */
			if ((arsp->rel_flags & FLG_REL_GOT) == 0)
				value += arsp->rel_raddend;

			/*
			 * Determine whether the value needs further adjustment.
			 * Filter through the attributes of the relocation to
			 * determine what adjustment is required.  Note, many
			 * of the following cases are only applicable when a
			 * .got is present.  As a .got is not generated when a
			 * relocatable object is being built, any adjustments
			 * that require a .got need to be skipped.
			 */
			if ((arsp->rel_flags & FLG_REL_GOT) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Xword		R1addr;
				uintptr_t	R2addr;
				Word		gotndx;
				Gotndx		*gnp;

				/*
				 * Perform relocation against GOT table.  Since
				 * this doesn't fit exactly into a relocation
				 * we place the appropriate value in the GOT
				 * directly.
				 *
				 * Calculate offset into GOT at which to apply
				 * the relocation.
				 */
				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
				    ofl, arsp);
				assert(gnp);

				if (arsp->rel_rtype == R_AMD64_DTPOFF64)
					gotndx = gnp->gn_gotndx + 1;
				else
					gotndx = gnp->gn_gotndx;

				R1addr = (Xword)(gotndx * M_GOT_ENTSIZE);

				/*
				 * Add the address of the GOT's data buffer.
				 */
				R2addr = R1addr + (uintptr_t)
				    arsp->rel_osdesc->os_outdata->d_buf;

				DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml,
				    ELF_DBG_LD, M_MACH, SHT_RELA,
				    arsp->rel_rtype, R1addr, value,
				    arsp->rel_sname, arsp->rel_osdesc));

				/*
				 * And do it.
				 */
				*(Xword *)R2addr = value;
				continue;

			} else if (IS_GOT_BASED(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				value -= ofl->ofl_osgot->os_shdr->sh_addr;

			} else if (IS_GOTPCREL(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				/*
				 * Calculation:
				 *	G + GOT + A - P
				 */
				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
				    gref, ofl, arsp);
				assert(gnp);
				value = (Xword)(ofl->ofl_osgot->os_shdr->
				    sh_addr) + ((Xword)gnp->gn_gotndx *
				    M_GOT_ENTSIZE) + arsp->rel_raddend -
				    refaddr;

			} else if (IS_GOT_PC(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				value = (Xword)(ofl->ofl_osgot->os_shdr->
				    sh_addr) - refaddr + arsp->rel_raddend;

			} else if ((IS_PC_RELATIVE(arsp->rel_rtype)) &&
			    (((flags & FLG_OF_RELOBJ) == 0) ||
			    (arsp->rel_osdesc == sdp->sd_isc->is_osdesc))) {
				value -= refaddr;

			} else if (IS_TLS_INS(arsp->rel_rtype) &&
			    IS_GOT_RELATIVE(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
				    ofl, arsp);
				assert(gnp);
				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;

			} else if (IS_GOT_RELATIVE(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
				    gref, ofl, arsp);
				assert(gnp);
				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;

			} else if ((arsp->rel_flags & FLG_REL_STLS) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Xword	tlsstatsize;

				/*
				 * This is the LE TLS reference model.  Static
				 * offset is hard-coded.
				 */
				tlsstatsize =
				    S_ROUND(ofl->ofl_tlsphdr->p_memsz,
				    M_TLSSTATALIGN);
				value = tlsstatsize - value;

				/*
				 * Since this code is fixed up, it assumes a
				 * negative offset that can be added to the
				 * thread pointer.
				 */
				if (arsp->rel_rtype == R_AMD64_TPOFF32)
					value = -value;
			}

			if (arsp->rel_isdesc->is_file)
				ifl_name = arsp->rel_isdesc->is_file->ifl_name;
			else
				ifl_name = MSG_INTL(MSG_STR_NULL);

			/*
			 * Make sure we have data to relocate.
			 * Compiler and assembler developers have been known
			 * to generate relocations against invalid sections
			 * (normally .bss), so for their benefit give them
			 * sufficient information to help analyze the problem.
			 * End users should never see this.
			 */
			if (arsp->rel_isdesc->is_indata->d_buf == 0) {
				Conv_inv_buf_t	inv_buf;

				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_REL_EMPTYSEC),
				    conv_reloc_amd64_type(arsp->rel_rtype,
				    0, &inv_buf), ifl_name,
				    demangle(arsp->rel_sname),
				    arsp->rel_isdesc->is_name);
				return (S_ERROR);
			}

			/*
			 * Get the address of the data item we need to modify.
			 */
			addr = (uchar_t *)((uintptr_t)arsp->rel_roffset +
			    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->
			    is_indata));

			DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml, ELF_DBG_LD,
			    M_MACH, SHT_RELA, arsp->rel_rtype, EC_NATPTR(addr),
			    value, arsp->rel_sname, arsp->rel_osdesc));
			addr += (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf;

			if ((((uintptr_t)addr - (uintptr_t)ofl->ofl_nehdr) >
			    ofl->ofl_size) || (arsp->rel_roffset >
			    arsp->rel_osdesc->os_shdr->sh_size)) {
				int		class;
				Conv_inv_buf_t	inv_buf;

				if (((uintptr_t)addr -
				    (uintptr_t)ofl->ofl_nehdr) > ofl->ofl_size)
					class = ERR_FATAL;
				else
					class = ERR_WARNING;

				eprintf(ofl->ofl_lml, class,
				    MSG_INTL(MSG_REL_INVALOFFSET),
				    conv_reloc_amd64_type(arsp->rel_rtype,
				    0, &inv_buf), ifl_name,
				    arsp->rel_isdesc->is_name,
				    demangle(arsp->rel_sname),
				    EC_ADDR((uintptr_t)addr -
				    (uintptr_t)ofl->ofl_nehdr));

				if (class == ERR_FATAL) {
					return_code = S_ERROR;
					continue;
				}
			}

			/*
			 * The relocation is additive.  Ignore the previous
			 * symbol value if this local partial symbol is
			 * expanded.
			 */
			if (moved)
				value -= *addr;

			/*
			 * If '-z noreloc' is specified - skip the do_reloc
			 * stage.
			 */
			if ((flags & FLG_OF_RELOBJ) ||
			    !(dtflags1 & DF_1_NORELOC)) {
				if (do_reloc((uchar_t)arsp->rel_rtype,
				    addr, &value, arsp->rel_sname, ifl_name,
				    ofl->ofl_lml) == 0)
					return_code = S_ERROR;
			}
		}
	}
	return (return_code);
}

uintptr_t
ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
{
	Rel_desc	*orsp;
	Rel_cache	*rcp;
	Sym_desc	*sdp = rsp->rel_sym;

	/*
	 * Static executables *do not* want any relocations against them.
	 * Since our engine still creates relocations against a WEAK UNDEFINED
	 * symbol in a static executable, it's best to disable them here
	 * instead of throughout the relocation code.
	 */
	if ((ofl->ofl_flags & (FLG_OF_STATIC | FLG_OF_EXEC)) ==
	    (FLG_OF_STATIC | FLG_OF_EXEC))
		return (1);

	/*
	 * If no relocation cache structures are available, allocate
	 * a new one and link it into the cache list.
	 */
	if ((ofl->ofl_outrels.tail == 0) ||
	    ((rcp = (Rel_cache *)ofl->ofl_outrels.tail->data) == 0) ||
	    ((orsp = rcp->rc_free) == rcp->rc_end)) {
		static size_t	nextsize = 0;
		size_t		size;

		/*
		 * Output relocation numbers can vary considerably between
		 * building executables or shared objects (pic vs. non-pic),
		 * etc.  But, they typically aren't very large, so for these
		 * objects use a standard bucket size.  For building relocatable
		 * objects, typically there will be an output relocation for
		 * every input relocation.
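		 * (Each cache bucket is allocated below as a Rel_cache header
		 * immediately followed by 'size' Rel_desc slots; rc_free and
		 * rc_end bracket the unused portion of the bucket.)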
		 */
		if (nextsize == 0) {
			if (ofl->ofl_flags & FLG_OF_RELOBJ) {
				if ((size = ofl->ofl_relocincnt) == 0)
					size = REL_LOIDESCNO;
				if (size > REL_HOIDESCNO)
					nextsize = REL_HOIDESCNO;
				else
					nextsize = REL_LOIDESCNO;
			} else
				nextsize = size = REL_HOIDESCNO;
		} else
			size = nextsize;

		size = size * sizeof (Rel_desc);

		if (((rcp = libld_malloc(sizeof (Rel_cache) + size)) == 0) ||
		    (list_appendc(&ofl->ofl_outrels, rcp) == 0))
			return (S_ERROR);

		/* LINTED */
		rcp->rc_free = orsp = (Rel_desc *)(rcp + 1);
		/* LINTED */
		rcp->rc_end = (Rel_desc *)((char *)rcp->rc_free + size);
	}

	/*
	 * If we are adding an output relocation against a section
	 * symbol (non-RELATIVE) then mark that section.  These sections
	 * will be added to the .dynsym symbol table.
	 */
	if (sdp && (rsp->rel_rtype != M_R_RELATIVE) &&
	    ((flags & FLG_REL_SCNNDX) ||
	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION))) {

		/*
		 * If this is a COMMON symbol - no output section
		 * exists yet - (it's created as part of sym_validate()).
		 * So - we mark here that when it's created it should
		 * be tagged with the FLG_OS_OUTREL flag.
		 */
		if ((sdp->sd_flags & FLG_SY_SPECSEC) &&
		    (sdp->sd_sym->st_shndx == SHN_COMMON)) {
			if (ELF_ST_TYPE(sdp->sd_sym->st_info) != STT_TLS)
				ofl->ofl_flags1 |= FLG_OF1_BSSOREL;
			else
				ofl->ofl_flags1 |= FLG_OF1_TLSOREL;
		} else {
			Os_desc	*osp = sdp->sd_isc->is_osdesc;

			if (osp && ((osp->os_flags & FLG_OS_OUTREL) == 0)) {
				ofl->ofl_dynshdrcnt++;
				osp->os_flags |= FLG_OS_OUTREL;
			}
		}
	}

	*orsp = *rsp;
	orsp->rel_flags |= flags;

	rcp->rc_free++;
	ofl->ofl_outrelscnt++;

	if (flags & FLG_REL_GOT)
		ofl->ofl_relocgotsz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_PLT)
		ofl->ofl_relocpltsz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_BSS)
		ofl->ofl_relocbsssz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_NOINFO)
		ofl->ofl_relocrelsz += (Xword)sizeof (Rela);
	else
		orsp->rel_osdesc->os_szoutrels += (Xword)sizeof (Rela);

	if (orsp->rel_rtype == M_R_RELATIVE)
		ofl->ofl_relocrelcnt++;

	/*
	 * We don't perform sorting on PLT relocations because
	 * they have already been assigned a PLT index and if we
	 * were to sort them we would have to re-assign the plt indexes.
	 */
	if (!(flags & FLG_REL_PLT))
		ofl->ofl_reloccnt++;

	/*
	 * Ensure a GLOBAL_OFFSET_TABLE is generated if required.
	 */
	if (IS_GOT_REQUIRED(orsp->rel_rtype))
		ofl->ofl_flags |= FLG_OF_BLDGOT;

	/*
	 * Identify and possibly warn of a displacement relocation.
	 */
	if (orsp->rel_flags & FLG_REL_DISP) {
		ofl->ofl_dtflags_1 |= DF_1_DISPRELPND;

		if (ofl->ofl_flags & FLG_OF_VERBOSE)
			ld_disp_errmsg(MSG_INTL(MSG_REL_DISPREL4), orsp, ofl);
	}
	DBG_CALL(Dbg_reloc_ors_entry(ofl->ofl_lml, ELF_DBG_LD, SHT_RELA,
	    M_MACH, orsp));
	return (1);
}

/*
 * Stub routine since register symbols are not supported on amd64.
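 * (Register symbols are a SPARC-specific construct, so any attempt to
 * process one here is reported as a fatal input error.)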
 */
/* ARGSUSED */
uintptr_t
ld_reloc_register(Rel_desc * rsp, Is_desc * isp, Ofl_desc * ofl)
{
	eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_REL_NOREG));
	return (S_ERROR);
}

/*
 * Process relocation for a LOCAL symbol.
 */
uintptr_t
ld_reloc_local(Rel_desc * rsp, Ofl_desc * ofl)
{
	Word		flags = ofl->ofl_flags;
	Sym_desc	*sdp = rsp->rel_sym;
	Word		shndx = sdp->sd_sym->st_shndx;
	Word		ortype = rsp->rel_rtype;

	/*
	 * if ((shared object) and (not pc relative relocation) and
	 *    (not against ABS symbol))
	 * then
	 *	build R_AMD64_RELATIVE
	 * fi
	 */
	if ((flags & FLG_OF_SHAROBJ) && (rsp->rel_flags & FLG_REL_LOAD) &&
	    !(IS_PC_RELATIVE(rsp->rel_rtype)) && !(IS_SIZE(rsp->rel_rtype)) &&
	    !(IS_GOT_BASED(rsp->rel_rtype)) &&
	    !(rsp->rel_isdesc != NULL &&
	    (rsp->rel_isdesc->is_shdr->sh_type == SHT_SUNW_dof)) &&
	    (((sdp->sd_flags & FLG_SY_SPECSEC) == 0) ||
	    (shndx != SHN_ABS) || (sdp->sd_aux && sdp->sd_aux->sa_symspec))) {

		/*
		 * R_AMD64_RELATIVE updates a 64-bit address.  If this
		 * relocation isn't a 64-bit binding then we can not
		 * simplify it to a RELATIVE relocation.
		 */
		if (reloc_table[ortype].re_fsize != sizeof (Addr)) {
			return (ld_add_outrel(NULL, rsp, ofl));
		}

		rsp->rel_rtype = R_AMD64_RELATIVE;
		if (ld_add_outrel(FLG_REL_ADVAL, rsp, ofl) == S_ERROR)
			return (S_ERROR);
		rsp->rel_rtype = ortype;
		return (1);
	}

	/*
	 * If the relocation is against a 'non-allocatable' section
	 * and we can not resolve it now - then give a warning
	 * message.
	 *
	 * We can not resolve the symbol if either:
	 *	a)	it's undefined
	 *	b)	it's defined in a shared library and a
	 *		COPY relocation hasn't moved it to the executable
	 *
	 * Note: because we process all of the relocations against the
	 *	text segment before any others - we know whether
	 *	or not a copy relocation will be generated before
	 *	we get here (see reloc_init()->reloc_segments()).
	 */
	if (!(rsp->rel_flags & FLG_REL_LOAD) &&
	    ((shndx == SHN_UNDEF) ||
	    ((sdp->sd_ref == REF_DYN_NEED) &&
	    ((sdp->sd_flags & FLG_SY_MVTOCOMM) == 0)))) {
		Conv_inv_buf_t	inv_buf;

		/*
		 * If the relocation is against a SHT_SUNW_ANNOTATE
		 * section - then silently ignore that the relocation
		 * can not be resolved.
		 */
		if (rsp->rel_osdesc &&
		    (rsp->rel_osdesc->os_shdr->sh_type == SHT_SUNW_ANNOTATE))
			return (0);
		(void) eprintf(ofl->ofl_lml, ERR_WARNING,
		    MSG_INTL(MSG_REL_EXTERNSYM),
		    conv_reloc_amd64_type(rsp->rel_rtype, 0, &inv_buf),
		    rsp->rel_isdesc->is_file->ifl_name,
		    demangle(rsp->rel_sname), rsp->rel_osdesc->os_name);
		return (1);
	}

	/*
	 * Perform relocation.
	 */
	return (ld_add_actrel(NULL, rsp, ofl));
}


uintptr_t
/* ARGSUSED */
ld_reloc_GOTOP(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
{
	/*
	 * Stub routine for common code compatibility; we shouldn't
	 * actually get here on amd64.
	 */
	assert(0);
	return (S_ERROR);
}

uintptr_t
ld_reloc_TLS(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
{
	Word		rtype = rsp->rel_rtype;
	Sym_desc	*sdp = rsp->rel_sym;
	Word		flags = ofl->ofl_flags;
	Gotndx		*gnp;

	/*
	 * If we're building an executable, use either the IE or LE access
	 * model.  If we're building a shared object, process any IE model.
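	 * (A dynamic executable knows its static TLS offsets at link-edit
	 * time, which is what permits the IE and LE models here.  A shared
	 * object does not, so an IE reference keeps its GOT entry and is
	 * resolved through an R_AMD64_TPOFF64 relocation at runtime.)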
	 */
	if ((flags & FLG_OF_EXEC) || (IS_TLS_IE(rtype))) {
		/*
		 * Set the DF_STATIC_TLS flag.
		 */
		ofl->ofl_dtflags |= DF_STATIC_TLS;

		if (!local || ((flags & FLG_OF_EXEC) == 0)) {
			/*
			 * Assign a GOT entry for static TLS references.
			 */
			if ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
			    GOT_REF_TLSIE, ofl, rsp)) == 0) {

				if (ld_assign_got_TLS(local, rsp, ofl, sdp,
				    gnp, GOT_REF_TLSIE, FLG_REL_STLS,
				    rtype, R_AMD64_TPOFF64, 0) == S_ERROR)
					return (S_ERROR);
			}

			/*
			 * IE access model.
			 */
			if (IS_TLS_IE(rtype))
				return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));

			/*
			 * Fixups are required for other executable models.
			 */
			return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
			    rsp, ofl));
		}

		/*
		 * LE access model.
		 */
		if (IS_TLS_LE(rtype))
			return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));

		return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
		    rsp, ofl));
	}

	/*
	 * Building a shared object.
	 *
	 * Assign a GOT entry for a dynamic TLS reference.
	 */
	if (IS_TLS_LD(rtype) && ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
	    GOT_REF_TLSLD, ofl, rsp)) == 0)) {

		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSLD,
		    FLG_REL_MTLS, rtype, R_AMD64_DTPMOD64, 0) == S_ERROR)
			return (S_ERROR);

	} else if (IS_TLS_GD(rtype) &&
	    ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), GOT_REF_TLSGD,
	    ofl, rsp)) == 0)) {

		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSGD,
		    FLG_REL_DTLS, rtype, R_AMD64_DTPMOD64,
		    R_AMD64_DTPOFF64) == S_ERROR)
			return (S_ERROR);
	}

	if (IS_TLS_LD(rtype))
		return (ld_add_actrel(FLG_REL_MTLS, rsp, ofl));

	return (ld_add_actrel(FLG_REL_DTLS, rsp, ofl));
}

/* ARGSUSED3 */
Gotndx *
ld_find_gotndx(List * lst, Gotref gref, Ofl_desc * ofl, Rel_desc * rdesc)
{
	Listnode	*lnp;
	Gotndx		*gnp;

	assert(rdesc != 0);

	if ((gref == GOT_REF_TLSLD) && ofl->ofl_tlsldgotndx)
		return (ofl->ofl_tlsldgotndx);

	for (LIST_TRAVERSE(lst, lnp, gnp)) {
		if ((rdesc->rel_raddend == gnp->gn_addend) &&
		    (gnp->gn_gotref == gref)) {
			return (gnp);
		}
	}
	return ((Gotndx *)0);
}

Xword
ld_calc_got_offset(Rel_desc * rdesc, Ofl_desc * ofl)
{
	Os_desc		*osp = ofl->ofl_osgot;
	Sym_desc	*sdp = rdesc->rel_sym;
	Xword		gotndx;
	Gotref		gref;
	Gotndx		*gnp;

	if (rdesc->rel_flags & FLG_REL_DTLS)
		gref = GOT_REF_TLSGD;
	else if (rdesc->rel_flags & FLG_REL_MTLS)
		gref = GOT_REF_TLSLD;
	else if (rdesc->rel_flags & FLG_REL_STLS)
		gref = GOT_REF_TLSIE;
	else
		gref = GOT_REF_GENERIC;

	gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref, ofl, rdesc);
	assert(gnp);

	gotndx = (Xword)gnp->gn_gotndx;

	if ((rdesc->rel_flags & FLG_REL_DTLS) &&
	    (rdesc->rel_rtype == R_AMD64_DTPOFF64))
		gotndx++;

	return ((Xword)(osp->os_shdr->sh_addr + (gotndx * M_GOT_ENTSIZE)));
}


/* ARGSUSED5 */
uintptr_t
ld_assign_got_ndx(List * lst, Gotndx * pgnp, Gotref gref, Ofl_desc * ofl,
    Rel_desc * rsp, Sym_desc * sdp)
{
	Xword		raddend;
	Gotndx		*gnp, *_gnp;
	Listnode	*lnp, *plnp;
	uint_t		gotents;

	raddend = rsp->rel_raddend;
	if (pgnp &&
	    (pgnp->gn_addend == raddend) &&
	    (pgnp->gn_gotref == gref))
		return (1);

	if ((gref == GOT_REF_TLSGD) || (gref == GOT_REF_TLSLD))
		gotents = 2;
	else
		gotents = 1;

	plnp = 0;
	for (LIST_TRAVERSE(lst, lnp, _gnp)) {
		if (_gnp->gn_addend > raddend)
			break;
		plnp = lnp;
	}

	/*
	 * Allocate a new entry.
	 */
	if ((gnp = libld_calloc(sizeof (Gotndx), 1)) == 0)
		return (S_ERROR);
	gnp->gn_addend = raddend;
	gnp->gn_gotndx = ofl->ofl_gotcnt;
	gnp->gn_gotref = gref;

	ofl->ofl_gotcnt += gotents;

	if (gref == GOT_REF_TLSLD) {
		ofl->ofl_tlsldgotndx = gnp;
		return (1);
	}

	if (plnp == 0) {
		/*
		 * Insert at head of list.
		 */
		if (list_prependc(lst, (void *)gnp) == 0)
			return (S_ERROR);
	} else if (_gnp->gn_addend > raddend) {
		/*
		 * Insert in middle of list.
		 */
		if (list_insertc(lst, (void *)gnp, plnp) == 0)
			return (S_ERROR);
	} else {
		/*
		 * Append to tail of list.
		 */
		if (list_appendc(lst, (void *)gnp) == 0)
			return (S_ERROR);
	}
	return (1);
}

void
ld_assign_plt_ndx(Sym_desc * sdp, Ofl_desc *ofl)
{
	sdp->sd_aux->sa_PLTndx = 1 + ofl->ofl_pltcnt++;
	sdp->sd_aux->sa_PLTGOTndx = ofl->ofl_gotcnt++;
	ofl->ofl_flags |= FLG_OF_BLDGOT;
}

static uchar_t plt0_template[M_PLT_ENTSIZE] = {
/* 0x00 PUSHQ GOT+8(%rip) */	0xff, 0x35, 0x00, 0x00, 0x00, 0x00,
/* 0x06 JMP  *GOT+16(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
/* 0x0c NOP */			0x90,
/* 0x0d NOP */			0x90,
/* 0x0e NOP */			0x90,
/* 0x0f NOP */			0x90
};

/*
 * Initializes .got[0] with the _DYNAMIC symbol value.
 */
uintptr_t
ld_fillin_gotplt(Ofl_desc *ofl)
{
	Word	flags = ofl->ofl_flags;
	Word	dtflags1 = ofl->ofl_dtflags_1;

	if (ofl->ofl_osgot) {
		Sym_desc	*sdp;

		if ((sdp = ld_sym_find(MSG_ORIG(MSG_SYM_DYNAMIC_U),
		    SYM_NOHASH, 0, ofl)) != NULL) {
			uchar_t	*genptr;

			genptr = ((uchar_t *)ofl->ofl_osgot->os_outdata->d_buf +
			    (M_GOT_XDYNAMIC * M_GOT_ENTSIZE));
			/* LINTED */
			*(Xword *)genptr = sdp->sd_sym->st_value;
		}
	}

	/*
	 * Fill in the reserved slot in the procedure linkage table.  The
	 * first entry is:
	 *	0x00 PUSHQ	GOT+8(%rip)	    # GOT[1]
	 *	0x06 JMP	*GOT+16(%rip)	    # GOT[2]
	 *	0x0c NOP
	 *	0x0d NOP
	 *	0x0e NOP
	 *	0x0f NOP
	 */
	if ((flags & FLG_OF_DYNAMIC) && ofl->ofl_osplt) {
		uchar_t	*pltent;
		Xword	val1;

		pltent = (uchar_t *)ofl->ofl_osplt->os_outdata->d_buf;
		bcopy(plt0_template, pltent, sizeof (plt0_template));

		/*
		 * Fill in:
		 *	PUSHQ	GOT+8(%rip)
		 *
		 * Note: 0x06 below represents the offset to the
		 * next instruction - which is what %rip will
		 * be pointing at.
		 */
		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    (M_GOT_XLINKMAP * M_GOT_ENTSIZE) -
		    ofl->ofl_osplt->os_shdr->sh_addr - 0x06;

		/*
		 * If '-z noreloc' is specified - skip the do_reloc
		 * stage.
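		 *
		 * (GOT[1] and GOT[2], which PLT0 references here via
		 * M_GOT_XLINKMAP and M_GOT_XRTLD, are by convention reserved
		 * for the object's link-map pointer and the runtime linker's
		 * binding routine; ld.so.1 fills them in at startup.)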
		 */
		if ((flags & FLG_OF_RELOBJ) ||
		    !(dtflags1 & DF_1_NORELOC)) {
			if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x02],
			    &val1, MSG_ORIG(MSG_SYM_PLTENT),
			    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_PLT_PLT0FAIL));
				return (S_ERROR);
			}
		}

		/*
		 * Fill in:
		 *	JMP	*GOT+16(%rip)
		 */
		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    (M_GOT_XRTLD * M_GOT_ENTSIZE) -
		    ofl->ofl_osplt->os_shdr->sh_addr - 0x0c;

		/*
		 * If '-z noreloc' is specified - skip the do_reloc
		 * stage.
		 */
		if ((flags & FLG_OF_RELOBJ) ||
		    !(dtflags1 & DF_1_NORELOC)) {
			if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x08],
			    &val1, MSG_ORIG(MSG_SYM_PLTENT),
			    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_PLT_PLT0FAIL));
				return (S_ERROR);
			}
		}
	}
	return (1);
}