/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include	<string.h>
#include	<stdio.h>
#include	<strings.h>
#include	<sys/elf_amd64.h>
#include	<debug.h>
#include	<reloc.h>
#include	"msg.h"
#include	"_libld.h"

Word
ld_init_rel(Rel_desc *reld, void *reloc)
{
	Rela	*rel = (Rela *)reloc;

	/* LINTED */
	reld->rel_rtype = (Word)ELF_R_TYPE(rel->r_info);
	reld->rel_roffset = rel->r_offset;
	reld->rel_raddend = rel->r_addend;
	reld->rel_typedata = 0;

	reld->rel_flags |= FLG_REL_RELA;

	return ((Word)ELF_R_SYM(rel->r_info));
}

void
ld_mach_eflags(Ehdr *ehdr, Ofl_desc *ofl)
{
	ofl->ofl_dehdr->e_flags |= ehdr->e_flags;
}

void
ld_mach_make_dynamic(Ofl_desc *ofl, size_t *cnt)
{
	if (!(ofl->ofl_flags & FLG_OF_RELOBJ)) {
		/*
		 * Create this entry if we are going to create a PLT table.
		 */
		if (ofl->ofl_pltcnt)
			(*cnt)++;		/* DT_PLTGOT */
	}
}

void
ld_mach_update_odynamic(Ofl_desc *ofl, Dyn **dyn)
{
	if (((ofl->ofl_flags & FLG_OF_RELOBJ) == 0) && ofl->ofl_pltcnt) {
		(*dyn)->d_tag = DT_PLTGOT;
		if (ofl->ofl_osgot)
			(*dyn)->d_un.d_ptr = ofl->ofl_osgot->os_shdr->sh_addr;
		else
			(*dyn)->d_un.d_ptr = 0;
		(*dyn)++;
	}
}

Xword
ld_calc_plt_addr(Sym_desc *sdp, Ofl_desc *ofl)
{
	Xword	value;

	value = (Xword)(ofl->ofl_osplt->os_shdr->sh_addr) +
	    M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) * M_PLT_ENTSIZE);
	return (value);
}

/*
 * Build a single plt entry - code is:
 *	JMP	*name1@GOTPCREL(%rip)
 *	PUSHQ	$index
 *	JMP	.PLT0
 */
static uchar_t pltn_entry[M_PLT_ENTSIZE] = {
/* 0x00 jmpq *name1@GOTPCREL(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
/* 0x06 pushq $index */			0x68, 0x00, 0x00, 0x00, 0x00,
/* 0x0b jmpq .plt0(%rip) */		0xe9, 0x00, 0x00, 0x00, 0x00
/* 0x10 */
};

static uintptr_t
plt_entry(Ofl_desc * ofl, Sym_desc * sdp)
{
	uchar_t		*plt0, *pltent, *gotent;
	Sword		plt_off;
	Word		got_off;
	Xword		val1;
	Word		flags = ofl->ofl_flags;
	Word		dtflags1 = ofl->ofl_dtflags_1;

	got_off = sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
	plt_off = M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) *
	    M_PLT_ENTSIZE);
	plt0 = (uchar_t *)(ofl->ofl_osplt->os_outdata->d_buf);
	pltent = plt0 + plt_off;
	gotent = (uchar_t *)(ofl->ofl_osgot->os_outdata->d_buf) + got_off;

	bcopy(pltn_entry, pltent, sizeof (pltn_entry));
	/*
	 * Fill in the got entry with the address of the next instruction.
	 */
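	/*
	 * Seeding the GOT slot with the address of the pushq that follows
	 * the entry's initial jmpq supports lazy binding: until the runtime
	 * linker resolves the symbol, the indirect jmpq simply lands on the
	 * pushq/jmpq-to-.PLT0 sequence of this same entry.
	 */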
	/* LINTED */
	*(Word *)gotent = ofl->ofl_osplt->os_shdr->sh_addr + plt_off +
	    M_PLT_INSSIZE;

	/*
	 * patchup:
	 *	jmpq	*name1@gotpcrel(%rip)
	 *
	 * NOTE: 0x06 represents next instruction.
	 */
	val1 = (ofl->ofl_osgot->os_shdr->sh_addr + got_off) -
	    (ofl->ofl_osplt->os_shdr->sh_addr + plt_off) - 0x06;

	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x02],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}

	/*
	 * patchup:
	 *	pushq	$pltndx
	 */
	val1 = (Xword)(sdp->sd_aux->sa_PLTndx - 1);
	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_32, &pltent[0x07],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}

	/*
	 * patchup:
	 *	jmpq	.plt0(%rip)
	 * NOTE: 0x10 represents next instruction.  The rather complex series
	 * of casts is necessary to sign extend an offset into a 64-bit value
	 * while satisfying various compiler error checks.  Handle with care.
	 */
	val1 = (Xword)((intptr_t)((uintptr_t)plt0 -
	    (uintptr_t)(&pltent[0x10])));

	/*
	 * If '-z noreloc' is specified - skip the do_reloc
	 * stage.
	 */
	if ((flags & FLG_OF_RELOBJ) ||
	    !(dtflags1 & DF_1_NORELOC)) {
		if (do_reloc(R_AMD64_PC32, &pltent[0x0c],
		    &val1, MSG_ORIG(MSG_SYM_PLTENT),
		    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
			eprintf(ofl->ofl_lml, ERR_FATAL,
			    MSG_INTL(MSG_PLT_PLTNFAIL), sdp->sd_aux->sa_PLTndx,
			    demangle(sdp->sd_name));
			return (S_ERROR);
		}
	}
	return (1);
}

uintptr_t
ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl)
{
	Os_desc		*relosp, *osp = 0;
	Word		ndx;
	Xword		roffset, value;
	Sxword		raddend;
	Rela		rea;
	char		*relbits;
	Sym_desc	*sdp, *psym = (Sym_desc *)0;
	int		sectmoved = 0;

	raddend = orsp->rel_raddend;
	sdp = orsp->rel_sym;

	/*
	 * If the section this relocation is against has been discarded
	 * (-zignore), then also discard (skip) the relocation itself.
	 */
	if (orsp->rel_isdesc && ((orsp->rel_flags &
	    (FLG_REL_GOT | FLG_REL_BSS | FLG_REL_PLT | FLG_REL_NOINFO)) == 0) &&
	    (orsp->rel_isdesc->is_flags & FLG_IS_DISCARD)) {
		DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml, M_MACH, orsp));
		return (1);
	}

	/*
	 * If this is a relocation against a move table, or expanded move
	 * table, adjust the relocation entries.
	 */
	if (orsp->rel_move)
		ld_adj_movereloc(ofl, orsp);

	/*
	 * If this is a relocation against a section then we need to adjust
	 * the raddend field to compensate for the new position of the input
	 * section within the new output section.
	 */
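	/*
	 * For example, if the input section was placed 0x40 bytes into its
	 * output section, 0x40 (plus the output section's address, for
	 * allocatable sections) must be folded into the addend so that the
	 * relocation still refers to the intended data.
	 */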
	if (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION) {
		if (ofl->ofl_parsym.head &&
		    (sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
		    /* LINTED */
		    (psym = ld_am_I_partial(orsp, orsp->rel_raddend))) {
			DBG_CALL(Dbg_move_outsctadj(ofl->ofl_lml, psym));
			sectmoved = 1;
			if (ofl->ofl_flags & FLG_OF_RELOBJ)
				raddend = psym->sd_sym->st_value;
			else
				raddend = psym->sd_sym->st_value -
				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
			/* LINTED */
			raddend += (Off)_elf_getxoff(psym->sd_isc->is_indata);
			if (psym->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
				raddend +=
				    psym->sd_isc->is_osdesc->os_shdr->sh_addr;
		} else {
			/* LINTED */
			raddend += (Off)_elf_getxoff(sdp->sd_isc->is_indata);
			if (sdp->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
				raddend +=
				    sdp->sd_isc->is_osdesc->os_shdr->sh_addr;
		}
	}

	value = sdp->sd_sym->st_value;

	if (orsp->rel_flags & FLG_REL_GOT) {
		/*
		 * Note: for GOT relative relocations on amd64
		 *	 we discard the addend.  It was relevant
		 *	 to the reference - not to the data item
		 *	 being referenced (ie: that -4 thing).
		 */
		raddend = 0;
		osp = ofl->ofl_osgot;
		roffset = ld_calc_got_offset(orsp, ofl);

	} else if (orsp->rel_flags & FLG_REL_PLT) {
		/*
		 * Note that relocations for PLT's actually
		 * cause a relocation against the GOT.
		 */
		osp = ofl->ofl_osplt;
		roffset = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
		raddend = 0;
		if (plt_entry(ofl, sdp) == S_ERROR)
			return (S_ERROR);

	} else if (orsp->rel_flags & FLG_REL_BSS) {
		/*
		 * This must be a R_AMD64_COPY.  For these set the roffset to
		 * point to the new symbol's location.
		 */
		osp = ofl->ofl_isbss->is_osdesc;
		roffset = value;

		/*
		 * The raddend doesn't mean anything in a R_AMD64_COPY
		 * relocation.  Null it out because it can confuse people.
		 */
		raddend = 0;
	} else {
		osp = orsp->rel_osdesc;

		/*
		 * Calculate virtual offset of reference point; equals offset
		 * into section + vaddr of section for loadable sections, or
		 * offset plus section displacement for nonloadable sections.
		 */
		roffset = orsp->rel_roffset +
		    (Off)_elf_getxoff(orsp->rel_isdesc->is_indata);
		if (!(ofl->ofl_flags & FLG_OF_RELOBJ))
			roffset += orsp->rel_isdesc->is_osdesc->
			    os_shdr->sh_addr;
	}

	if ((osp == 0) || ((relosp = osp->os_relosdesc) == 0))
		relosp = ofl->ofl_osrel;

	/*
	 * Assign the symbol's index for the output relocation.  If the
	 * relocation refers to a SECTION symbol then its index is based upon
	 * the output section's symbol index.  Otherwise the index can be
	 * derived from the symbol's index itself.
	 */
	if (orsp->rel_rtype == R_AMD64_RELATIVE)
		ndx = STN_UNDEF;
	else if ((orsp->rel_flags & FLG_REL_SCNNDX) ||
	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION)) {
		if (sectmoved == 0) {
			/*
			 * Check for a null input section.  This can
			 * occur if this relocation references a symbol
			 * generated by sym_add_sym().
			 */
			if ((sdp->sd_isc != 0) &&
			    (sdp->sd_isc->is_osdesc != 0))
				ndx = sdp->sd_isc->is_osdesc->os_scnsymndx;
			else
				ndx = sdp->sd_shndx;
		} else
			ndx = ofl->ofl_sunwdata1ndx;
	} else
		ndx = sdp->sd_symndx;

	/*
	 * Add the symbol's 'value' to the addend field.
	 */
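	/*
	 * FLG_REL_ADVAL is set, for instance, when ld_reloc_local() converts
	 * a relocation to R_AMD64_RELATIVE; a RELATIVE relocation carries no
	 * symbol, so the symbol's value has to travel in the addend instead.
	 */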
	if (orsp->rel_flags & FLG_REL_ADVAL)
		raddend += value;

	/*
	 * The addend field for R_AMD64_DTPMOD64 means nothing.  The addend
	 * is propagated in the corresponding R_AMD64_DTPOFF64 relocation.
	 */
	if (orsp->rel_rtype == R_AMD64_DTPMOD64)
		raddend = 0;

	relbits = (char *)relosp->os_outdata->d_buf;

	rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
	rea.r_offset = roffset;
	rea.r_addend = raddend;
	DBG_CALL(Dbg_reloc_out(ofl, ELF_DBG_LD, SHT_RELA, &rea, relosp->os_name,
	    orsp->rel_sname));

	/*
	 * Assert we haven't walked off the end of our relocation table.
	 */
	assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);

	(void) memcpy((relbits + relosp->os_szoutrels),
	    (char *)&rea, sizeof (Rela));
	relosp->os_szoutrels += (Xword)sizeof (Rela);

	/*
	 * Determine if this relocation is against a non-writable, allocatable
	 * section.  If so we may need to provide a text relocation diagnostic.
	 * Note that relocations against the .plt (R_AMD64_JUMP_SLOT) actually
	 * result in modifications to the .got.
	 */
	if (orsp->rel_rtype == R_AMD64_JUMP_SLOT)
		osp = ofl->ofl_osgot;

	ld_reloc_remain_entry(orsp, osp, ofl);
	return (1);
}

/*
 * amd64 Instructions for TLS processing
 */
static uchar_t tlsinstr_gd_ie[] = {
	/*
	 *	0x00 movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00,
	/*
	 *	0x09 addq x@gottpoff(%rip), %rax
	 */
	0x48, 0x03, 0x05, 0x00, 0x00,
	0x00, 0x00
};

static uchar_t tlsinstr_gd_le[] = {
	/*
	 *	0x00 movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00,
	/*
	 *	0x09 leaq x@tpoff(%rax), %rax
	 */
	0x48, 0x8d, 0x80, 0x00, 0x00,
	0x00, 0x00
};

static uchar_t tlsinstr_ld_le[] = {
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * .byte 0x66
	 */
	0x66,
	/*
	 * movq %fs:0, %rax
	 */
	0x64, 0x48, 0x8b, 0x04, 0x25,
	0x00, 0x00, 0x00, 0x00
};


static Fixupret
tls_fixups(Ofl_desc *ofl, Rel_desc *arsp)
{
	Sym_desc	*sdp = arsp->rel_sym;
	Word		rtype = arsp->rel_rtype;
	uchar_t		*offset;

	offset = (uchar_t *)((uintptr_t)arsp->rel_roffset +
	    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->is_indata) +
	    (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf);

	if (sdp->sd_ref == REF_DYN_NEED) {
		/*
		 * IE reference model
		 */
		switch (rtype) {
		case R_AMD64_TLSGD:
			/*
			 * GD -> IE
			 *
			 * Transition:
			 *	0x00 .byte 0x66
			 *	0x01 leaq x@tlsgd(%rip), %rdi
			 *	0x08 .word 0x6666
			 *	0x0a rex64
			 *	0x0b call __tls_get_addr@plt
			 *	0x10
			 * To:
			 *	0x00 movq %fs:0, %rax
			 *	0x09 addq x@gottpoff(%rip), %rax
			 *	0x10
			 */
			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
			    rtype, R_AMD64_GOTTPOFF, arsp->rel_roffset,
			    sdp->sd_name));
			arsp->rel_rtype = R_AMD64_GOTTPOFF;
			arsp->rel_roffset += 8;
			arsp->rel_raddend = (Sxword)-4;

			/*
			 * Adjust 'offset' to beginning of instruction
			 * sequence.
			 */
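			/*
			 * The original relocation applies at the leaq
			 * displacement, 4 bytes into the 16-byte GD code
			 * sequence, so backing up 4 bytes lands on the start
			 * of the sequence being overwritten.  The GOTTPOFF
			 * relocation applies 8 bytes further on, at the addq
			 * displacement of the replacement sequence.
			 */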
			offset -= 4;
			(void) memcpy(offset, tlsinstr_gd_ie,
			    sizeof (tlsinstr_gd_ie));
			return (FIX_RELOC);

		case R_AMD64_PLT32:
			/*
			 * Fixup done via the TLS_GD relocation
			 */
			DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
			    rtype, R_AMD64_NONE, arsp->rel_roffset,
			    sdp->sd_name));
			return (FIX_DONE);
		}
	}

	/*
	 * LE reference model
	 */
	switch (rtype) {
	case R_AMD64_TLSGD:
		/*
		 * GD -> LE
		 *
		 * Transition:
		 *	0x00 .byte 0x66
		 *	0x01 leaq x@tlsgd(%rip), %rdi
		 *	0x08 .word 0x6666
		 *	0x0a rex64
		 *	0x0b call __tls_get_addr@plt
		 *	0x10
		 * To:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 leaq x@tpoff(%rax), %rax
		 *	0x10
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
		    rtype, R_AMD64_TPOFF32, arsp->rel_roffset, sdp->sd_name));

		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_roffset += 8;
		arsp->rel_raddend = 0;

		/*
		 * Adjust 'offset' to beginning of instruction
		 * sequence.
		 */
		offset -= 4;
		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
		return (FIX_RELOC);

	case R_AMD64_GOTTPOFF:
		/*
		 * IE -> LE
		 *
		 * Transition:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 addq x@gottpoff(%rip), %rax
		 *	0x10
		 * To:
		 *	0x00 movq %fs:0, %rax
		 *	0x09 leaq x@tpoff(%rax), %rax
		 *	0x10
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH, rtype,
		    R_AMD64_TPOFF32, arsp->rel_roffset, sdp->sd_name));

		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_raddend = 0;

		/*
		 * Adjust 'offset' to beginning of instruction
		 * sequence.
		 */
		offset -= 12;

		/*
		 * Same code sequence used in the GD -> LE
		 * transition.
		 */
		(void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
		return (FIX_RELOC);

	case R_AMD64_TLSLD:
		/*
		 * LD -> LE
		 *
		 * Transition:
		 *	0x00 leaq x1@tlsld(%rip), %rdi
		 *	0x07 call __tls_get_addr@plt
		 *	0x0c
		 * To:
		 *	0x00 .byte 0x66
		 *	0x01 .byte 0x66
		 *	0x02 .byte 0x66
		 *	0x03 movq %fs:0, %rax
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH, rtype,
		    R_AMD64_NONE, arsp->rel_roffset, sdp->sd_name));
		offset -= 3;
		(void) memcpy(offset, tlsinstr_ld_le, sizeof (tlsinstr_ld_le));
		return (FIX_DONE);

	case R_AMD64_DTPOFF32:
		/*
		 * LD -> LE
		 *
		 * Transition:
		 *	0x00 leaq x1@dtpoff(%rax), %rcx
		 * To:
		 *	0x00 leaq x1@tpoff(%rax), %rcx
		 */
		DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH, rtype,
		    R_AMD64_TPOFF32, arsp->rel_roffset, sdp->sd_name));
		arsp->rel_rtype = R_AMD64_TPOFF32;
		arsp->rel_raddend = 0;
		return (FIX_RELOC);
	}

	return (FIX_RELOC);
}

uintptr_t
ld_do_activerelocs(Ofl_desc *ofl)
{
	Rel_desc	*arsp;
	Rel_cache	*rcp;
	Listnode	*lnp;
	uintptr_t	return_code = 1;
	Word		flags = ofl->ofl_flags;
	Word		dtflags1 = ofl->ofl_dtflags_1;

	if (ofl->ofl_actrels.head)
		DBG_CALL(Dbg_reloc_doact_title(ofl->ofl_lml));

	/*
	 * Process active relocations.
	 */
	for (LIST_TRAVERSE(&ofl->ofl_actrels, lnp, rcp)) {
		/* LINTED */
		for (arsp = (Rel_desc *)(rcp + 1);
		    arsp < rcp->rc_free; arsp++) {
			uchar_t		*addr;
			Xword		value;
			Sym_desc	*sdp;
			const char	*ifl_name;
			Xword		refaddr;
			int		moved = 0;
			Gotref		gref;

			/*
			 * If the section this relocation is against has been
			 * discarded (-zignore), then discard (skip) the
			 * relocation itself.
			 */
			if ((arsp->rel_isdesc->is_flags & FLG_IS_DISCARD) &&
			    ((arsp->rel_flags &
			    (FLG_REL_GOT | FLG_REL_BSS |
			    FLG_REL_PLT | FLG_REL_NOINFO)) == 0)) {
				DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml,
				    M_MACH, arsp));
				continue;
			}

			/*
			 * We determine what the 'got reference' model
			 * (if required) is at this point.  This needs to
			 * be done before tls_fixups() since it may
			 * 'transition' our instructions.
			 *
			 * The got table entries have already been assigned,
			 * and we bind to those initial entries.
			 */
			if (arsp->rel_flags & FLG_REL_DTLS)
				gref = GOT_REF_TLSGD;
			else if (arsp->rel_flags & FLG_REL_MTLS)
				gref = GOT_REF_TLSLD;
			else if (arsp->rel_flags & FLG_REL_STLS)
				gref = GOT_REF_TLSIE;
			else
				gref = GOT_REF_GENERIC;

			/*
			 * Perform any required TLS fixups.
			 */
			if (arsp->rel_flags & FLG_REL_TLSFIX) {
				Fixupret	ret;

				if ((ret = tls_fixups(ofl, arsp)) == FIX_ERROR)
					return (S_ERROR);
				if (ret == FIX_DONE)
					continue;
			}

			/*
			 * If this is a relocation against a move table, or
			 * expanded move table, adjust the relocation entries.
			 */
			if (arsp->rel_move)
				ld_adj_movereloc(ofl, arsp);

			sdp = arsp->rel_sym;
			refaddr = arsp->rel_roffset +
			    (Off)_elf_getxoff(arsp->rel_isdesc->is_indata);

			if ((arsp->rel_flags & FLG_REL_CLVAL) ||
			    (arsp->rel_flags & FLG_REL_GOTCL))
				value = 0;
			else if (ELF_ST_TYPE(sdp->sd_sym->st_info) ==
			    STT_SECTION) {
				Sym_desc	*sym;

				/*
				 * The value for a symbol pointing to a SECTION
				 * is based off of that section's position.
				 *
				 * The second argument to ld_am_I_partial() is
				 * the value stored at the target address the
				 * relocation is going to be applied to.
				 */
				if ((sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
				    /* LINTED */
				    (sym = ld_am_I_partial(arsp, *(Xword *)
				    ((uchar_t *)
				    arsp->rel_isdesc->is_indata->d_buf +
				    arsp->rel_roffset)))) {
					/*
					 * If the symbol is moved,
					 * adjust the value.
					 */
					value = sym->sd_sym->st_value;
					moved = 1;
				} else {
					value = _elf_getxoff(
					    sdp->sd_isc->is_indata);
					if (sdp->sd_isc->is_shdr->sh_flags &
					    SHF_ALLOC)
						value += sdp->sd_isc->is_osdesc->
						    os_shdr->sh_addr;
				}
				if (sdp->sd_isc->is_shdr->sh_flags & SHF_TLS)
					value -= ofl->ofl_tlsphdr->p_vaddr;

			} else if (IS_SIZE(arsp->rel_rtype)) {
				/*
				 * Size relocations require the symbol's size.
				 */
				value = sdp->sd_sym->st_size;
			} else {
				/*
				 * Else the value is the symbol's value.
				 */
				value = sdp->sd_sym->st_value;
			}

			/*
			 * Relocation against the GLOBAL_OFFSET_TABLE.
			 */
			if (arsp->rel_flags & FLG_REL_GOT)
				arsp->rel_osdesc = ofl->ofl_osgot;

			/*
			 * If loadable and not producing a relocatable object
			 * add the section's virtual address to the reference
			 * address.
			 */
			if ((arsp->rel_flags & FLG_REL_LOAD) &&
			    ((flags & FLG_OF_RELOBJ) == 0))
				refaddr += arsp->rel_isdesc->is_osdesc->
				    os_shdr->sh_addr;

			/*
			 * If this entry has a PLT assigned to it, its value
			 * is actually the address of the PLT (and not the
			 * address of the function).
			 */
			if (IS_PLT(arsp->rel_rtype)) {
				if (sdp->sd_aux && sdp->sd_aux->sa_PLTndx)
					value = ld_calc_plt_addr(sdp, ofl);
			}

			/*
			 * Add the relocation's addend to the value.  Add
			 * extra relocation addend if needed.
			 *
			 * Note: for GOT relative relocations on amd64
			 *	 we discard the addend.  It was relevant
			 *	 to the reference - not to the data item
			 *	 being referenced (ie: that -4 thing).
			 */
			if ((arsp->rel_flags & FLG_REL_GOT) == 0)
				value += arsp->rel_raddend;

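			/*
			 * In ELF notation, the branches below compute: a
			 * direct GOT fill for FLG_REL_GOT entries, S + A - GOT
			 * for GOT-based, G + GOT + A - P for GOTPCREL,
			 * GOT + A - P for GOT-pc, S + A - P for pc-relative,
			 * G (the GOT offset) for GOT-relative and TLS GOT
			 * references, and a negative thread-pointer offset
			 * for the static (LE) TLS model.
			 */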
			/*
			 * Determine whether the value needs further adjustment.
			 * Filter through the attributes of the relocation to
			 * determine what adjustment is required.  Note, many
			 * of the following cases are only applicable when a
			 * .got is present.  As a .got is not generated when a
			 * relocatable object is being built, any adjustments
			 * that require a .got need to be skipped.
			 */
			if ((arsp->rel_flags & FLG_REL_GOT) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Xword		R1addr;
				uintptr_t	R2addr;
				Word		gotndx;
				Gotndx		*gnp;

				/*
				 * Perform relocation against GOT table.  Since
				 * this doesn't fit exactly into a relocation
				 * we place the appropriate bytes in the GOT
				 * directly.
				 *
				 * Calculate offset into GOT at which to apply
				 * the relocation.
				 */
				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
				    ofl, arsp);
				assert(gnp);

				if (arsp->rel_rtype == R_AMD64_DTPOFF64)
					gotndx = gnp->gn_gotndx + 1;
				else
					gotndx = gnp->gn_gotndx;

				R1addr = (Xword)(gotndx * M_GOT_ENTSIZE);

				/*
				 * Add the GOT data's offset.
				 */
				R2addr = R1addr + (uintptr_t)
				    arsp->rel_osdesc->os_outdata->d_buf;

				DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml,
				    ELF_DBG_LD, M_MACH, SHT_RELA,
				    arsp->rel_rtype, R1addr, value,
				    arsp->rel_sname, arsp->rel_osdesc));

				/*
				 * And do it.
				 */
				*(Xword *)R2addr = value;
				continue;

			} else if (IS_GOT_BASED(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				value -= ofl->ofl_osgot->os_shdr->sh_addr;

			} else if (IS_GOTPCREL(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				/*
				 * Calculation:
				 *	G + GOT + A - P
				 */
				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
				    gref, ofl, arsp);
				assert(gnp);
				value = (Xword)(ofl->ofl_osgot->os_shdr->
				    sh_addr) + ((Xword)gnp->gn_gotndx *
				    M_GOT_ENTSIZE) + arsp->rel_raddend -
				    refaddr;

			} else if (IS_GOT_PC(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				value = (Xword)(ofl->ofl_osgot->os_shdr->
				    sh_addr) - refaddr + arsp->rel_raddend;

			} else if ((IS_PC_RELATIVE(arsp->rel_rtype)) &&
			    (((flags & FLG_OF_RELOBJ) == 0) ||
			    (arsp->rel_osdesc == sdp->sd_isc->is_osdesc))) {
				value -= refaddr;

			} else if (IS_TLS_INS(arsp->rel_rtype) &&
			    IS_GOT_RELATIVE(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref,
				    ofl, arsp);
				assert(gnp);
				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;

			} else if (IS_GOT_RELATIVE(arsp->rel_rtype) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Gotndx	*gnp;

				gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
				    gref, ofl, arsp);
				assert(gnp);
				value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;

			} else if ((arsp->rel_flags & FLG_REL_STLS) &&
			    ((flags & FLG_OF_RELOBJ) == 0)) {
				Xword	tlsstatsize;

				/*
				 * This is the LE TLS reference model.  Static
				 * offset is hard-coded.
				 */
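				/*
				 * amd64 static TLS sits immediately below the
				 * thread pointer, so a symbol's LE offset is
				 * the (negative) distance from %fs:0 back to
				 * the symbol within the rounded static block.
				 */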
				tlsstatsize =
				    S_ROUND(ofl->ofl_tlsphdr->p_memsz,
				    M_TLSSTATALIGN);
				value = tlsstatsize - value;

				/*
				 * Since this code is fixed up, it assumes a
				 * negative offset that can be added to the
				 * thread pointer.
				 */
				if (arsp->rel_rtype == R_AMD64_TPOFF32)
					value = -value;
			}

			if (arsp->rel_isdesc->is_file)
				ifl_name = arsp->rel_isdesc->is_file->ifl_name;
			else
				ifl_name = MSG_INTL(MSG_STR_NULL);

			/*
			 * Make sure we have data to relocate.  Compiler and
			 * assembler developers have been known to generate
			 * relocations against invalid sections (normally .bss),
			 * so for their benefit give them sufficient information
			 * to help analyze the problem.  End users should never
			 * see this.
			 */
			if (arsp->rel_isdesc->is_indata->d_buf == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_REL_EMPTYSEC),
				    conv_reloc_amd64_type(arsp->rel_rtype, 0),
				    ifl_name, demangle(arsp->rel_sname),
				    arsp->rel_isdesc->is_name);
				return (S_ERROR);
			}

			/*
			 * Get the address of the data item we need to modify.
			 */
			addr = (uchar_t *)((uintptr_t)arsp->rel_roffset +
			    (uintptr_t)_elf_getxoff(arsp->rel_isdesc->
			    is_indata));

			DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml, ELF_DBG_LD,
			    M_MACH, SHT_RELA, arsp->rel_rtype, EC_NATPTR(addr),
			    value, arsp->rel_sname, arsp->rel_osdesc));
			addr += (uintptr_t)arsp->rel_osdesc->os_outdata->d_buf;

			if ((((uintptr_t)addr - (uintptr_t)ofl->ofl_nehdr) >
			    ofl->ofl_size) || (arsp->rel_roffset >
			    arsp->rel_osdesc->os_shdr->sh_size)) {
				int	class;

				if (((uintptr_t)addr -
				    (uintptr_t)ofl->ofl_nehdr) > ofl->ofl_size)
					class = ERR_FATAL;
				else
					class = ERR_WARNING;

				eprintf(ofl->ofl_lml, class,
				    MSG_INTL(MSG_REL_INVALOFFSET),
				    conv_reloc_amd64_type(arsp->rel_rtype, 0),
				    ifl_name, arsp->rel_isdesc->is_name,
				    demangle(arsp->rel_sname),
				    EC_ADDR((uintptr_t)addr -
				    (uintptr_t)ofl->ofl_nehdr));

				if (class == ERR_FATAL) {
					return_code = S_ERROR;
					continue;
				}
			}

			/*
			 * The relocation is additive.  Ignore the previous
			 * symbol value if this local partial symbol is
			 * expanded.
			 */
			if (moved)
				value -= *addr;

			/*
			 * If '-z noreloc' is specified - skip the do_reloc
			 * stage.
			 */
			if ((flags & FLG_OF_RELOBJ) ||
			    !(dtflags1 & DF_1_NORELOC)) {
				if (do_reloc((uchar_t)arsp->rel_rtype,
				    addr, &value, arsp->rel_sname, ifl_name,
				    ofl->ofl_lml) == 0)
					return_code = S_ERROR;
			}
		}
	}
	return (return_code);
}

uintptr_t
ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
{
	Rel_desc	*orsp;
	Rel_cache	*rcp;
	Sym_desc	*sdp = rsp->rel_sym;

	/*
	 * Static executables *do not* want any relocations against them.
	 * Since our engine still creates relocations against a WEAK UNDEFINED
	 * symbol in a static executable, it's best to disable them here
	 * instead of throughout the relocation code.
	 */
	if ((ofl->ofl_flags & (FLG_OF_STATIC | FLG_OF_EXEC)) ==
	    (FLG_OF_STATIC | FLG_OF_EXEC))
		return (1);

	/*
	 * If no relocation cache structures are available allocate
	 * a new one and link it into the cache list.
	 */
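	/*
	 * A Rel_cache is a single allocation: the header is followed
	 * immediately by its array of Rel_desc slots, with rc_free pointing
	 * at the next unused slot and rc_end just past the last one.
	 */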
	if ((ofl->ofl_outrels.tail == 0) ||
	    ((rcp = (Rel_cache *)ofl->ofl_outrels.tail->data) == 0) ||
	    ((orsp = rcp->rc_free) == rcp->rc_end)) {
		static size_t	nextsize = 0;
		size_t		size;

		/*
		 * Output relocation numbers can vary considerably between
		 * building executables or shared objects (pic vs. non-pic),
		 * etc.  But, they typically aren't very large, so for these
		 * objects use a standard bucket size.  For building relocatable
		 * objects, typically there will be an output relocation for
		 * every input relocation.
		 */
		if (nextsize == 0) {
			if (ofl->ofl_flags & FLG_OF_RELOBJ) {
				if ((size = ofl->ofl_relocincnt) == 0)
					size = REL_LOIDESCNO;
				if (size > REL_HOIDESCNO)
					nextsize = REL_HOIDESCNO;
				else
					nextsize = REL_LOIDESCNO;
			} else
				nextsize = size = REL_HOIDESCNO;
		} else
			size = nextsize;

		size = size * sizeof (Rel_desc);

		if (((rcp = libld_malloc(sizeof (Rel_cache) + size)) == 0) ||
		    (list_appendc(&ofl->ofl_outrels, rcp) == 0))
			return (S_ERROR);

		/* LINTED */
		rcp->rc_free = orsp = (Rel_desc *)(rcp + 1);
		/* LINTED */
		rcp->rc_end = (Rel_desc *)((char *)rcp->rc_free + size);
	}

	/*
	 * If we are adding an output relocation against a section
	 * symbol (non-RELATIVE) then mark that section.  These sections
	 * will be added to the .dynsym symbol table.
	 */
	if (sdp && (rsp->rel_rtype != M_R_RELATIVE) &&
	    ((flags & FLG_REL_SCNNDX) ||
	    (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION))) {

		/*
		 * If this is a COMMON symbol - no output section
		 * exists yet - (it's created as part of sym_validate()).
		 * So - we mark here that when it's created it should
		 * be tagged with the FLG_OS_OUTREL flag.
		 */
		if ((sdp->sd_flags & FLG_SY_SPECSEC) &&
		    (sdp->sd_sym->st_shndx == SHN_COMMON)) {
			if (ELF_ST_TYPE(sdp->sd_sym->st_info) != STT_TLS)
				ofl->ofl_flags1 |= FLG_OF1_BSSOREL;
			else
				ofl->ofl_flags1 |= FLG_OF1_TLSOREL;
		} else {
			Os_desc	*osp = sdp->sd_isc->is_osdesc;

			if (osp && ((osp->os_flags & FLG_OS_OUTREL) == 0)) {
				ofl->ofl_dynshdrcnt++;
				osp->os_flags |= FLG_OS_OUTREL;
			}
		}
	}

	*orsp = *rsp;
	orsp->rel_flags |= flags;

	rcp->rc_free++;
	ofl->ofl_outrelscnt++;

	if (flags & FLG_REL_GOT)
		ofl->ofl_relocgotsz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_PLT)
		ofl->ofl_relocpltsz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_BSS)
		ofl->ofl_relocbsssz += (Xword)sizeof (Rela);
	else if (flags & FLG_REL_NOINFO)
		ofl->ofl_relocrelsz += (Xword)sizeof (Rela);
	else
		orsp->rel_osdesc->os_szoutrels += (Xword)sizeof (Rela);

	if (orsp->rel_rtype == M_R_RELATIVE)
		ofl->ofl_relocrelcnt++;

	/*
	 * We don't perform sorting on PLT relocations because
	 * they have already been assigned a PLT index and if we
	 * were to sort them we would have to re-assign the plt indexes.
	 */
	if (!(flags & FLG_REL_PLT))
		ofl->ofl_reloccnt++;

	/*
	 * Ensure a GLOBAL_OFFSET_TABLE is generated if required.
	 */
	if (IS_GOT_REQUIRED(orsp->rel_rtype))
		ofl->ofl_flags |= FLG_OF_BLDGOT;

	/*
	 * Identify and possibly warn of a displacement relocation.
	 */
	if (orsp->rel_flags & FLG_REL_DISP) {
		ofl->ofl_dtflags_1 |= DF_1_DISPRELPND;

		if (ofl->ofl_flags & FLG_OF_VERBOSE)
			ld_disp_errmsg(MSG_INTL(MSG_REL_DISPREL4), orsp, ofl);
	}
	DBG_CALL(Dbg_reloc_ors_entry(ofl->ofl_lml, ELF_DBG_LD, SHT_RELA,
	    M_MACH, orsp));
	return (1);
}

/*
 * Stub routine since register symbols are not supported on amd64.
 */
/* ARGSUSED */
uintptr_t
ld_reloc_register(Rel_desc * rsp, Is_desc * isp, Ofl_desc * ofl)
{
	eprintf(ofl->ofl_lml, ERR_FATAL, MSG_INTL(MSG_REL_NOREG));
	return (S_ERROR);
}

/*
 * process relocation for a LOCAL symbol
 */
uintptr_t
ld_reloc_local(Rel_desc * rsp, Ofl_desc * ofl)
{
	Word		flags = ofl->ofl_flags;
	Sym_desc	*sdp = rsp->rel_sym;
	Word		shndx = sdp->sd_sym->st_shndx;
	Word		ortype = rsp->rel_rtype;

	/*
	 * if ((shared object) and (not pc relative relocation) and
	 *    (not against ABS symbol))
	 * then
	 *	build R_AMD64_RELATIVE
	 * fi
	 */
	if ((flags & FLG_OF_SHAROBJ) && (rsp->rel_flags & FLG_REL_LOAD) &&
	    !(IS_PC_RELATIVE(rsp->rel_rtype)) && !(IS_SIZE(rsp->rel_rtype)) &&
	    !(IS_GOT_BASED(rsp->rel_rtype)) &&
	    !(rsp->rel_isdesc != NULL &&
	    (rsp->rel_isdesc->is_shdr->sh_type == SHT_SUNW_dof)) &&
	    (((sdp->sd_flags & FLG_SY_SPECSEC) == 0) ||
	    (shndx != SHN_ABS) || (sdp->sd_aux && sdp->sd_aux->sa_symspec))) {

		/*
		 * R_AMD64_RELATIVE updates a 64bit address, if this
		 * relocation isn't a 64bit binding then we can not
		 * simplify it to a RELATIVE relocation.
		 */
		if (reloc_table[ortype].re_fsize != sizeof (Addr)) {
			return (ld_add_outrel(NULL, rsp, ofl));
		}

		rsp->rel_rtype = R_AMD64_RELATIVE;
		if (ld_add_outrel(FLG_REL_ADVAL, rsp, ofl) == S_ERROR)
			return (S_ERROR);
		rsp->rel_rtype = ortype;
		return (1);
	}

	/*
	 * If the relocation is against a 'non-allocatable' section
	 * and we can not resolve it now - then give a warning
	 * message.
	 *
	 * We can not resolve the symbol if either:
	 *	a) it's undefined
	 *	b) it's defined in a shared library and a
	 *	   COPY relocation hasn't moved it to the executable
	 *
	 * Note: because we process all of the relocations against the
	 *	text segment before any others - we know whether
	 *	or not a copy relocation will be generated before
	 *	we get here (see reloc_init()->reloc_segments()).
	 */
	if (!(rsp->rel_flags & FLG_REL_LOAD) &&
	    ((shndx == SHN_UNDEF) ||
	    ((sdp->sd_ref == REF_DYN_NEED) &&
	    ((sdp->sd_flags & FLG_SY_MVTOCOMM) == 0)))) {
		/*
		 * If the relocation is against a SHT_SUNW_ANNOTATE
		 * section - then silently ignore that the relocation
		 * can not be resolved.
		 */
		if (rsp->rel_osdesc &&
		    (rsp->rel_osdesc->os_shdr->sh_type == SHT_SUNW_ANNOTATE))
			return (0);
		(void) eprintf(ofl->ofl_lml, ERR_WARNING,
		    MSG_INTL(MSG_REL_EXTERNSYM),
		    conv_reloc_amd64_type(rsp->rel_rtype, 0),
		    rsp->rel_isdesc->is_file->ifl_name,
		    demangle(rsp->rel_sname), rsp->rel_osdesc->os_name);
		return (1);
	}

	/*
	 * Perform relocation.
	 */
	return (ld_add_actrel(NULL, rsp, ofl));
}


uintptr_t
/* ARGSUSED */
ld_reloc_GOTOP(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
{
	/*
	 * Stub routine for common code compatibility, we shouldn't
	 * actually get here on amd64.
	 */
	assert(0);
	return (S_ERROR);
}

uintptr_t
ld_reloc_TLS(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
{
	Word		rtype = rsp->rel_rtype;
	Sym_desc	*sdp = rsp->rel_sym;
	Word		flags = ofl->ofl_flags;
	Gotndx		*gnp;

	/*
	 * If we're building an executable - use either the IE or LE access
	 * model.  If we're building a shared object, process any IE model.
	 */
	if ((flags & FLG_OF_EXEC) || (IS_TLS_IE(rtype))) {
		/*
		 * Set the DF_STATIC_TLS flag.
		 */
		ofl->ofl_dtflags |= DF_STATIC_TLS;

		if (!local || ((flags & FLG_OF_EXEC) == 0)) {
			/*
			 * Assign a GOT entry for static TLS references.
			 */
			if ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
			    GOT_REF_TLSIE, ofl, rsp)) == 0) {

				if (ld_assign_got_TLS(local, rsp, ofl, sdp,
				    gnp, GOT_REF_TLSIE, FLG_REL_STLS,
				    rtype, R_AMD64_TPOFF64, 0) == S_ERROR)
					return (S_ERROR);
			}

			/*
			 * IE access model.
			 */
			if (IS_TLS_IE(rtype))
				return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));

			/*
			 * Fixups are required for other executable models.
			 */
			return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
			    rsp, ofl));
		}

		/*
		 * LE access model.
		 */
		if (IS_TLS_LE(rtype))
			return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));

		return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
		    rsp, ofl));
	}

	/*
	 * Building a shared object.
	 *
	 * Assign a GOT entry for a dynamic TLS reference.
	 */
	if (IS_TLS_LD(rtype) && ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs),
	    GOT_REF_TLSLD, ofl, rsp)) == 0)) {

		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSLD,
		    FLG_REL_MTLS, rtype, R_AMD64_DTPMOD64, 0) == S_ERROR)
			return (S_ERROR);

	} else if (IS_TLS_GD(rtype) &&
	    ((gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), GOT_REF_TLSGD,
	    ofl, rsp)) == 0)) {

		if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSGD,
		    FLG_REL_DTLS, rtype, R_AMD64_DTPMOD64,
		    R_AMD64_DTPOFF64) == S_ERROR)
			return (S_ERROR);
	}

	if (IS_TLS_LD(rtype))
		return (ld_add_actrel(FLG_REL_MTLS, rsp, ofl));

	return (ld_add_actrel(FLG_REL_DTLS, rsp, ofl));
}

/* ARGSUSED3 */
Gotndx *
ld_find_gotndx(List * lst, Gotref gref, Ofl_desc * ofl, Rel_desc * rdesc)
{
	Listnode	*lnp;
	Gotndx		*gnp;

	assert(rdesc != 0);

	if ((gref == GOT_REF_TLSLD) && ofl->ofl_tlsldgotndx)
		return (ofl->ofl_tlsldgotndx);

	for (LIST_TRAVERSE(lst, lnp, gnp)) {
		if ((rdesc->rel_raddend == gnp->gn_addend) &&
		    (gnp->gn_gotref == gref)) {
			return (gnp);
		}
	}
	return ((Gotndx *)0);
}

Xword
ld_calc_got_offset(Rel_desc * rdesc, Ofl_desc * ofl)
{
	Os_desc		*osp = ofl->ofl_osgot;
	Sym_desc	*sdp = rdesc->rel_sym;
	Xword		gotndx;
	Gotref		gref;
	Gotndx		*gnp;

	if (rdesc->rel_flags & FLG_REL_DTLS)
		gref = GOT_REF_TLSGD;
	else if (rdesc->rel_flags & FLG_REL_MTLS)
		gref = GOT_REF_TLSLD;
	else if (rdesc->rel_flags & FLG_REL_STLS)
		gref = GOT_REF_TLSIE;
	else
		gref = GOT_REF_GENERIC;

	gnp = ld_find_gotndx(&(sdp->sd_GOTndxs), gref, ofl, rdesc);
	assert(gnp);

	gotndx = (Xword)gnp->gn_gotndx;

	if ((rdesc->rel_flags & FLG_REL_DTLS) &&
	    (rdesc->rel_rtype == R_AMD64_DTPOFF64))
		gotndx++;

	return ((Xword)(osp->os_shdr->sh_addr + (gotndx * M_GOT_ENTSIZE)));
}
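
/*
 * Reserve GOT slots for a relocation's (symbol, addend) pair.  TLS GD and
 * LD references consume two consecutive slots (for the module ID and offset
 * pair); everything else takes one.  Entries on a symbol's GOT list are
 * kept sorted by ascending addend.
 */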
/* ARGSUSED5 */
uintptr_t
ld_assign_got_ndx(List * lst, Gotndx * pgnp, Gotref gref, Ofl_desc * ofl,
    Rel_desc * rsp, Sym_desc * sdp)
{
	Xword		raddend;
	Gotndx		*gnp, *_gnp;
	Listnode	*lnp, *plnp;
	uint_t		gotents;

	raddend = rsp->rel_raddend;
	if (pgnp && (pgnp->gn_addend == raddend) &&
	    (pgnp->gn_gotref == gref))
		return (1);

	if ((gref == GOT_REF_TLSGD) || (gref == GOT_REF_TLSLD))
		gotents = 2;
	else
		gotents = 1;

	plnp = 0;
	for (LIST_TRAVERSE(lst, lnp, _gnp)) {
		if (_gnp->gn_addend > raddend)
			break;
		plnp = lnp;
	}

	/*
	 * Allocate a new entry.
	 */
	if ((gnp = libld_calloc(sizeof (Gotndx), 1)) == 0)
		return (S_ERROR);
	gnp->gn_addend = raddend;
	gnp->gn_gotndx = ofl->ofl_gotcnt;
	gnp->gn_gotref = gref;

	ofl->ofl_gotcnt += gotents;

	if (gref == GOT_REF_TLSLD) {
		ofl->ofl_tlsldgotndx = gnp;
		return (1);
	}

	if (plnp == 0) {
		/*
		 * Insert at head of list.
		 */
		if (list_prependc(lst, (void *)gnp) == 0)
			return (S_ERROR);
	} else if (_gnp->gn_addend > raddend) {
		/*
		 * Insert in middle of list.
		 */
		if (list_insertc(lst, (void *)gnp, plnp) == 0)
			return (S_ERROR);
	} else {
		/*
		 * Append to tail of list.
		 */
		if (list_appendc(lst, (void *)gnp) == 0)
			return (S_ERROR);
	}
	return (1);
}

void
ld_assign_plt_ndx(Sym_desc * sdp, Ofl_desc *ofl)
{
	sdp->sd_aux->sa_PLTndx = 1 + ofl->ofl_pltcnt++;
	sdp->sd_aux->sa_PLTGOTndx = ofl->ofl_gotcnt++;
	ofl->ofl_flags |= FLG_OF_BLDGOT;
}

static uchar_t plt0_template[M_PLT_ENTSIZE] = {
/* 0x00 PUSHQ GOT+8(%rip) */	0xff, 0x35, 0x00, 0x00, 0x00, 0x00,
/* 0x06 JMP *GOT+16(%rip) */	0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
/* 0x0c NOP */			0x90,
/* 0x0d NOP */			0x90,
/* 0x0e NOP */			0x90,
/* 0x0f NOP */			0x90
};

/*
 * Initializes .got[0] with the _DYNAMIC symbol value.
 */
uintptr_t
ld_fillin_gotplt(Ofl_desc *ofl)
{
	Word	flags = ofl->ofl_flags;
	Word	dtflags1 = ofl->ofl_dtflags_1;

	if (ofl->ofl_osgot) {
		Sym_desc	*sdp;

		if ((sdp = ld_sym_find(MSG_ORIG(MSG_SYM_DYNAMIC_U),
		    SYM_NOHASH, 0, ofl)) != NULL) {
			uchar_t	*genptr;

			genptr = ((uchar_t *)ofl->ofl_osgot->os_outdata->d_buf +
			    (M_GOT_XDYNAMIC * M_GOT_ENTSIZE));
			/* LINTED */
			*(Xword *)genptr = sdp->sd_sym->st_value;
		}
	}

	/*
	 * Fill in the reserved slot in the procedure linkage table.  The
	 * first entry is:
	 *	0x00 PUSHQ	GOT+8(%rip)	# GOT[1]
	 *	0x06 JMP	*GOT+16(%rip)	# GOT[2]
	 *	0x0c NOP
	 *	0x0d NOP
	 *	0x0e NOP
	 *	0x0f NOP
	 */
	if ((flags & FLG_OF_DYNAMIC) && ofl->ofl_osplt) {
		uchar_t	*pltent;
		Xword	val1;

		pltent = (uchar_t *)ofl->ofl_osplt->os_outdata->d_buf;
		bcopy(plt0_template, pltent, sizeof (plt0_template));

		/*
		 * fill in:
		 *	PUSHQ	GOT+8(%rip)
		 *
		 * Note: 0x06 below represents the offset to the
		 *	 next instruction - which is what %rip will
		 *	 be pointing at.
		 */
		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    (M_GOT_XLINKMAP * M_GOT_ENTSIZE) -
		    ofl->ofl_osplt->os_shdr->sh_addr - 0x06;
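
		/*
		 * GOT[1] (M_GOT_XLINKMAP) and GOT[2] (M_GOT_XRTLD) are the
		 * reserved entries that the runtime linker fills in at
		 * startup: GOT[1] identifies this object (its link-map) and
		 * GOT[2] holds the address of the binding routine that .PLT0
		 * jumps through.
		 */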

		/*
		 * If '-z noreloc' is specified - skip the do_reloc
		 * stage.
		 */
		if ((flags & FLG_OF_RELOBJ) ||
		    !(dtflags1 & DF_1_NORELOC)) {
			if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x02],
			    &val1, MSG_ORIG(MSG_SYM_PLTENT),
			    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_PLT_PLT0FAIL));
				return (S_ERROR);
			}
		}

		/*
		 * fill in:
		 *	JMP	*GOT+16(%rip)
		 */
		val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
		    (M_GOT_XRTLD * M_GOT_ENTSIZE) -
		    ofl->ofl_osplt->os_shdr->sh_addr - 0x0c;

		/*
		 * If '-z noreloc' is specified - skip the do_reloc
		 * stage.
		 */
		if ((flags & FLG_OF_RELOBJ) ||
		    !(dtflags1 & DF_1_NORELOC)) {
			if (do_reloc(R_AMD64_GOTPCREL, &pltent[0x08],
			    &val1, MSG_ORIG(MSG_SYM_PLTENT),
			    MSG_ORIG(MSG_SPECFIL_PLTENT), ofl->ofl_lml) == 0) {
				eprintf(ofl->ofl_lml, ERR_FATAL,
				    MSG_INTL(MSG_PLT_PLT0FAIL));
				return (S_ERROR);
			}
		}
	}
	return (1);
}