/*-
 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <machine/sysarch.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

/*
 * Process the special R_X86_64_COPY relocations in the main program.  These
 * copy data from a shared object into a region in the main program's BSS
 * segment.
 *
 * Returns 0 on success, -1 on failure.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    assert(dstobj->mainprog);	/* COPY relocations are invalid elsewhere */

    relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela + dstobj->relasize);
    for (rela = dstobj->rela; rela < relalim; rela++) {
        if (ELF_R_TYPE(rela->r_info) == R_X86_64_COPY) {
            void *dstaddr;
            const Elf_Sym *dstsym;
            const char *name;
            size_t size;
            const void *srcaddr;
            const Elf_Sym *srcsym;
            const Obj_Entry *srcobj, *defobj;
            SymLook req;
            int res;

            dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
            dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
            name = dstobj->strtab + dstsym->st_name;
            size = dstsym->st_size;
            symlook_init(&req, name);
            req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
            req.flags = SYMLOOK_EARLY;

            for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next) {
                res = symlook_obj(&req, srcobj);
                if (res == 0) {
                    srcsym = req.sym_out;
                    defobj = req.defobj_out;
                    break;
                }
            }

            if (srcobj == NULL) {
                _rtld_error("Undefined symbol \"%s\" referenced from COPY"
                    " relocation in %s", name, dstobj->path);
                return -1;
            }

            srcaddr = (const void *) (defobj->relocbase + srcsym->st_value);
            memcpy(dstaddr, srcaddr, size);
        }
    }

    return 0;
}
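
/*
 * Illustrative sketch (not part of rtld): a pair of translation units that
 * typically makes the static linker emit an R_X86_64_COPY relocation when
 * the executable is built without PIC/PIE.  The main program refers to a
 * variable defined in a shared object, so the linker reserves space for it
 * in the executable's BSS and leaves a COPY relocation behind, which
 * do_copy_relocations() above satisfies by memcpy'ing the initial value.
 * The file and symbol names below are hypothetical.
 */
#if 0
/* libfoo.c: cc -fPIC -shared -o libfoo.so libfoo.c */
int foo_value = 42;		/* initialized data in the shared object */

/*
 * main.c: cc -fno-pie -no-pie -o main main.c -L. -lfoo
 * "readelf -r main" should then show an R_X86_64_COPY entry for foo_value.
 */
#include <stdio.h>

extern int foo_value;		/* satisfied through the COPY slot */

int
main(void)
{
    printf("%d\n", foo_value);	/* prints 42 after the copy */
    return (0);
}
#endif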

/* Initialize the special GOT entries. */
void
init_pltgot(Obj_Entry *obj)
{
    if (obj->pltgot != NULL) {
        obj->pltgot[1] = (Elf_Addr) obj;
        obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
    }
}

/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;
    SymCache *cache;
    int r = -1;

    /*
     * The dynamic loader may be called from a thread; we have
     * limited amounts of stack available, so we cannot use alloca().
     */
    if (obj != obj_rtld) {
        cache = calloc(obj->dynsymcount, sizeof(SymCache));
        /* No need to check for NULL here */
    } else
        cache = NULL;

    relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
    for (rela = obj->rela; rela < relalim; rela++) {
        Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
        Elf32_Addr *where32 = (Elf32_Addr *)where;

        switch (ELF_R_TYPE(rela->r_info)) {

        case R_X86_64_NONE:
            break;

        case R_X86_64_64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                *where = (Elf_Addr) (defobj->relocbase + def->st_value + rela->r_addend);
            }
            break;

        case R_X86_64_PC32:
            /*
             * I don't think the dynamic linker should ever see this
             * type of relocation.  But the binutils-2.6 tools sometimes
             * generate it.
             */
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                *where32 = (Elf32_Addr) (unsigned long) (defobj->relocbase +
                    def->st_value + rela->r_addend - (Elf_Addr) where);
            }
            break;
        /* missing: R_X86_64_GOT32 R_X86_64_PLT32 */

        case R_X86_64_COPY:
            /*
             * These are deferred until all other relocations have
             * been done.  All we do here is make sure that the COPY
             * relocation is not in a shared library.  They are allowed
             * only in executable files.
             */
            if (!obj->mainprog) {
                _rtld_error("%s: Unexpected R_X86_64_COPY relocation"
                    " in shared library", obj->path);
                goto done;
            }
            break;

        case R_X86_64_GLOB_DAT:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                *where = (Elf_Addr) (defobj->relocbase + def->st_value);
            }
            break;

        case R_X86_64_TPOFF64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                /*
                 * We lazily allocate offsets for static TLS as we
                 * see the first relocation that references the
                 * TLS block.  This allows us to support (small
                 * amounts of) static TLS in dynamically loaded
                 * modules.  If we run out of space, we generate an
                 * error.
                 */
                if (!defobj->tls_done) {
                    if (!allocate_tls_offset((Obj_Entry*) defobj)) {
                        _rtld_error("%s: No space available for static "
                            "Thread Local Storage", obj->path);
                        goto done;
                    }
                }

                *where = (Elf_Addr) (def->st_value - defobj->tlsoffset +
                    rela->r_addend);
            }
            break;

        case R_X86_64_TPOFF32:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                /*
                 * We lazily allocate offsets for static TLS as we
                 * see the first relocation that references the
                 * TLS block.  This allows us to support (small
                 * amounts of) static TLS in dynamically loaded
                 * modules.  If we run out of space, we generate an
                 * error.
                 */
                if (!defobj->tls_done) {
                    if (!allocate_tls_offset((Obj_Entry*) defobj)) {
                        _rtld_error("%s: No space available for static "
                            "Thread Local Storage", obj->path);
                        goto done;
                    }
                }

                *where32 = (Elf32_Addr) (def->st_value -
                    defobj->tlsoffset + rela->r_addend);
            }
            break;

        case R_X86_64_DTPMOD64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                *where += (Elf_Addr) defobj->tlsindex;
            }
            break;

        case R_X86_64_DTPOFF64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                *where += (Elf_Addr) (def->st_value + rela->r_addend);
            }
            break;

        case R_X86_64_DTPOFF32:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL)
                    goto done;

                *where32 += (Elf32_Addr) (def->st_value + rela->r_addend);
            }
            break;

        case R_X86_64_RELATIVE:
            *where = (Elf_Addr)(obj->relocbase + rela->r_addend);
            break;

        /* missing: R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16, R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8 */

        default:
            _rtld_error("%s: Unsupported relocation type %u"
                " in non-PLT relocations\n", obj->path,
                (unsigned int)ELF_R_TYPE(rela->r_info));
            goto done;
        }
    }
    r = 0;
done:
    if (cache != NULL)
        free(cache);
    return (r);
}
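
/*
 * Illustrative sketch (not part of rtld): the arithmetic behind the
 * simplest case above, R_X86_64_RELATIVE, written without rtld's data
 * structures.  No symbol lookup is involved; the target is just the load
 * base plus the relocation's addend.  "base" stands in for the object's
 * relocbase and the function name is hypothetical.
 */
#if 0
#include <elf.h>

static void
apply_relative(char *base, const Elf64_Rela *rela)
{
    /* target = load base + addend; stored at load base + offset. */
    *(Elf64_Addr *)(base + rela->r_offset) =
        (Elf64_Addr)(base + rela->r_addend);
}
#endif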

/* Process the PLT relocations. */
int
reloc_plt(Obj_Entry *obj)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
    for (rela = obj->pltrela; rela < relalim; rela++) {
        Elf_Addr *where;

        switch(ELF_R_TYPE(rela->r_info)) {
        case R_X86_64_JMP_SLOT:
            /* Relocate the GOT slot pointing into the PLT. */
            where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
            *where += (Elf_Addr)obj->relocbase;
            break;

        case R_X86_64_IRELATIVE:
            obj->irelative = true;
            break;

        default:
            _rtld_error("Unknown relocation type %x in PLT",
                (unsigned int)ELF_R_TYPE(rela->r_info));
            return (-1);
        }
    }
    return 0;
}
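
/*
 * Illustrative sketch (not part of rtld): the effect of the
 * R_X86_64_JMP_SLOT arm above when binding is lazy.  At link time each GOT
 * slot holds the unrelocated address of its PLT entry's second instruction,
 * so adding the load base leaves the slot pointing back into the PLT; the
 * first call then pushes the relocation index and jumps through PLT0, which
 * uses the GOT[1]/GOT[2] values installed by init_pltgot() to enter
 * _rtld_bind_start.  The function name below is hypothetical.
 */
#if 0
#include <elf.h>

static void
prime_got_slot_for_lazy_binding(char *base, const Elf64_Rela *rela)
{
    Elf64_Addr *slot = (Elf64_Addr *)(base + rela->r_offset);

    *slot += (Elf64_Addr)base;	/* slot now points at its PLT stub */
}
#endif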

/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    if (obj->jmpslots_done)
        return 0;
    relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
    for (rela = obj->pltrela; rela < relalim; rela++) {
        Elf_Addr *where, target;
        const Elf_Sym *def;
        const Obj_Entry *defobj;

        switch (ELF_R_TYPE(rela->r_info)) {
        case R_X86_64_JMP_SLOT:
            where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
            def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                SYMLOOK_IN_PLT | flags, NULL, lockstate);
            if (def == NULL)
                return (-1);
            if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
                obj->gnu_ifunc = true;
                continue;
            }
            target = (Elf_Addr)(defobj->relocbase + def->st_value + rela->r_addend);
            reloc_jmpslot(where, target, defobj, obj, (const Elf_Rel *)rela);
            break;

        case R_X86_64_IRELATIVE:
            break;

        default:
            _rtld_error("Unknown relocation type %x in PLT",
                (unsigned int)ELF_R_TYPE(rela->r_info));
            return (-1);
        }
    }
    obj->jmpslots_done = true;
    return 0;
}

int
reloc_iresolve(Obj_Entry *obj, RtldLockState *lockstate)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    if (!obj->irelative)
        return (0);
    relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
    for (rela = obj->pltrela; rela < relalim; rela++) {
        Elf_Addr *where, target, *ptr;

        switch (ELF_R_TYPE(rela->r_info)) {
        case R_X86_64_JMP_SLOT:
            break;

        case R_X86_64_IRELATIVE:
            ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
            where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
            lock_release(rtld_bind_lock, lockstate);
            target = ((Elf_Addr (*)(void))ptr)();
            wlock_acquire(rtld_bind_lock, lockstate);
            *where = target;
            break;
        }
    }
    obj->irelative = false;
    return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    if (!obj->gnu_ifunc)
        return (0);
    relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
    for (rela = obj->pltrela; rela < relalim; rela++) {
        Elf_Addr *where, target;
        const Elf_Sym *def;
        const Obj_Entry *defobj;

        switch (ELF_R_TYPE(rela->r_info)) {
        case R_X86_64_JMP_SLOT:
            where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
            def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                SYMLOOK_IN_PLT | flags, NULL, lockstate);
            if (def == NULL)
                return (-1);
            if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
                continue;
            lock_release(rtld_bind_lock, lockstate);
            target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
            wlock_acquire(rtld_bind_lock, lockstate);
            reloc_jmpslot(where, target, defobj, obj, (const Elf_Rel *)rela);
            break;
        }
    }
    obj->gnu_ifunc = false;
    return (0);
}
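
/*
 * Illustrative sketch (not part of rtld): source that produces the
 * STT_GNU_IFUNC / R_X86_64_IRELATIVE machinery handled by reloc_iresolve()
 * and reloc_gnu_ifunc() above.  With a GCC- or Clang-compatible toolchain,
 * the "ifunc" attribute makes the linker record an indirect relocation whose
 * target rtld computes by calling the resolver, with the bind lock dropped,
 * at load time.  All names below are hypothetical.
 */
#if 0
static int impl_generic(void) { return (0); }
static int impl_fast(void) { return (1); }

static int
cpu_has_fast_path(void)
{
    return (0);		/* stand-in for a real CPU feature probe */
}

/* The resolver runs once and returns the implementation to bind to. */
static int (*
resolve_my_func(void))(void)
{
    return (cpu_has_fast_path() ? impl_fast : impl_generic);
}

int my_func(void) __attribute__((ifunc("resolve_my_func")));
#endif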
483 */ 484 tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA; 485 amd64_set_fsbase(allocate_tls(objs, 0, 486 3*sizeof(Elf_Addr), sizeof(Elf_Addr))); 487 } 488 489 void *__tls_get_addr(tls_index *ti) 490 { 491 Elf_Addr** segbase; 492 493 __asm __volatile("movq %%fs:0, %0" : "=r" (segbase)); 494 495 return tls_get_addr_common(&segbase[1], ti->ti_module, ti->ti_offset); 496 } 497