/*-
 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <machine/sysarch.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

/*
 * Process the special R_X86_64_COPY relocations in the main program.  These
 * copy data from a shared object into a region in the main program's BSS
 * segment.
 *
 * Returns 0 on success, -1 on failure.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    assert(dstobj->mainprog);	/* COPY relocations are invalid elsewhere */

    relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela + dstobj->relasize);
    for (rela = dstobj->rela; rela < relalim; rela++) {
        if (ELF_R_TYPE(rela->r_info) == R_X86_64_COPY) {
            void *dstaddr;
            const Elf_Sym *dstsym;
            const char *name;
            unsigned long hash;
            size_t size;
            const void *srcaddr;
            const Elf_Sym *srcsym;
            Obj_Entry *srcobj;
            const Ver_Entry *ve;

            dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
            dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
            name = dstobj->strtab + dstsym->st_name;
            hash = elf_hash(name);
            size = dstsym->st_size;
            ve = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));

            for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next)
                if ((srcsym = symlook_obj(name, hash, srcobj, ve, 0)) != NULL)
                    break;

            if (srcobj == NULL) {
                _rtld_error("Undefined symbol \"%s\" referenced from COPY"
                    " relocation in %s", name, dstobj->path);
                return -1;
            }

            srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value);
            memcpy(dstaddr, srcaddr, size);
        }
    }

    return 0;
}
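/*
 * Illustrative sketch of when a COPY relocation arises (the symbol names
 * here are hypothetical, not part of this source tree): the static linker
 * emits R_X86_64_COPY when the main program refers directly to a data
 * object defined in a shared library, e.g.
 *
 *     libfoo.so defines:     int foo_count = 42;
 *     main program uses:     extern int foo_count;
 *
 * The linker reserves space for foo_count in the executable's BSS and tags
 * it with a COPY relocation; do_copy_relocations() above then locates the
 * defining object and memcpy()s the initial value into that reservation,
 * after which every object in the process resolves the symbol to the
 * executable's copy.
 */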
/* Initialize the special GOT entries. */
void
init_pltgot(Obj_Entry *obj)
{
    if (obj->pltgot != NULL) {
        obj->pltgot[1] = (Elf_Addr) obj;
        obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
    }
}
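/*
 * Note: the ABI reserves the first GOT entries for the runtime linker.
 * GOT[0] holds the address of the object's _DYNAMIC section (set by the
 * static linker); GOT[1] is filled above with a pointer to the object's
 * Obj_Entry, and GOT[2] with the address of _rtld_bind_start, the assembly
 * trampoline reached on the first call to a lazily bound function so that
 * _rtld_bind() can resolve the target.
 */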
/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;
    SymCache *cache;
    int r = -1;

    /*
     * The dynamic loader may be called from a thread; we have only
     * limited amounts of stack available, so we cannot use alloca().
     */
    if (obj != obj_rtld) {
        cache = calloc(obj->nchains, sizeof(SymCache));
        /* No need to check for NULL here */
    } else
        cache = NULL;

    relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
    for (rela = obj->rela; rela < relalim; rela++) {
        Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
        Elf32_Addr *where32 = (Elf32_Addr *)where;

        switch (ELF_R_TYPE(rela->r_info)) {

        case R_X86_64_NONE:
            break;

        case R_X86_64_64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                *where = (Elf_Addr) (defobj->relocbase + def->st_value +
                    rela->r_addend);
            }
            break;

        case R_X86_64_PC32:
            /*
             * I don't think the dynamic linker should ever see this
             * type of relocation.  But the binutils-2.6 tools sometimes
             * generate it.
             */
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                *where32 = (Elf32_Addr) (unsigned long) (defobj->relocbase +
                    def->st_value + rela->r_addend - (Elf_Addr) where);
            }
            break;
        /* missing: R_X86_64_GOT32 R_X86_64_PLT32 */

        case R_X86_64_COPY:
            /*
             * These are deferred until all other relocations have
             * been done.  All we do here is make sure that the COPY
             * relocation is not in a shared library.  They are allowed
             * only in executable files.
             */
            if (!obj->mainprog) {
                _rtld_error("%s: Unexpected R_X86_64_COPY relocation"
                    " in shared library", obj->path);
                goto done;
            }
            break;

        case R_X86_64_GLOB_DAT:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                *where = (Elf_Addr) (defobj->relocbase + def->st_value);
            }
            break;

        case R_X86_64_TPOFF64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                /*
                 * We lazily allocate offsets for static TLS as we
                 * see the first relocation that references the
                 * TLS block.  This allows us to support (small
                 * amounts of) static TLS in dynamically loaded
                 * modules.  If we run out of space, we generate an
                 * error.
                 */
                if (!defobj->tls_done) {
                    if (!allocate_tls_offset((Obj_Entry*) defobj)) {
                        _rtld_error("%s: No space available for static "
                            "Thread Local Storage", obj->path);
                        goto done;
                    }
                }

                *where = (Elf_Addr) (def->st_value - defobj->tlsoffset +
                    rela->r_addend);
            }
            break;

        case R_X86_64_TPOFF32:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                /*
                 * We lazily allocate offsets for static TLS as we
                 * see the first relocation that references the
                 * TLS block.  This allows us to support (small
                 * amounts of) static TLS in dynamically loaded
                 * modules.  If we run out of space, we generate an
                 * error.
                 */
                if (!defobj->tls_done) {
                    if (!allocate_tls_offset((Obj_Entry*) defobj)) {
                        _rtld_error("%s: No space available for static "
                            "Thread Local Storage", obj->path);
                        goto done;
                    }
                }

                *where32 = (Elf32_Addr) (def->st_value -
                    defobj->tlsoffset +
                    rela->r_addend);
            }
            break;

        case R_X86_64_DTPMOD64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                *where += (Elf_Addr) defobj->tlsindex;
            }
            break;

        case R_X86_64_DTPOFF64:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                *where += (Elf_Addr) (def->st_value + rela->r_addend);
            }
            break;

        case R_X86_64_DTPOFF32:
            {
                const Elf_Sym *def;
                const Obj_Entry *defobj;

                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    false, cache);
                if (def == NULL)
                    goto done;

                *where32 += (Elf32_Addr) (def->st_value + rela->r_addend);
            }
            break;

        case R_X86_64_RELATIVE:
            *where = (Elf_Addr)(obj->relocbase + rela->r_addend);
            break;

        /*
         * missing: R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S,
         * R_X86_64_16, R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8
         */

        default:
            _rtld_error("%s: Unsupported relocation type %u"
                " in non-PLT relocations\n", obj->path,
                (unsigned int)ELF_R_TYPE(rela->r_info));
            goto done;
        }
    }
    r = 0;
done:
    if (cache != NULL)
        free(cache);
    return(r);
}

/* Process the PLT relocations. */
int
reloc_plt(Obj_Entry *obj)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
    for (rela = obj->pltrela; rela < relalim; rela++) {
        Elf_Addr *where;

        assert(ELF_R_TYPE(rela->r_info) == R_X86_64_JMP_SLOT);

        /* Relocate the GOT slot pointing into the PLT. */
        where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
        *where += (Elf_Addr)obj->relocbase;
    }
    return 0;
}
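/*
 * A note on the lazy-binding path that reloc_plt() sets up: the static
 * linker initializes each JMP_SLOT GOT entry with the link-time address of
 * the instruction following the indirect jump in its PLT stub, so adding
 * obj->relocbase is all that is needed above.  The first call through the
 * stub therefore falls back into the stub, pushes the relocation index,
 * and enters PLT0, which pushes GOT[1] and jumps through GOT[2] to
 * _rtld_bind_start; _rtld_bind() then resolves the symbol and patches the
 * GOT entry (via reloc_jmpslot()), so later calls go straight to the
 * target.
 */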
/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    if (obj->jmpslots_done)
        return 0;
    relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
    for (rela = obj->pltrela; rela < relalim; rela++) {
        Elf_Addr *where, target;
        const Elf_Sym *def;
        const Obj_Entry *defobj;

        assert(ELF_R_TYPE(rela->r_info) == R_X86_64_JMP_SLOT);
        where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
        def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, true, NULL);
        if (def == NULL)
            return -1;
        target = (Elf_Addr)(defobj->relocbase + def->st_value + rela->r_addend);
        reloc_jmpslot(where, target, defobj, obj, (const Elf_Rel *)rela);
    }
    obj->jmpslots_done = true;
    return 0;
}

void
allocate_initial_tls(Obj_Entry *objs)
{
    /*
     * Fix the size of the static TLS block by using the maximum
     * offset allocated so far and adding a bit for dynamic modules to
     * use.
     */
    tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
    amd64_set_fsbase(allocate_tls(objs, 0,
        3*sizeof(Elf_Addr), sizeof(Elf_Addr)));
}

void *__tls_get_addr(tls_index *ti)
{
    Elf_Addr** segbase;
    Elf_Addr* dtv;

    __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
    dtv = segbase[1];

    return tls_get_addr_common(&segbase[1], ti->ti_module, ti->ti_offset);
}
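/*
 * Note on the TLS code above: amd64 uses TLS Variant II, in which %fs:0
 * points at the thread control block (TCB) and the static TLS blocks live
 * at negative offsets from it (hence the "- defobj->tlsoffset" in the
 * TPOFF relocations).  allocate_initial_tls() asks allocate_tls() for a
 * TCB of 3 * sizeof(Elf_Addr): a self pointer at index 0, the dynamic
 * thread vector (DTV) pointer at index 1, and a third slot reserved for
 * the threads library, and installs the result as the fs base via
 * amd64_set_fsbase().  __tls_get_addr() reads %fs:0 to locate the TCB and
 * hands the address of its DTV slot to tls_get_addr_common(), which looks
 * up (or allocates) the block for ti->ti_module and returns the address
 * at ti->ti_offset within it.
 */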