/*	$NetBSD: mdreloc.c,v 1.23 2003/07/26 15:04:38 mrg Exp $	*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "machine/sysarch.h"

#include "debug.h"
#include "rtld.h"
#include "rtld_paths.h"

/*
 * Initialize the special GOT entries used by the PLT: GOT[1] holds a
 * pointer back to the Obj_Entry and GOT[2] the address of the lazy
 * binding trampoline, _rtld_bind_start.
 */
void
init_pltgot(Obj_Entry *obj)
{
	if (obj->pltgot != NULL) {
		obj->pltgot[1] = (Elf_Addr)obj;
		obj->pltgot[2] = (Elf_Addr)&_rtld_bind_start;
	}
}

/*
 * Process the R_ARM_COPY relocations in the main program.  These copy
 * initialized data from the defining shared object into the space
 * reserved for it in the program's data segment.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	assert(dstobj->mainprog);	/* COPY relocations are invalid elsewhere */

	rellim = (const Elf_Rel *)((const char *)dstobj->rel + dstobj->relsize);
	for (rel = dstobj->rel; rel < rellim; rel++) {
		if (ELF_R_TYPE(rel->r_info) == R_ARM_COPY) {
			void *dstaddr;
			const Elf_Sym *dstsym;
			const char *name;
			size_t size;
			const void *srcaddr;
			const Elf_Sym *srcsym;
			const Obj_Entry *srcobj, *defobj;
			SymLook req;
			int res;

			dstaddr = (void *)(dstobj->relocbase + rel->r_offset);
			dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info);
			name = dstobj->strtab + dstsym->st_name;
			size = dstsym->st_size;

			symlook_init(&req, name);
			req.ventry = fetch_ventry(dstobj,
			    ELF_R_SYM(rel->r_info));
			req.flags = SYMLOOK_EARLY;

			for (srcobj = globallist_next(dstobj); srcobj != NULL;
			    srcobj = globallist_next(srcobj)) {
				res = symlook_obj(&req, srcobj);
				if (res == 0) {
					srcsym = req.sym_out;
					defobj = req.defobj_out;
					break;
				}
			}
			if (srcobj == NULL) {
				_rtld_error(
    "Undefined symbol \"%s\" referenced from COPY relocation in %s",
				    name, dstobj->path);
				return (-1);
			}

			srcaddr = (const void *)(defobj->relocbase +
			    srcsym->st_value);
			memcpy(dstaddr, srcaddr, size);
		}
	}
	return 0;
}

void _rtld_bind_start(void);
void _rtld_relocate_nonplt_self(Elf_Dyn *, Elf_Addr);

/*
 * Relocate the dynamic linker itself, before any of its own global data
 * may be referenced.  Only load-base-relative (R_ARM_RELATIVE-style) REL
 * entries are expected at this point, so the load base is simply added
 * to every relocation target.
 */
void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rel *rel = NULL, *rellim;
	Elf_Addr relsz = 0;
	Elf_Addr *where;
	uint32_t size;

	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELSZ:
			relsz = dynp->d_un.d_val;
			break;
		}
	}
	rellim = (const Elf_Rel *)((const char *)rel + relsz);
	size = (rellim - 1)->r_offset - rel->r_offset;
	for (; rel < rellim; rel++) {
		where = (Elf_Addr *)(relocbase + rel->r_offset);

		*where += (Elf_Addr)relocbase;
	}
}

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(void *where)
{
	Elf_Addr res;

	memcpy(&res, where, sizeof(res));

	return (res);
}

static __inline void
store_ptr(void *where, Elf_Addr val)
{

	memcpy(where, &val, sizeof(val));
}

static int
reloc_nonplt_object(Obj_Entry *obj, const Elf_Rel *rel, SymCache *cache,
    int flags, RtldLockState *lockstate)
{
	Elf_Addr *where;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr tmp;
	unsigned long symnum;

	where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	symnum = ELF_R_SYM(rel->r_info);

	switch (ELF_R_TYPE(rel->r_info)) {
	case R_ARM_NONE:
		break;

#if 1 /* XXX should not occur */
	case R_ARM_PC24: {	/* word32 S - P + A */
		Elf32_Sword addend;

		/*
		 * Extract addend and sign-extend if needed.
		 */
		addend = *where;
		if (addend & 0x00800000)
			addend |= 0xff000000;

		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		tmp = (Elf_Addr)obj->relocbase + def->st_value
		    - (Elf_Addr)where + (addend << 2);
		if ((tmp & 0xfe000000) != 0xfe000000 &&
		    (tmp & 0xfe000000) != 0) {
			_rtld_error(
			    "%s: R_ARM_PC24 relocation @ %p to %s failed "
			    "(displacement %ld (%#lx) out of range)",
			    obj->path, where,
			    obj->strtab + obj->symtab[symnum].st_name,
			    (long)tmp, (long)tmp);
			return -1;
		}
		tmp >>= 2;
		*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
		dbg("PC24 %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)*where, where, defobj->path);
		break;
	}
#endif

	case R_ARM_ABS32:	/* word32 B + S + A */
	case R_ARM_GLOB_DAT:	/* word32 B + S */
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)defobj->relocbase +
			    def->st_value;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)defobj->relocbase +
			    def->st_value;
			store_ptr(where, tmp);
		}
		dbg("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp, where, defobj->path);
		break;

	case R_ARM_RELATIVE:	/* word32 B + A */
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)obj->relocbase;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)obj->relocbase;
			store_ptr(where, tmp);
		}
		dbg("RELATIVE in %s --> %p", obj->path,
		    (void *)tmp);
		break;

	case R_ARM_COPY:
		/*
		 * These are deferred until all other relocations have
		 * been done.  All we do here is make sure that the
		 * COPY relocation is not in a shared library.  They
		 * are allowed only in executable files.
		 */
		if (!obj->mainprog) {
			_rtld_error(
			    "%s: Unexpected R_COPY relocation in shared library",
			    obj->path);
			return -1;
		}
		dbg("COPY (avoid in main)");
		break;

	case R_ARM_TLS_DTPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(def->st_value);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;

	case R_ARM_TLS_DTPMOD32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(defobj->tlsindex);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPMOD32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;

	case R_ARM_TLS_TPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		if (!defobj->tls_done && !allocate_tls_offset(obj))
			return -1;

		tmp = (Elf_Addr)def->st_value + defobj->tlsoffset;
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);
		dbg("TLS_TPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);
		break;

	default:
		dbg("sym = %lu, type = %lu, offset = %p, "
		    "contents = %p, symbol = %s",
		    symnum, (u_long)ELF_R_TYPE(rel->r_info),
		    (void *)rel->r_offset, (void *)load_ptr(where),
		    obj->strtab + obj->symtab[symnum].st_name);
		_rtld_error("%s: Unsupported relocation type %ld "
		    "in non-PLT relocations\n",
		    obj->path, (u_long)ELF_R_TYPE(rel->r_info));
		return -1;
	}
	return 0;
}

/*
 * Process non-PLT relocations.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	SymCache *cache;
	int r = -1;

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);
	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread, and we have
	 * limited amounts of stack available, so we cannot use alloca().
	 */
	cache = calloc(obj->dynsymcount, sizeof(SymCache));
	/*
	 * No need to check for NULL here: the symbol cache is only an
	 * optimization, and a failed allocation is tolerated.
	 */

	rellim = (const Elf_Rel *)((const char *)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		if (reloc_nonplt_object(obj, rel, cache, flags, lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache != NULL)
		free(cache);
	return (r);
}

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel +
	    obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		Elf_Addr *where;

		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots.
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel + obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);
		reloc_jmpslot(where, target, defobj, obj,
		    (const Elf_Rel *)rel);
	}

	obj->jmpslots_done = true;

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
    const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
    const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

	if (*where != target && !ld_bind_not)
		*where = target;
	return (target);
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

}

void
allocate_initial_tls(Obj_Entry *objs)
{
	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}

void *
__tls_get_addr(tls_index *ti)
{
	uintptr_t **dtvp;

	dtvp = &_tcb_get()->tcb_dtv;
	return (tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset));
}