/* $NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $ */

/*-
 * Copyright (C) 1998 Tsubai Masanari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include "debug.h"
#include "rtld.h"

#if !defined(_CALL_ELF) || _CALL_ELF == 1
struct funcdesc {
        Elf_Addr addr;
        Elf_Addr toc;
        Elf_Addr env;
};
#endif
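
/*
 * ABI background for the descriptor above (not stated explicitly in this
 * file): under ELFv1 a function symbol resolves to such a descriptor
 * rather than to code.  `addr' is the entry point, `toc' is the TOC base
 * the callee expects in r2, and `env' is an environment word passed in
 * r11.  reloc_jmpslot() below copies whole descriptors when it binds a
 * PLT slot.
 */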

/*
 * Process the R_PPC_COPY relocations
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
        const Elf_Rela *relalim;
        const Elf_Rela *rela;

        /*
         * COPY relocs are invalid outside of the main program
         */
        assert(dstobj->mainprog);

        relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela +
            dstobj->relasize);
        for (rela = dstobj->rela; rela < relalim; rela++) {
                void *dstaddr;
                const Elf_Sym *dstsym;
                const char *name;
                size_t size;
                const void *srcaddr;
                const Elf_Sym *srcsym = NULL;
                const Obj_Entry *srcobj, *defobj;
                SymLook req;
                int res;

                if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
                        continue;
                }

                dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
                dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
                name = dstobj->strtab + dstsym->st_name;
                size = dstsym->st_size;
                symlook_init(&req, name);
                req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
                req.flags = SYMLOOK_EARLY;

                for (srcobj = globallist_next(dstobj); srcobj != NULL;
                    srcobj = globallist_next(srcobj)) {
                        res = symlook_obj(&req, srcobj);
                        if (res == 0) {
                                srcsym = req.sym_out;
                                defobj = req.defobj_out;
                                break;
                        }
                }

                if (srcobj == NULL) {
                        _rtld_error("Undefined symbol \"%s\" "
                            " referenced from COPY"
                            " relocation in %s", name, dstobj->path);
                        return (-1);
                }

                srcaddr = (const void *) (defobj->relocbase+srcsym->st_value);
                memcpy(dstaddr, srcaddr, size);
                dbg("copy_reloc: src=%p,dst=%p,size=%zd\n",srcaddr,dstaddr,size);
        }

        return (0);
}


/*
 * Perform early relocation of the run-time linker image
 */
void
reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
        const Elf_Rela *rela = NULL, *relalim;
        Elf_Addr relasz = 0;
        Elf_Addr *where;

        /*
         * Extract the rela/relasz values from the dynamic section
         */
        for (; dynp->d_tag != DT_NULL; dynp++) {
                switch (dynp->d_tag) {
                case DT_RELA:
                        rela = (const Elf_Rela *)(relocbase+dynp->d_un.d_ptr);
                        break;
                case DT_RELASZ:
                        relasz = dynp->d_un.d_val;
                        break;
                }
        }

        /*
         * Relocate these values
         */
        relalim = (const Elf_Rela *)((caddr_t)rela + relasz);
        for (; rela < relalim; rela++) {
                where = (Elf_Addr *)(relocbase + rela->r_offset);
                *where = (Elf_Addr)(relocbase + rela->r_addend);
        }
}


/*
 * Relocate a non-PLT object with addend.
 */
static int
reloc_nonplt_object(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
        Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
        const Elf_Sym *def;
        const Obj_Entry *defobj;
        Elf_Addr tmp;

        switch (ELF_R_TYPE(rela->r_info)) {

        case R_PPC_NONE:
                break;

        case R_PPC64_UADDR64:   /* doubleword64 S + A */
        case R_PPC64_ADDR64:
        case R_PPC_GLOB_DAT:
                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);
                if (def == NULL) {
                        return (-1);
                }

                tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
                    rela->r_addend);

                /* Don't issue write if unnecessary; avoid COW page fault */
                if (*where != tmp) {
                        *where = tmp;
                }
                break;

        case R_PPC_RELATIVE:    /* doubleword64 B + A */
                tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);

                /* As above, don't issue write unnecessarily */
                if (*where != tmp) {
                        *where = tmp;
                }
                break;

        case R_PPC_COPY:
                /*
                 * These are deferred until all other relocations
                 * have been done. All we do here is make sure
                 * that the COPY relocation is not in a shared
                 * library. They are allowed only in executable
                 * files.
                 */
                if (!obj->mainprog) {
                        _rtld_error("%s: Unexpected R_COPY "
                            " relocation in shared library",
                            obj->path);
                        return (-1);
                }
                break;

        case R_PPC_JMP_SLOT:
                /*
                 * These will be handled by the plt/jmpslot routines
                 */
                break;

        case R_PPC64_DTPMOD64:
                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);

                if (def == NULL)
                        return (-1);

                *where = (Elf_Addr) defobj->tlsindex;

                break;
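
        /*
         * Background on the TLS relocations handled here (general ELF TLS
         * convention; only the use of TLS_TP_OFFSET and TLS_DTV_OFFSET is
         * taken from this file):
         *
         *   R_PPC64_DTPMOD64  stores the module index used to find the
         *                     object's block in a thread's DTV.
         *   R_PPC64_TPREL64   stores an offset relative to the thread
         *                     pointer, so the object needs a slot in the
         *                     static TLS block (allocated lazily below).
         *   R_PPC64_DTPREL64  stores an offset within the module's own
         *                     TLS block, biased by TLS_DTV_OFFSET.
         */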

        case R_PPC64_TPREL64:
                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);

                if (def == NULL)
                        return (-1);

                /*
                 * We lazily allocate offsets for static TLS as we
                 * see the first relocation that references the
                 * TLS block. This allows us to support (small
                 * amounts of) static TLS in dynamically loaded
                 * modules. If we run out of space, we generate an
                 * error.
                 */
                if (!defobj->tls_done) {
                        if (!allocate_tls_offset((Obj_Entry*) defobj)) {
                                _rtld_error("%s: No space available for static "
                                    "Thread Local Storage", obj->path);
                                return (-1);
                        }
                }

                *(Elf_Addr **)where = *where * sizeof(Elf_Addr)
                    + (Elf_Addr *)(def->st_value + rela->r_addend
                    + defobj->tlsoffset - TLS_TP_OFFSET);

                break;

        case R_PPC64_DTPREL64:
                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    flags, cache, lockstate);

                if (def == NULL)
                        return (-1);

                *where += (Elf_Addr)(def->st_value + rela->r_addend
                    - TLS_DTV_OFFSET);

                break;

        default:
                _rtld_error("%s: Unsupported relocation type %ld"
                    " in non-PLT relocations\n", obj->path,
                    ELF_R_TYPE(rela->r_info));
                return (-1);
        }
        return (0);
}


/*
 * Process non-PLT relocations
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
        const Elf_Rela *relalim;
        const Elf_Rela *rela;
        SymCache *cache;
        int bytes = obj->dynsymcount * sizeof(SymCache);
        int r = -1;

        if ((flags & SYMLOOK_IFUNC) != 0)
                /* XXX not implemented */
                return (0);

        /*
         * The dynamic loader may be called from a thread, we have
         * limited amounts of stack available so we cannot use alloca().
         */
        if (obj != obj_rtld) {
                cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
                    -1, 0);
                if (cache == MAP_FAILED)
                        cache = NULL;
        } else
                cache = NULL;

        /*
         * From the SVR4 PPC ABI:
         * "The PowerPC family uses only the Elf32_Rela relocation
         *  entries with explicit addends."
         */
        relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
        for (rela = obj->rela; rela < relalim; rela++) {
                if (reloc_nonplt_object(obj_rtld, obj, rela, cache, flags,
                    lockstate) < 0)
                        goto done;
        }
        r = 0;
done:
        if (cache)
                munmap(cache, bytes);

        /* Synchronize icache for text seg in case we made any changes */
        __syncicache(obj->mapbase, obj->textsize);

        return (r);
}


/*
 * Initialise a PLT slot to the resolving trampoline
 */
static int
reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
{
        Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
        long reloff;

        reloff = rela - obj->pltrela;

        dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx", (void *)where,
            reloff, obj->glink);

#if !defined(_CALL_ELF) || _CALL_ELF == 1
        /* Glink code is 3 instructions after the first 32k, 2 before */
        *where = (Elf_Addr)obj->glink + 32 +
            8*((reloff < 0x8000) ? reloff : 0x8000) +
            12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
#else
        *where = (Elf_Addr)obj->glink + 4*reloff + 32;
#endif

        return (0);
}
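
/*
 * A worked example of the glink arithmetic above, derived only from the
 * expressions in reloc_plt_object(): under ELFv1 the first 0x8000 lazy
 * stubs are 2 instructions (8 bytes) each and start 32 bytes into the
 * glink area, so slot 3 maps to glink + 32 + 24; stubs past the first
 * 32K are 3 instructions (12 bytes), so slot 0x8001 maps to
 * glink + 32 + 8*0x8000 + 12.  Under ELFv2 every stub is a single
 * 4-byte instruction, giving glink + 32 + 4*reloff.
 */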

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj)
{
        const Elf_Rela *relalim;
        const Elf_Rela *rela;

        if (obj->pltrelasize != 0) {
                relalim = (const Elf_Rela *)((char *)obj->pltrela +
                    obj->pltrelasize);
                for (rela = obj->pltrela; rela < relalim; rela++) {
                        assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);

                        if (reloc_plt_object(obj, rela) < 0) {
                                return (-1);
                        }
                }
        }

        return (0);
}


/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
        const Obj_Entry *defobj;
        const Elf_Rela *relalim;
        const Elf_Rela *rela;
        const Elf_Sym *def;
        Elf_Addr *where;
        Elf_Addr target;

        relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
        for (rela = obj->pltrela; rela < relalim; rela++) {
                assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
                where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
                def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                    SYMLOOK_IN_PLT | flags, NULL, lockstate);
                if (def == NULL) {
                        dbg("reloc_jmpslots: sym not found");
                        return (-1);
                }

                target = (Elf_Addr)(defobj->relocbase + def->st_value);

                if (def == &sym_zero) {
                        /* Zero undefined weak symbols */
#if !defined(_CALL_ELF) || _CALL_ELF == 1
                        bzero(where, sizeof(struct funcdesc));
#else
                        *where = 0;
#endif
                } else {
                        reloc_jmpslot(where, target, defobj, obj,
                            (const Elf_Rel *) rela);
                }
        }

        obj->jmpslots_done = true;

        return (0);
}
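
/*
 * Sketch of the lazy-binding path these routines serve (the register-save
 * trampoline itself lives outside this file): a call through an unresolved
 * PLT slot lands in the glink stub installed by reloc_plt_object(), which
 * branches to the resolver entry that init_pltgot() filled in with
 * _rtld_bind_start and the object pointer.  The binder resolves the symbol
 * and calls reloc_jmpslot() below to patch the slot with the resolved
 * address (under ELFv1, the resolved function descriptor), so subsequent
 * calls go straight to the target.
 */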

/*
 * Update the value of a PLT jump slot.
 */
Elf_Addr
reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
    const Obj_Entry *obj, const Elf_Rel *rel)
{

        /*
         * At the PLT entry pointed at by `wherep', construct
         * a direct transfer to the now fully resolved function
         * address.
         */

#if !defined(_CALL_ELF) || _CALL_ELF == 1
        dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
            (void *)wherep, (void *)target, *(Elf_Addr *)target,
            (Elf_Addr)defobj->relocbase);

        if (ld_bind_not)
                goto out;

        /*
         * For the trampoline, the second two elements of the function
         * descriptor are unused, so we are fine replacing those at any time
         * with the real ones with no thread safety implications. However, we
         * need to make sure the main entry point pointer ([0]) is seen to be
         * modified *after* the second two elements. This can't be done in
         * general, since there are no barriers in the reading code, but put in
         * some isyncs to at least make it a little better.
         */
        memcpy(wherep, (void *)target, sizeof(struct funcdesc));
        wherep[2] = ((Elf_Addr *)target)[2];
        wherep[1] = ((Elf_Addr *)target)[1];
        __asm __volatile ("isync" : : : "memory");
        wherep[0] = ((Elf_Addr *)target)[0];
        __asm __volatile ("isync" : : : "memory");

        if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
                /*
                 * It is possible (LD_BIND_NOW) that the function
                 * descriptor we are copying has not yet been relocated.
                 * If this happens, fix it. Don't worry about threading in
                 * this case since LD_BIND_NOW makes it irrelevant.
                 */

                ((struct funcdesc *)(wherep))->addr +=
                    (Elf_Addr)defobj->relocbase;
                ((struct funcdesc *)(wherep))->toc +=
                    (Elf_Addr)defobj->relocbase;
        }
out:
#else
        dbg(" reloc_jmpslot: where=%p, target=%p", (void *)wherep,
            (void *)target);

        if (!ld_bind_not)
                *wherep = target;
#endif

        return (target);
}

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

        /* XXX not implemented */
        return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

        /* XXX not implemented */
        return (0);
}

void
init_pltgot(Obj_Entry *obj)
{
        Elf_Addr *pltcall;

        pltcall = obj->pltgot;

        if (pltcall == NULL) {
                return;
        }

#if defined(_CALL_ELF) && _CALL_ELF == 2
        pltcall[0] = (Elf_Addr)&_rtld_bind_start;
        pltcall[1] = (Elf_Addr)obj;
#else
        memcpy(pltcall, _rtld_bind_start, sizeof(struct funcdesc));
        pltcall[2] = (Elf_Addr)obj;
#endif
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{
}

void
allocate_initial_tls(Obj_Entry *list)
{
        Elf_Addr **tp;

        /*
         * Fix the size of the static TLS block by using the maximum
         * offset allocated so far and adding a bit for dynamic modules to
         * use.
         */

        tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;

        tp = (Elf_Addr **) ((char *)allocate_tls(list, NULL, TLS_TCB_SIZE, 16)
            + TLS_TP_OFFSET + TLS_TCB_SIZE);

        __asm __volatile("mr 13,%0" :: "r"(tp));
}

void*
__tls_get_addr(tls_index* ti)
{
        Elf_Addr **tp;
        char *p;

        __asm __volatile("mr %0,13" : "=r"(tp));
        p = tls_get_addr_common((Elf_Addr**)((Elf_Addr)tp - TLS_TP_OFFSET
            - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);

        return (p + TLS_DTV_OFFSET);
}
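
/*
 * Addressing convention assumed by the two routines above (standard
 * powerpc TLS layout; only the offset arithmetic is taken from this
 * file): r13 serves as the thread pointer and is set TLS_TP_OFFSET +
 * TLS_TCB_SIZE bytes past the TCB returned by allocate_tls(), so
 * __tls_get_addr() removes the same bias before calling
 * tls_get_addr_common(), and the result is rebased by TLS_DTV_OFFSET to
 * match the offsets stored by the R_PPC64_DTPREL64 handling above.
 */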