/*	$NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (C) 1998 Tsubai Masanari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include "debug.h"
#include "rtld.h"

#if !defined(_CALL_ELF) || _CALL_ELF == 1
/*
 * ELFv1 function descriptor: entry-point address, TOC pointer, and
 * environment word.  Jump slots hold one of these rather than a plain
 * code address on ELFv1.
 */
struct funcdesc {
	Elf_Addr addr;
	Elf_Addr toc;
	Elf_Addr env;
};
#endif

/*
 * Process the R_PPC_COPY relocations.
 *
 * For each COPY relocation in dstobj (which must be the main program),
 * look up the symbol in the objects that follow dstobj in the global
 * list and copy st_size bytes of its data into the program's own copy.
 * Returns 0 on success, -1 (with an rtld error set) if a symbol cannot
 * be resolved.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *) dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		void *dstaddr;
		const Elf_Sym *dstsym;
		const char *name;
		size_t size;
		const void *srcaddr;
		const Elf_Sym *srcsym = NULL;
		const Obj_Entry *srcobj, *defobj;
		SymLook req;
		int res;

		if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
			continue;
		}

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;
		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		/*
		 * Search every object after the main program for the
		 * defining instance of the symbol.
		 */
		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		    srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}

		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" "
			    " referenced from COPY"
			    " relocation in %s", name, dstobj->path);
			return (-1);
		}

		/* defobj/srcsym are valid here: the loop broke on success. */
		srcaddr = (const void *)(defobj->relocbase+srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
		dbg("copy_reloc: src=%p,dst=%p,size=%zd\n",srcaddr,dstaddr,size);
	}

	return (0);
}


/*
 * Perform early relocation of the run-time linker image.
 *
 * Called before rtld itself has been relocated, so it may only use
 * position-independent local state.  Every entry is applied as
 * "B + A" (relocbase + addend), i.e. all entries are treated as
 * RELATIVE relocations.
 */
void
reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rela *rela = NULL, *relalim;
	Elf_Addr relasz = 0;
	Elf_Addr *where;

	/*
	 * Extract the rela/relasz values from the dynamic section
	 */
	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_RELA:
			rela = (const Elf_Rela *)(relocbase+dynp->d_un.d_ptr);
			break;
		case DT_RELASZ:
			relasz = dynp->d_un.d_val;
			break;
		}
	}

	/*
	 * Relocate these values
	 */
	relalim = (const Elf_Rela *)((const char *)rela + relasz);
	for (; rela < relalim; rela++) {
		where = (Elf_Addr *)(relocbase + rela->r_offset);
		*where = (Elf_Addr)(relocbase + rela->r_addend);
	}
}


/*
 * Relocate a non-PLT object with addend.
 *
 * Applies a single Elf_Rela entry to obj.  cache (may be NULL) speeds up
 * repeated symbol lookups via find_symdef.  Returns 0 on success, -1 on
 * an unresolved symbol or unsupported relocation type.
 */
static int
reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
    const Elf_Rela *rela, SymCache *cache, int flags, RtldLockState *lockstate)
{
	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr tmp;

	switch (ELF_R_TYPE(rela->r_info)) {

	case R_PPC_NONE:
		break;

	case R_PPC64_UADDR64:	/* doubleword64 S + A */
	case R_PPC64_ADDR64:
	case R_PPC_GLOB_DAT:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL) {
			return (-1);
		}

		tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
		    rela->r_addend);

		/* Don't issue write if unnecessary; avoid COW page fault */
		if (*where != tmp) {
			*where = tmp;
		}
		break;

	case R_PPC_RELATIVE:	/* doubleword64 B + A */
		tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);

		/* As above, don't issue write unnecessarily */
		if (*where != tmp) {
			*where = tmp;
		}
		break;

	case R_PPC_COPY:
		/*
		 * These are deferred until all other relocations
		 * have been done. All we do here is make sure
		 * that the COPY relocation is not in a shared
		 * library. They are allowed only in executable
		 * files.
		 */
		if (!obj->mainprog) {
			_rtld_error("%s: Unexpected R_COPY "
			    " relocation in shared library",
			    obj->path);
			return (-1);
		}
		break;

	case R_PPC_JMP_SLOT:
		/*
		 * These will be handled by the plt/jmpslot routines
		 */
		break;

	case R_PPC64_DTPMOD64:
		/* TLS module ID: the index of the defining object. */
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);

		if (def == NULL)
			return (-1);

		*where = (Elf_Addr) defobj->tlsindex;

		break;

	case R_PPC64_TPREL64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);

		if (def == NULL)
			return (-1);

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset(
			    __DECONST(Obj_Entry *, defobj))) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return (-1);
			}
		}

		/*
		 * NOTE(review): the pre-existing *where value is scaled by
		 * sizeof(Elf_Addr) and added via Elf_Addr* pointer
		 * arithmetic — presumably matching the in-place addend
		 * convention used by the static linker here; confirm
		 * against the ABI before touching this expression.
		 */
		*(Elf_Addr **)where = *where * sizeof(Elf_Addr)
		    + (Elf_Addr *)(def->st_value + rela->r_addend
		    + defobj->tlsoffset - TLS_TP_OFFSET);

		break;

	case R_PPC64_DTPREL64:
		/* Offset of the symbol within its module's TLS block. */
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);

		if (def == NULL)
			return (-1);

		*where += (Elf_Addr)(def->st_value + rela->r_addend
		    - TLS_DTV_OFFSET);

		break;

	default:
		_rtld_error("%s: Unsupported relocation type %ld"
		    " in non-PLT relocations\n", obj->path,
		    ELF_R_TYPE(rela->r_info));
		return (-1);
	}
	return (0);
}


/*
 * Process non-PLT relocations.
 *
 * Walks obj's Elf_Rela table, applying each entry with
 * reloc_nonplt_object().  A per-symbol lookup cache is mmap'd (not
 * alloca'd — see comment below) and released before returning.
 * Returns 0 on success, -1 on the first failed relocation.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	int bytes = obj->dynsymcount * sizeof(SymCache);
	int r = -1;

	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread, we have
	 * limited amounts of stack available so we cannot use alloca().
	 */
	if (obj != obj_rtld) {
		cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
		    -1, 0);
		/* A failed mmap just disables caching; not fatal. */
		if (cache == MAP_FAILED)
			cache = NULL;
	} else
		cache = NULL;

	/*
	 * From the SVR4 PPC ABI:
	 * "The PowerPC family uses only the Elf32_Rela relocation
	 * entries with explicit addends."
	 */
	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		if (reloc_nonplt_object(obj_rtld, obj, rela, cache, flags,
		    lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache)
		munmap(cache, bytes);

	/* Synchronize icache for text seg in case we made any changes */
	__syncicache(obj->mapbase, obj->textsize);

	return (r);
}


/*
 * Initialise a PLT slot to the resolving trampoline.
 *
 * Points the slot at the appropriate entry of the object's glink
 * (PLT resolver stub) code; the real target is filled in lazily by
 * reloc_jmpslot() on first call.
 */
static int
reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
{
	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	long reloff;

	/* Index of this slot within the PLT relocation table. */
	reloff = rela - obj->pltrela;

	dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx", (void *)where,
	    reloff, obj->glink);

#if !defined(_CALL_ELF) || _CALL_ELF == 1
	/* Glink code is 3 instructions after the first 32k, 2 before */
	*where = (Elf_Addr)obj->glink + 32 +
	    8*((reloff < 0x8000) ? reloff : 0x8000) +
	    12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
#else
	*where = (Elf_Addr)obj->glink + 4*reloff + 32;
#endif

	return (0);
}


/*
 * Process the PLT relocations.
366 */ 367 int 368 reloc_plt(Obj_Entry *obj) 369 { 370 const Elf_Rela *relalim; 371 const Elf_Rela *rela; 372 373 if (obj->pltrelasize != 0) { 374 relalim = (const Elf_Rela *)((const char *)obj->pltrela + 375 obj->pltrelasize); 376 for (rela = obj->pltrela; rela < relalim; rela++) { 377 assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT); 378 379 if (reloc_plt_object(obj, rela) < 0) { 380 return (-1); 381 } 382 } 383 } 384 385 return (0); 386 } 387 388 389 /* 390 * LD_BIND_NOW was set - force relocation for all jump slots 391 */ 392 int 393 reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate) 394 { 395 const Obj_Entry *defobj; 396 const Elf_Rela *relalim; 397 const Elf_Rela *rela; 398 const Elf_Sym *def; 399 Elf_Addr *where; 400 Elf_Addr target; 401 402 relalim = (const Elf_Rela *)((const char *)obj->pltrela + 403 obj->pltrelasize); 404 for (rela = obj->pltrela; rela < relalim; rela++) { 405 assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT); 406 where = (Elf_Addr *)(obj->relocbase + rela->r_offset); 407 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, 408 SYMLOOK_IN_PLT | flags, NULL, lockstate); 409 if (def == NULL) { 410 dbg("reloc_jmpslots: sym not found"); 411 return (-1); 412 } 413 414 target = (Elf_Addr)(defobj->relocbase + def->st_value); 415 416 if (def == &sym_zero) { 417 /* Zero undefined weak symbols */ 418 #if !defined(_CALL_ELF) || _CALL_ELF == 1 419 bzero(where, sizeof(struct funcdesc)); 420 #else 421 *where = 0; 422 #endif 423 } else { 424 reloc_jmpslot(where, target, defobj, obj, 425 (const Elf_Rel *) rela); 426 } 427 } 428 429 obj->jmpslots_done = true; 430 431 return (0); 432 } 433 434 435 /* 436 * Update the value of a PLT jump slot. 
 */
Elf_Addr
reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
    const Obj_Entry *obj __unused, const Elf_Rel *rel __unused)
{

	/*
	 * At the PLT entry pointed at by `wherep', construct
	 * a direct transfer to the now fully resolved function
	 * address.
	 */

#if !defined(_CALL_ELF) || _CALL_ELF == 1
	dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
	    (void *)wherep, (void *)target, *(Elf_Addr *)target,
	    (Elf_Addr)defobj->relocbase);

	if (ld_bind_not)
		goto out;

	/*
	 * For the trampoline, the second two elements of the function
	 * descriptor are unused, so we are fine replacing those at any time
	 * with the real ones with no thread safety implications. However, we
	 * need to make sure the main entry point pointer ([0]) is seen to be
	 * modified *after* the second two elements. This can't be done in
	 * general, since there are no barriers in the reading code, but put in
	 * some isyncs to at least make it a little better.
	 *
	 * NOTE(review): the memcpy below already writes wherep[0] with no
	 * ordering, which appears to undercut the [2]/[1]/isync/[0]
	 * sequence that follows — confirm whether the memcpy can be
	 * dropped.
	 */
	memcpy(wherep, (void *)target, sizeof(struct funcdesc));
	wherep[2] = ((Elf_Addr *)target)[2];
	wherep[1] = ((Elf_Addr *)target)[1];
	__asm __volatile ("isync" : : : "memory");
	wherep[0] = ((Elf_Addr *)target)[0];
	__asm __volatile ("isync" : : : "memory");

	if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
		/*
		 * It is possible (LD_BIND_NOW) that the function
		 * descriptor we are copying has not yet been relocated.
		 * If this happens, fix it. Don't worry about threading in
		 * this case since LD_BIND_NOW makes it irrelevant.
		 */

		((struct funcdesc *)(wherep))->addr +=
		    (Elf_Addr)defobj->relocbase;
		((struct funcdesc *)(wherep))->toc +=
		    (Elf_Addr)defobj->relocbase;
	}
out:
#else
	/* ELFv2: a jump slot is a single code address. */
	dbg(" reloc_jmpslot: where=%p, target=%p", (void *)wherep,
	    (void *)target);

	if (!ld_bind_not)
		*wherep = target;
#endif

	return (target);
}

/* IFUNC resolution is not implemented on this platform; always succeeds. */
int
reloc_iresolve(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

/* GNU IFUNC relocations are not implemented on this platform. */
int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

/*
 * Install the lazy-binding resolver into the object's PLT GOT:
 * ELFv2 stores the resolver address and the object pointer directly;
 * ELFv1 copies the resolver's function descriptor and stashes the
 * object pointer in its (unused) env word.
 */
void
init_pltgot(Obj_Entry *obj)
{
	Elf_Addr *pltcall;

	pltcall = obj->pltgot;

	if (pltcall == NULL) {
		return;
	}

#if defined(_CALL_ELF) && _CALL_ELF == 2
	pltcall[0] = (Elf_Addr)&_rtld_bind_start;
	pltcall[1] = (Elf_Addr)obj;
#else
	memcpy(pltcall, _rtld_bind_start, sizeof(struct funcdesc));
	pltcall[2] = (Elf_Addr)obj;
#endif
}

/* No auxiliary-vector based IFUNC setup is needed on this platform. */
void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

}

/* No pre-relocation machine-dependent setup is needed. */
void
pre_init(void)
{

}

/*
 * Allocate the initial static TLS block for the objects in `list'
 * and install the resulting thread pointer into r13.
 */
void
allocate_initial_tls(Obj_Entry *list)
{
	Elf_Addr **tp;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */

	tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;

	/* Thread pointer sits TLS_TP_OFFSET past the TCB. */
	tp = (Elf_Addr **)((char *)allocate_tls(list, NULL, TLS_TCB_SIZE, 16)
	    + TLS_TP_OFFSET + TLS_TCB_SIZE);

	__asm __volatile("mr 13,%0" :: "r"(tp));
}

/*
 * Resolve a TLS address for the module/offset pair in `ti', reading
 * the current thread pointer out of r13.  The result is adjusted by
 * TLS_DTV_OFFSET per the PowerPC TLS ABI.
 */
void*
__tls_get_addr(tls_index* ti)
{
	Elf_Addr **tp;
	char *p;

	__asm __volatile("mr %0,13" : "=r"(tp));
	p = tls_get_addr_common((Elf_Addr**)((Elf_Addr)tp - TLS_TP_OFFSET
	    - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);

	return (p + TLS_DTV_OFFSET);
}