/* $NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $ */

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (C) 1998 Tsubai Masanari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include "debug.h"
#include "rtld.h"

#if !defined(_CALL_ELF) || _CALL_ELF == 1
struct funcdesc {
	Elf_Addr addr;
	Elf_Addr toc;
	Elf_Addr env;
};
#endif

/*
 * Process the R_PPC_COPY relocations
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((caddr_t)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		void *dstaddr;
		const Elf_Sym *dstsym;
		const char *name;
		size_t size;
		const void *srcaddr;
		const Elf_Sym *srcsym = NULL;
		const Obj_Entry *srcobj, *defobj;
		SymLook req;
		int res;

		if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
			continue;
		}

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;
		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		    srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}

		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" referenced from "
			    "COPY relocation in %s", name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
		dbg("copy_reloc: src=%p,dst=%p,size=%zu\n", srcaddr, dstaddr,
		    size);
	}

	return (0);
}
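
/*
 * Illustrative note (the symbol name is an assumed example, not taken
 * from these sources): given a non-PIC main program that references a
 * variable such as "environ" defined in libc.so, the static linker
 * reserves st_size bytes in the program's own data segment and emits
 * an R_PPC_COPY relocation against it. The loop above then finds the
 * original definition in the shared-object search order and memcpy()s
 * its initial value into the program's copy; every other object
 * resolves the symbol to that copy, so only one instance of the
 * variable exists at run time.
 */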

/*
 * Perform early relocation of the run-time linker image
 */
void
reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rela *rela = NULL, *relalim;
	Elf_Addr relasz = 0;
	Elf_Addr *where;

	/*
	 * Extract the rela/relasz values from the dynamic section
	 */
	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_RELA:
			rela = (const Elf_Rela *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELASZ:
			relasz = dynp->d_un.d_val;
			break;
		}
	}

	/*
	 * Relocate these values
	 */
	relalim = (const Elf_Rela *)((caddr_t)rela + relasz);
	for (; rela < relalim; rela++) {
		where = (Elf_Addr *)(relocbase + rela->r_offset);
		*where = (Elf_Addr)(relocbase + rela->r_addend);
	}
}

/*
 * Relocate a non-PLT object with addend.
 */
static int
reloc_nonplt_object(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr tmp;

	switch (ELF_R_TYPE(rela->r_info)) {

	case R_PPC_NONE:
		break;

	case R_PPC64_UADDR64:	/* doubleword64 S + A */
	case R_PPC64_ADDR64:
	case R_PPC_GLOB_DAT:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL) {
			return (-1);
		}

		tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
		    rela->r_addend);

		/* Don't issue write if unnecessary; avoid COW page fault */
		if (*where != tmp) {
			*where = tmp;
		}
		break;

	case R_PPC_RELATIVE:	/* doubleword64 B + A */
		tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);

		/* As above, don't issue write unnecessarily */
		if (*where != tmp) {
			*where = tmp;
		}
		break;

	case R_PPC_COPY:
		/*
		 * These are deferred until all other relocations
		 * have been done. All we do here is make sure
		 * that the COPY relocation is not in a shared
		 * library. They are allowed only in executable
		 * files.
		 */
		if (!obj->mainprog) {
			_rtld_error("%s: Unexpected R_COPY relocation "
			    "in shared library", obj->path);
			return (-1);
		}
		break;

	case R_PPC_JMP_SLOT:
		/*
		 * These will be handled by the plt/jmpslot routines
		 */
		break;

	case R_PPC64_DTPMOD64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);

		if (def == NULL)
			return (-1);

		*where = (Elf_Addr)defobj->tlsindex;

		break;

	case R_PPC64_TPREL64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);

		if (def == NULL)
			return (-1);

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset((Obj_Entry *)defobj)) {
				_rtld_error("%s: No space available for "
				    "static Thread Local Storage", obj->path);
				return (-1);
			}
		}

		*(Elf_Addr **)where = *where * sizeof(Elf_Addr)
		    + (Elf_Addr *)(def->st_value + rela->r_addend
		    + defobj->tlsoffset - TLS_TP_OFFSET);

		break;

	case R_PPC64_DTPREL64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);

		if (def == NULL)
			return (-1);

		*where += (Elf_Addr)(def->st_value + rela->r_addend
		    - TLS_DTV_OFFSET);

		break;

	default:
		_rtld_error("%s: Unsupported relocation type %ld"
		    " in non-PLT relocations\n", obj->path,
		    ELF_R_TYPE(rela->r_info));
		return (-1);
	}
	return (0);
}
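
/*
 * Note on the R_PPC64_TPREL64 arithmetic above: PowerPC uses TLS
 * variant I, and allocate_initial_tls() below biases the thread
 * pointer (r13) to point TLS_TP_OFFSET bytes past the start of the
 * initial static TLS block. The value left in the GOT slot is
 * therefore the symbol's displacement from r13,
 *
 *	st_value + r_addend + tlsoffset - TLS_TP_OFFSET
 *
 * so compiler-generated initial-exec sequences can reach the variable
 * simply by adding the slot's contents to r13.
 */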

/*
 * Process non-PLT relocations
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	int bytes = obj->dynsymcount * sizeof(SymCache);
	int r = -1;

	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread, we have
	 * limited amounts of stack available so we cannot use alloca().
	 */
	if (obj != obj_rtld) {
		cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
		    -1, 0);
		if (cache == MAP_FAILED)
			cache = NULL;
	} else
		cache = NULL;

	/*
	 * From the SVR4 PPC ABI:
	 * "The PowerPC family uses only the Elf32_Rela relocation
	 *  entries with explicit addends."
	 */
	relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		if (reloc_nonplt_object(obj_rtld, obj, rela, cache, flags,
		    lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache)
		munmap(cache, bytes);

	/* Synchronize icache for text seg in case we made any changes */
	__syncicache(obj->mapbase, obj->textsize);

	return (r);
}

/*
 * Initialise a PLT slot to the resolving trampoline
 */
static int
reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
{
	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	long reloff;

	reloff = rela - obj->pltrela;

	dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx",
	    (void *)where, reloff, obj->glink);

#if !defined(_CALL_ELF) || _CALL_ELF == 1
	/* Glink code is 3 instructions after the first 32k, 2 before */
	*where = (Elf_Addr)obj->glink + 32 +
	    8*((reloff < 0x8000) ? reloff : 0x8000) +
	    12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
#else
	*where = (Elf_Addr)obj->glink + 4*reloff + 32;
#endif

	return (0);
}

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	if (obj->pltrelasize != 0) {
		relalim = (const Elf_Rela *)((char *)obj->pltrela +
		    obj->pltrelasize);
		for (rela = obj->pltrela; rela < relalim; rela++) {
			assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);

			if (reloc_plt_object(obj, rela) < 0) {
				return (-1);
			}
		}
	}

	return (0);
}
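
/*
 * Worked example for the glink arithmetic in reloc_plt_object() above:
 * the stub table starts 32 bytes into the glink section; under ELFv1
 * each of the first 0x8000 stubs is 2 instructions (8 bytes) and every
 * later stub is 3 instructions (12 bytes). So reloff 0x20 resolves to
 * glink + 32 + 8*0x20, while reloff 0x8004 resolves to
 * glink + 32 + 8*0x8000 + 12*4. Under ELFv2 every stub is a single
 * instruction, hence the simpler glink + 32 + 4*reloff.
 */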

/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);

		if (def == &sym_zero) {
			/* Zero undefined weak symbols */
#if !defined(_CALL_ELF) || _CALL_ELF == 1
			bzero(where, sizeof(struct funcdesc));
#else
			*where = 0;
#endif
		} else {
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
		}
	}

	obj->jmpslots_done = true;

	return (0);
}

/*
 * Update the value of a PLT jump slot.
 */
Elf_Addr
reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
    const Obj_Entry *obj, const Elf_Rel *rel)
{

	/*
	 * At the PLT entry pointed at by `wherep', construct
	 * a direct transfer to the now fully resolved function
	 * address.
	 */

#if !defined(_CALL_ELF) || _CALL_ELF == 1
	dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
	    (void *)wherep, (void *)target, *(Elf_Addr *)target,
	    (Elf_Addr)defobj->relocbase);

	if (ld_bind_not)
		goto out;

	/*
	 * For the trampoline, the second two elements of the function
	 * descriptor are unused, so we are fine replacing those at any time
	 * with the real ones with no thread safety implications. However, we
	 * need to make sure the main entry point pointer ([0]) is seen to be
	 * modified *after* the second two elements. This can't be done in
	 * general, since there are no barriers in the reading code, but put in
	 * some isyncs to at least make it a little better.
	 */
	memcpy(wherep, (void *)target, sizeof(struct funcdesc));
	wherep[2] = ((Elf_Addr *)target)[2];
	wherep[1] = ((Elf_Addr *)target)[1];
	__asm __volatile ("isync" : : : "memory");
	wherep[0] = ((Elf_Addr *)target)[0];
	__asm __volatile ("isync" : : : "memory");

	if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
		/*
		 * It is possible (LD_BIND_NOW) that the function
		 * descriptor we are copying has not yet been relocated.
		 * If this happens, fix it. Don't worry about threading in
		 * this case since LD_BIND_NOW makes it irrelevant.
		 */
		((struct funcdesc *)(wherep))->addr +=
		    (Elf_Addr)defobj->relocbase;
		((struct funcdesc *)(wherep))->toc +=
		    (Elf_Addr)defobj->relocbase;
	}
out:
#else
	dbg(" reloc_jmpslot: where=%p, target=%p", (void *)wherep,
	    (void *)target);

	if (!ld_bind_not)
		*wherep = target;
#endif

	return (target);
}
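
/*
 * Background for the ELFv1 branch of reloc_jmpslot() above: under
 * ELFv1 a PLT slot holds a three-word function descriptor
 * { addr, toc, env } (see struct funcdesc), not a single code
 * address. A concurrently calling thread loads the entry point and
 * TOC words separately, so the stores above publish the TOC ([1]) and
 * environment ([2]) words first and only then, after an isync, the
 * entry point ([0]). As the comment there notes, this narrows but
 * does not close the race, since the reader side executes no matching
 * barriers.
 */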

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

void
init_pltgot(Obj_Entry *obj)
{
	Elf_Addr *pltcall;

	pltcall = obj->pltgot;

	if (pltcall == NULL) {
		return;
	}

#if defined(_CALL_ELF) && _CALL_ELF == 2
	pltcall[0] = (Elf_Addr)&_rtld_bind_start;
	pltcall[1] = (Elf_Addr)obj;
#else
	memcpy(pltcall, _rtld_bind_start, sizeof(struct funcdesc));
	pltcall[2] = (Elf_Addr)obj;
#endif
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{
}

void
allocate_initial_tls(Obj_Entry *list)
{
	Elf_Addr **tp;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	tp = (Elf_Addr **)((char *)allocate_tls(list, NULL, TLS_TCB_SIZE, 16)
	    + TLS_TP_OFFSET + TLS_TCB_SIZE);

	__asm __volatile("mr 13,%0" :: "r"(tp));
}

void *
__tls_get_addr(tls_index *ti)
{
	Elf_Addr **tp;
	char *p;

	__asm __volatile("mr %0,13" : "=r"(tp));
	p = tls_get_addr_common((Elf_Addr **)((Elf_Addr)tp - TLS_TP_OFFSET
	    - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);

	return (p + TLS_DTV_OFFSET);
}
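
/*
 * Layout summary for allocate_initial_tls() and __tls_get_addr() above
 * (TLS variant I; the biases come from TLS_TP_OFFSET and
 * TLS_DTV_OFFSET):
 *
 *	tcb = allocate_tls(...)
 *	r13 = tcb + TLS_TCB_SIZE + TLS_TP_OFFSET
 *
 * Biasing r13 this way lets a signed 16-bit displacement cover the TCB
 * as well as nearly 64KB of static TLS data. __tls_get_addr() reverses
 * the bias to recover the TCB, then re-applies the DTV bias that the
 * R_PPC64_DTPREL64 processing above subtracted from the stored offset.
 */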