/*-
 * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory as part of the CTSRD Project, with support from the UK Higher
 * Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>

#include <stdlib.h>
#include <string.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_printf.h"

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with this macro.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
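
/*
 * The RISC-V toolchain dedicates the gp register to linker relaxation
 * against the __global_pointer$ symbol.  set_gp() saves the current gp
 * and, if the object defines __global_pointer$, installs that object's
 * value; the old value is returned so the caller can restore it.  A
 * hypothetical caller sketch (names are illustrative, not part of this
 * file):
 *
 *	uint64_t old_gp = set_gp(obj);
 *	... run code from obj that relies on gp-relative addressing ...
 *	__asm __volatile("mv gp, %0" :: "r"(old_gp));
 */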
uint64_t
set_gp(Obj_Entry *obj)
{
	uint64_t old;
	SymLook req;
	uint64_t gp;
	int res;

	__asm __volatile("mv %0, gp" : "=r"(old));

	symlook_init(&req, "__global_pointer$");
	req.ventry = NULL;
	req.flags = SYMLOOK_EARLY;
	res = symlook_obj(&req, obj);

	if (res == 0) {
		gp = req.sym_out->st_value;
		__asm __volatile("mv gp, %0" :: "r"(gp));
	}

	return (old);
}

void
init_pltgot(Obj_Entry *obj)
{

	if (obj->pltgot != NULL) {
		obj->pltgot[0] = (Elf_Addr)&_rtld_bind_start;
		obj->pltgot[1] = (Elf_Addr)obj;
	}
}

int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Obj_Entry *srcobj, *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *srcsym;
	const Elf_Sym *dstsym;
	const void *srcaddr;
	const char *name;
	void *dstaddr;
	SymLook req;
	size_t size;
	int res;

	/*
	 * COPY relocs are invalid outside of the main program.
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) != R_RISCV_COPY)
			continue;

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;

		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		    srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}
		if (srcobj == NULL) {
			_rtld_error(
		    "Undefined symbol \"%s\" referenced from COPY relocation in %s",
			    name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
	}

	return (0);
}

/*
 * Process the PLT relocations.
 */
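/*
 * Until a jump slot is resolved, its GOT entry holds the link-time
 * address of the object's PLT header (per the RISC-V psABI's lazy
 * binding scheme), so reloc_plt() only needs to rebase each entry by
 * relocbase.  An unresolved call then bounces through the PLT header,
 * which uses the pltgot[0]/pltgot[1] values installed by init_pltgot()
 * above to enter _rtld_bind_start with this Obj_Entry; the slot is
 * filled in on first use.
 */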
int
reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where;

		assert(ELF_R_TYPE(rela->r_info) == R_RISCV_JUMP_SLOT);

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

	return (0);
}

/*
 * LD_BIND_NOW was set: force relocation for all jump slots.
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_RISCV_JUMP_SLOT:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL) {
				dbg("reloc_jmpslots: sym not found");
				return (-1);
			}

			*where = (Elf_Addr)(defobj->relocbase + def->st_value);
			break;
		default:
			_rtld_error("Unknown relocation type %x in jmpslot",
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
    const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
    const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_RISCV_JUMP_SLOT);

	if (*where != target && !ld_bind_not)
		*where = target;
	return (target);
}

/*
 * Process non-PLT relocations.
 */
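/*
 * A note on the TLS arithmetic below (a sketch, not normative; the
 * authoritative constants live in the machine-dependent rtld headers).
 * RISC-V uses TLS Variant I, with the thread pointer (tp) pointing just
 * past the thread control block, plus any TLS_TP_OFFSET bias:
 *
 *	[ TCB (TLS_TCB_SIZE) ][ static TLS block ... ]
 *	^ TCB base            ^ tp
 *
 * R_RISCV_TLS_TPREL64 therefore folds the object's tlsoffset into the
 * value and subtracts TLS_TP_OFFSET + TLS_TCB_SIZE to yield a
 * tp-relative offset, while DTPREL values are biased by TLS_DTV_OFFSET,
 * the constant the psABI applies to DTV-relative offsets.
 */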
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	SymCache *cache;
	Elf_Addr *where;
	unsigned long symnum;
	int r;

	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread; we have
	 * limited amounts of stack available, so we cannot use alloca().
	 */
	if (obj == obj_rtld)
		cache = NULL;
	else
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
	/* No need to check for NULL here: find_symdef() copes with a
	 * NULL cache. */

	r = -1;
	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		symnum = ELF_R_SYM(rela->r_info);

		switch (ELF_R_TYPE(rela->r_info)) {
		case R_RISCV_JUMP_SLOT:
			/* This will be handled by the plt/jmpslot routines. */
			break;
		case R_RISCV_NONE:
			break;
		case R_RISCV_64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				goto done;

			*where = (Elf_Addr)(defobj->relocbase + def->st_value +
			    rela->r_addend);
			break;
		case R_RISCV_TLS_DTPMOD64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				goto done;

			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_RISCV_COPY:
			/*
			 * These are deferred until all other relocations have
			 * been done. All we do here is make sure that the
			 * COPY relocation is not in a shared library. They
			 * are allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_RISCV_COPY "
				    "relocation in shared library", obj->path);
				goto done;
			}
			break;
		case R_RISCV_TLS_DTPREL64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				goto done;
			/*
			 * We lazily allocate offsets for static TLS as we
			 * see the first relocation that references the
			 * TLS block. This allows us to support (small
			 * amounts of) static TLS in dynamically loaded
			 * modules. If we run out of space, we generate an
			 * error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset(
				    __DECONST(Obj_Entry *, defobj))) {
					_rtld_error(
					    "%s: No space available for static "
					    "Thread Local Storage", obj->path);
					goto done;
				}
			}

			*where += (Elf_Addr)(def->st_value + rela->r_addend
			    - TLS_DTV_OFFSET);
			break;
		case R_RISCV_TLS_TPREL64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				goto done;

			/*
			 * We lazily allocate offsets for static TLS as we
			 * see the first relocation that references the
			 * TLS block. This allows us to support (small
			 * amounts of) static TLS in dynamically loaded
			 * modules. If we run out of space, we generate an
			 * error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset(
				    __DECONST(Obj_Entry *, defobj))) {
					_rtld_error(
					    "%s: No space available for static "
					    "Thread Local Storage", obj->path);
					goto done;
				}
			}

			*where = (def->st_value + rela->r_addend +
			    defobj->tlsoffset - TLS_TP_OFFSET - TLS_TCB_SIZE);
			break;
		case R_RISCV_RELATIVE:
			*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
			break;
		default:
			rtld_printf("%s: Unhandled relocation %lu\n",
			    obj->path, ELF_R_TYPE(rela->r_info));
			goto done;
		}
	}
	r = 0;
done:
	free(cache);	/* free(NULL) is a no-op */
	return (r);
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

}

void
pre_init(void)
{

}

void
allocate_initial_tls(Obj_Entry *objs)
{
	Elf_Addr **tp;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	tp = (Elf_Addr **)((char *)allocate_tls(objs, NULL, TLS_TCB_SIZE, 16)
	    + TLS_TP_OFFSET + TLS_TCB_SIZE);

	__asm __volatile("mv tp, %0" :: "r"(tp));
}
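
/*
 * __tls_get_addr() is the entry point called by the compiler's
 * general-dynamic TLS code sequences; a hypothetical codegen sketch:
 *
 *	la.tls.gd a0, sym
 *	call      __tls_get_addr@plt
 *
 * It recovers the TCB (and thus the DTV) by stepping back
 * TLS_TP_OFFSET + TLS_TCB_SIZE bytes from tp, lets
 * tls_get_addr_common() resolve (ti_module, ti_offset) through the
 * DTV, and re-applies the TLS_DTV_OFFSET bias carried by DTPREL
 * values (see reloc_non_plt() above).
 */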
void *
__tls_get_addr(tls_index *ti)
{
	char *_tp;
	void *p;

	__asm __volatile("mv %0, tp" : "=r"(_tp));

	p = tls_get_addr_common((Elf_Addr **)((Elf_Addr)_tp - TLS_TP_OFFSET
	    - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);

	return ((char *)p + TLS_DTV_OFFSET);
}