1 /*-
2 * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
3 * All rights reserved.
4 *
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
8 *
9 * This software was developed by the University of Cambridge Computer
10 * Laboratory as part of the CTSRD Project, with support from the UK Higher
11 * Education Innovation Fund (HEIF).
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/types.h>
36
37 #include <stdlib.h>
38
39 #include "debug.h"
40 #include "rtld.h"
41 #include "rtld_printf.h"
42
43 /*
44 * It is possible for the compiler to emit relocations for unaligned data.
45 * We handle this situation with these inlines.
46 */
47 #define RELOC_ALIGNED_P(x) \
48 (((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
49
/*
 * Switch the gp register to obj's "__global_pointer$" symbol value, if
 * the object defines one, and return the previous gp so the caller can
 * restore it afterwards.
 */
uint64_t
set_gp(Obj_Entry *obj)
{
	uint64_t old;
	SymLook req;
	uint64_t gp;
	int res;

	/* Capture the current gp before anything can change it. */
	__asm __volatile("mv %0, gp" : "=r"(old));

	/* Look for the linker-provided anchor symbol in this object only. */
	symlook_init(&req, "__global_pointer$");
	req.ventry = NULL;
	req.flags = SYMLOOK_EARLY;
	res = symlook_obj(&req, obj);

	/* symlook_obj() returns 0 on success; leave gp untouched on failure. */
	if (res == 0) {
		gp = req.sym_out->st_value;
		__asm __volatile("mv gp, %0" :: "r"(gp));
	}

	return (old);
}
72
73 void
init_pltgot(Obj_Entry * obj)74 init_pltgot(Obj_Entry *obj)
75 {
76
77 if (obj->pltgot != NULL) {
78 obj->pltgot[0] = (Elf_Addr)&_rtld_bind_start;
79 obj->pltgot[1] = (Elf_Addr)obj;
80 }
81 }
82
/*
 * Satisfy R_RISCV_COPY relocations in the main program by copying the
 * initialized data from the shared object that defines each symbol.
 * Returns 0 on success, -1 (with _rtld_error set) if a referenced
 * symbol cannot be found in any later object on the global list.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Obj_Entry *srcobj, *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *srcsym;
	const Elf_Sym *dstsym;
	const void *srcaddr;
	const char *name;
	void *dstaddr;
	SymLook req;
	size_t size;
	int res;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		/* Only COPY relocations are handled here. */
		if (ELF_R_TYPE(rela->r_info) != R_RISCV_COPY)
			continue;

		/* Destination slot, symbol, and size come from dstobj. */
		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;

		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		/*
		 * Search the objects after the main program for the
		 * defining instance of the symbol; stop at the first hit.
		 */
		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		    srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}
		if (srcobj == NULL) {
			_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
			    name, dstobj->path);
			return (-1);
		}

		/* Copy the defining object's data into the program's copy. */
		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
	}

	return (0);
}
140
141 /*
142 * Process the PLT relocations.
143 */
144 int
reloc_plt(Obj_Entry * obj,int flags __unused,RtldLockState * lockstate __unused)145 reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
146 {
147 const Elf_Rela *relalim;
148 const Elf_Rela *rela;
149
150 relalim = (const Elf_Rela *)((const char *)obj->pltrela +
151 obj->pltrelasize);
152 for (rela = obj->pltrela; rela < relalim; rela++) {
153 Elf_Addr *where;
154
155 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
156
157 switch (ELF_R_TYPE(rela->r_info)) {
158 case R_RISCV_JUMP_SLOT:
159 *where += (Elf_Addr)obj->relocbase;
160 break;
161 case R_RISCV_IRELATIVE:
162 obj->irelative = true;
163 break;
164 default:
165 _rtld_error("Unknown relocation type %u in PLT",
166 (unsigned int)ELF_R_TYPE(rela->r_info));
167 return (-1);
168 }
169 }
170
171 return (0);
172 }
173
174 /*
175 * LD_BIND_NOW was set - force relocation for all jump slots
176 */
177 int
reloc_jmpslots(Obj_Entry * obj,int flags,RtldLockState * lockstate)178 reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
179 {
180 const Obj_Entry *defobj;
181 const Elf_Rela *relalim;
182 const Elf_Rela *rela;
183 const Elf_Sym *def;
184
185 relalim = (const Elf_Rela *)((const char *)obj->pltrela +
186 obj->pltrelasize);
187 for (rela = obj->pltrela; rela < relalim; rela++) {
188 Elf_Addr *where;
189
190 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
191 switch(ELF_R_TYPE(rela->r_info)) {
192 case R_RISCV_JUMP_SLOT:
193 def = find_symdef(ELF_R_SYM(rela->r_info), obj,
194 &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
195 if (def == NULL) {
196 dbg("reloc_jmpslots: sym not found");
197 return (-1);
198 }
199
200 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
201 obj->gnu_ifunc = true;
202 continue;
203 }
204
205 *where = (Elf_Addr)(defobj->relocbase + def->st_value);
206 break;
207 default:
208 _rtld_error("Unknown relocation type %x in jmpslot",
209 (unsigned int)ELF_R_TYPE(rela->r_info));
210 return (-1);
211 }
212 }
213
214 return (0);
215 }
216
217 static void
reloc_iresolve_one(Obj_Entry * obj,const Elf_Rela * rela,RtldLockState * lockstate)218 reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
219 RtldLockState *lockstate)
220 {
221 Elf_Addr *where, target, *ptr;
222
223 ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
224 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
225 lock_release(rtld_bind_lock, lockstate);
226 target = call_ifunc_resolver(ptr);
227 wlock_acquire(rtld_bind_lock, lockstate);
228 *where = target;
229 }
230
231 int
reloc_iresolve(Obj_Entry * obj,struct Struct_RtldLockState * lockstate)232 reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
233 {
234 const Elf_Rela *relalim;
235 const Elf_Rela *rela;
236
237 if (!obj->irelative)
238 return (0);
239
240 obj->irelative = false;
241 relalim = (const Elf_Rela *)((const char *)obj->pltrela +
242 obj->pltrelasize);
243 for (rela = obj->pltrela; rela < relalim; rela++) {
244 if (ELF_R_TYPE(rela->r_info) == R_RISCV_IRELATIVE)
245 reloc_iresolve_one(obj, rela, lockstate);
246 }
247 return (0);
248 }
249
250 int
reloc_iresolve_nonplt(Obj_Entry * obj,struct Struct_RtldLockState * lockstate)251 reloc_iresolve_nonplt(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
252 {
253 const Elf_Rela *relalim;
254 const Elf_Rela *rela;
255
256 if (!obj->irelative_nonplt)
257 return (0);
258
259 obj->irelative_nonplt = false;
260 relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
261 for (rela = obj->rela; rela < relalim; rela++) {
262 if (ELF_R_TYPE(rela->r_info) == R_RISCV_IRELATIVE)
263 reloc_iresolve_one(obj, rela, lockstate);
264 }
265 return (0);
266 }
267
268 int
reloc_gnu_ifunc(Obj_Entry * obj,int flags,struct Struct_RtldLockState * lockstate)269 reloc_gnu_ifunc(Obj_Entry *obj, int flags,
270 struct Struct_RtldLockState *lockstate)
271 {
272 const Elf_Rela *relalim;
273 const Elf_Rela *rela;
274 Elf_Addr *where, target;
275 const Elf_Sym *def;
276 const Obj_Entry *defobj;
277
278 if (!obj->gnu_ifunc)
279 return (0);
280
281 relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
282 for (rela = obj->pltrela; rela < relalim; rela++) {
283 if (ELF_R_TYPE(rela->r_info) == R_RISCV_JUMP_SLOT) {
284 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
285 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
286 SYMLOOK_IN_PLT | flags, NULL, lockstate);
287 if (def == NULL)
288 return (-1);
289 if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
290 continue;
291
292 lock_release(rtld_bind_lock, lockstate);
293 target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
294 wlock_acquire(rtld_bind_lock, lockstate);
295 reloc_jmpslot(where, target, defobj, obj,
296 (const Elf_Rel *)rela);
297 }
298 }
299 obj->gnu_ifunc = false;
300 return (0);
301 }
302
303 Elf_Addr
reloc_jmpslot(Elf_Addr * where,Elf_Addr target,const Obj_Entry * defobj __unused,const Obj_Entry * obj __unused,const Elf_Rel * rel)304 reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
305 const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
306 const Elf_Rel *rel)
307 {
308
309 assert(ELF_R_TYPE(rel->r_info) == R_RISCV_JUMP_SLOT ||
310 ELF_R_TYPE(rel->r_info) == R_RISCV_IRELATIVE);
311
312 if (*where != target && !ld_bind_not)
313 *where = target;
314 return (target);
315 }
316
317 /*
318 * Process non-PLT relocations
319 */
320 int
reloc_non_plt(Obj_Entry * obj,Obj_Entry * obj_rtld,int flags,RtldLockState * lockstate)321 reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
322 RtldLockState *lockstate)
323 {
324 const Obj_Entry *defobj;
325 const Elf_Rela *relalim;
326 const Elf_Rela *rela;
327 const Elf_Sym *def;
328 SymCache *cache;
329 Elf_Addr *where, symval;
330 unsigned long symnum;
331
332 /*
333 * The dynamic loader may be called from a thread, we have
334 * limited amounts of stack available so we cannot use alloca().
335 */
336 if (obj == obj_rtld)
337 cache = NULL;
338 else
339 cache = calloc(obj->dynsymcount, sizeof(SymCache));
340 /* No need to check for NULL here */
341
342 relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
343 for (rela = obj->rela; rela < relalim; rela++) {
344 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
345 symnum = ELF_R_SYM(rela->r_info);
346
347 switch (ELF_R_TYPE(rela->r_info)) {
348 case R_RISCV_JUMP_SLOT:
349 /* This will be handled by the plt/jmpslot routines */
350 break;
351 case R_RISCV_NONE:
352 break;
353 case R_RISCV_64:
354 def = find_symdef(symnum, obj, &defobj, flags, cache,
355 lockstate);
356 if (def == NULL)
357 return (-1);
358
359 /*
360 * If symbol is IFUNC, only perform relocation
361 * when caller allowed it by passing
362 * SYMLOOK_IFUNC flag. Skip the relocations
363 * otherwise.
364 */
365 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
366 if ((flags & SYMLOOK_IFUNC) == 0) {
367 obj->non_plt_gnu_ifunc = true;
368 continue;
369 }
370 symval = (Elf_Addr)rtld_resolve_ifunc(defobj,
371 def);
372 } else {
373 if ((flags & SYMLOOK_IFUNC) != 0)
374 continue;
375 symval = (Elf_Addr)(defobj->relocbase +
376 def->st_value);
377 }
378
379 *where = symval + rela->r_addend;
380 break;
381 case R_RISCV_TLS_DTPMOD64:
382 def = find_symdef(symnum, obj, &defobj, flags, cache,
383 lockstate);
384 if (def == NULL)
385 return -1;
386
387 *where += (Elf_Addr)defobj->tlsindex;
388 break;
389 case R_RISCV_COPY:
390 /*
391 * These are deferred until all other relocations have
392 * been done. All we do here is make sure that the
393 * COPY relocation is not in a shared library. They
394 * are allowed only in executable files.
395 */
396 if (!obj->mainprog) {
397 _rtld_error("%s: Unexpected R_RISCV_COPY "
398 "relocation in shared library", obj->path);
399 return (-1);
400 }
401 break;
402 case R_RISCV_TLS_DTPREL64:
403 def = find_symdef(symnum, obj, &defobj, flags, cache,
404 lockstate);
405 if (def == NULL)
406 return (-1);
407 /*
408 * We lazily allocate offsets for static TLS as we
409 * see the first relocation that references the
410 * TLS block. This allows us to support (small
411 * amounts of) static TLS in dynamically loaded
412 * modules. If we run out of space, we generate an
413 * error.
414 */
415 if (!defobj->tls_static) {
416 if (!allocate_tls_offset(
417 __DECONST(Obj_Entry *, defobj))) {
418 _rtld_error(
419 "%s: No space available for static "
420 "Thread Local Storage", obj->path);
421 return (-1);
422 }
423 }
424
425 *where += (Elf_Addr)(def->st_value + rela->r_addend
426 - TLS_DTV_OFFSET);
427 break;
428 case R_RISCV_TLS_TPREL64:
429 def = find_symdef(symnum, obj, &defobj, flags, cache,
430 lockstate);
431 if (def == NULL)
432 return (-1);
433
434 /*
435 * We lazily allocate offsets for static TLS as we
436 * see the first relocation that references the
437 * TLS block. This allows us to support (small
438 * amounts of) static TLS in dynamically loaded
439 * modules. If we run out of space, we generate an
440 * error.
441 */
442 if (!defobj->tls_static) {
443 if (!allocate_tls_offset(
444 __DECONST(Obj_Entry *, defobj))) {
445 _rtld_error(
446 "%s: No space available for static "
447 "Thread Local Storage", obj->path);
448 return (-1);
449 }
450 }
451
452 *where = (def->st_value + rela->r_addend +
453 defobj->tlsoffset - TLS_TP_OFFSET - TLS_TCB_SIZE);
454 break;
455 case R_RISCV_RELATIVE:
456 *where = (Elf_Addr)(obj->relocbase + rela->r_addend);
457 break;
458 case R_RISCV_IRELATIVE:
459 obj->irelative_nonplt = true;
460 break;
461 default:
462 rtld_printf("%s: Unhandled relocation %lu\n",
463 obj->path, ELF_R_TYPE(rela->r_info));
464 return (-1);
465 }
466 }
467
468 return (0);
469 }
470
/* AT_HWCAP word from the aux vector; zero until ifunc_init() runs. */
unsigned long elf_hwcap;

/*
 * Record the hardware-capability bits supplied by the kernel in the
 * aux vector so they are available via elf_hwcap (presumably for use
 * by ifunc resolvers — confirm against callers).
 */
void
ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)])
{
	if (aux_info[AT_HWCAP] != NULL)
		elf_hwcap = aux_info[AT_HWCAP]->a_un.a_val;
}
479
/*
 * Size the static TLS block from the offsets allocated so far (plus
 * slack for later dlopen'ed modules), then allocate the initial TLS
 * area for objs and install it in the thread control block.
 */
void
allocate_initial_tls(Obj_Entry *objs)
{

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    ld_static_tls_extra;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}
494
495 void *
__tls_get_addr(tls_index * ti)496 __tls_get_addr(tls_index* ti)
497 {
498 uintptr_t **dtvp;
499 void *p;
500
501 dtvp = &_tcb_get()->tcb_dtv;
502 p = tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset);
503
504 return ((char*)p + TLS_DTV_OFFSET);
505 }
506