1 /*-
2 * Copyright (c) 2014-2015 The FreeBSD Foundation
3 *
4 * Portions of this software were developed by Andrew Turner
5 * under sponsorship from the FreeBSD Foundation.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/types.h>
30
31 #include <machine/sysarch.h>
32
33 #include <stdlib.h>
34
35 #include "debug.h"
36 #include "rtld.h"
37 #include "rtld_printf.h"
38
/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 *
 * NOTE(review): RELOC_ALIGNED_P is not referenced anywhere in the visible
 * portion of this file — confirm it is still needed before removing.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
45
46 /*
47 * This is not the correct prototype, but we only need it for
48 * a function pointer to a simple asm function.
49 */
50 void *_rtld_tlsdesc_static(void *);
51 void *_rtld_tlsdesc_undef(void *);
52 void *_rtld_tlsdesc_dynamic(void *);
53
54 void _exit(int);
55
56 bool
arch_digest_dynamic(struct Struct_Obj_Entry * obj,const Elf_Dyn * dynp)57 arch_digest_dynamic(struct Struct_Obj_Entry *obj, const Elf_Dyn *dynp)
58 {
59 if (dynp->d_tag == DT_AARCH64_VARIANT_PCS) {
60 obj->variant_pcs = true;
61 return (true);
62 }
63
64 return (false);
65 }
66
/*
 * Parse an ELF note, looking for the GNU property note carrying the
 * AArch64 feature flags.  When the BTI flag is present, register the
 * object's mapping with the kernel as guarded pages via sysarch().
 *
 * Returns true when the note was recognized and handled.
 */
bool
arch_digest_note(struct Struct_Obj_Entry *obj __unused, const Elf_Note *note)
{
	const char *note_name;
	const uint32_t *note_data;

	/* The name field immediately follows the fixed-size note header. */
	note_name = (const char *)(note + 1);
	/* Only handle GNU notes */
	if (note->n_namesz != sizeof(ELF_NOTE_GNU) ||
	    strncmp(note_name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) != 0)
		return (false);

	/* Only handle GNU property notes */
	if (note->n_type != NT_GNU_PROPERTY_TYPE_0)
		return (false);

	/*
	 * note_data[0] - Type
	 * note_data[1] - Length
	 * note_data[2] - Data
	 * note_data[3] - Padding?
	 */
	note_data = (const uint32_t *)(note_name + note->n_namesz);

	/* Only handle AArch64 feature notes */
	if (note_data[0] != GNU_PROPERTY_AARCH64_FEATURE_1_AND)
		return (false);

	/* We expect at least 4 bytes of data */
	if (note_data[1] < 4)
		return (false);

	/* TODO: Only guard if HWCAP2_BTI is set */
	if ((note_data[2] & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) != 0) {
		struct arm64_guard_page_args guard;

		/* Guard the object's entire mapping. */
		guard.addr = (uintptr_t)obj->mapbase;
		guard.len = obj->mapsize;

		sysarch(ARM64_GUARD_PAGE, &guard);
	}

	return (true);
}
111
112 void
init_pltgot(Obj_Entry * obj)113 init_pltgot(Obj_Entry *obj)
114 {
115
116 if (obj->pltgot != NULL) {
117 obj->pltgot[1] = (Elf_Addr) obj;
118 obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
119 }
120 }
121
/*
 * Process R_AARCH64_COPY relocations for the main program: copy the
 * initialized data of each referenced symbol from the shared object
 * that defines it into the executable's own storage.
 *
 * Returns 0 on success, or -1 (with an rtld error set) when a
 * referenced symbol cannot be found in any later object on the
 * global list.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Obj_Entry *srcobj, *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *srcsym;
	const Elf_Sym *dstsym;
	const void *srcaddr;
	const char *name;
	void *dstaddr;
	SymLook req;
	size_t size;
	int res;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) != R_AARCH64_COPY)
			continue;

		/* Destination address and size come from the executable. */
		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;

		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		/*
		 * Search every object after the main program for the
		 * defining instance of the symbol.
		 */
		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		    srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}
		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" referenced from "
			    "COPY relocation in %s", name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
	}

	return (0);
}
178
/* Argument block handed to _rtld_tlsdesc_dynamic by a TLS descriptor. */
struct tls_data {
	Elf_Addr dtv_gen;	/* DTV generation when the descriptor was made */
	int tls_index;		/* module index into the DTV */
	Elf_Addr tls_offs;	/* offset within the module's TLS block */
};
184
185 static struct tls_data *
reloc_tlsdesc_alloc(int tlsindex,Elf_Addr tlsoffs)186 reloc_tlsdesc_alloc(int tlsindex, Elf_Addr tlsoffs)
187 {
188 struct tls_data *tlsdesc;
189
190 tlsdesc = xmalloc(sizeof(struct tls_data));
191 tlsdesc->dtv_gen = tls_dtv_generation;
192 tlsdesc->tls_index = tlsindex;
193 tlsdesc->tls_offs = tlsoffs;
194
195 return (tlsdesc);
196 }
197
/*
 * In-memory layout of a TLS descriptor slot: a resolver function
 * followed by a per-resolver argument.  Which union member is active
 * depends on the resolver installed by reloc_tlsdesc().
 */
struct tlsdesc_entry {
	void *(*func)(void *);
	union {
		Elf_Ssize addend;	/* _rtld_tlsdesc_undef */
		Elf_Size offset;	/* _rtld_tlsdesc_static */
		struct tls_data *data;	/* _rtld_tlsdesc_dynamic */
	};
};
206
/*
 * Fill in a TLS descriptor (R_AARCH64_TLSDESC).  Installs one of the
 * three asm resolver stubs plus its argument into *where, depending on
 * whether the symbol is weak-undefined, lives in the initially
 * allocated static TLS segment, or must be resolved dynamically.
 * Dies via rtld_die() if a referenced symbol cannot be found.
 */
static void
reloc_tlsdesc(const Obj_Entry *obj, const Elf_Rela *rela,
    struct tlsdesc_entry *where, int flags, RtldLockState *lockstate)
{
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr offs;

	offs = 0;
	if (ELF_R_SYM(rela->r_info) != 0) {
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
		    NULL, lockstate);
		if (def == NULL)
			rtld_die();
		offs = def->st_value;
		/* From here on use the defining object's TLS parameters. */
		obj = defobj;
		if (def->st_shndx == SHN_UNDEF) {
			/* Weak undefined thread variable */
			where->func = _rtld_tlsdesc_undef;
			where->addend = rela->r_addend;
			return;
		}
	}
	offs += rela->r_addend;

	if (obj->tlsoffset != 0) {
		/* Variable is in initially allocated TLS segment */
		where->func = _rtld_tlsdesc_static;
		where->offset = obj->tlsoffset + offs;
	} else {
		/* TLS offset is unknown at load time, use dynamic resolving */
		where->func = _rtld_tlsdesc_dynamic;
		where->data = reloc_tlsdesc_alloc(obj->tlsindex, offs);
	}
}
242
/*
 * Process the PLT relocations.
 *
 * Jump slots are normally prepared for lazy binding by adding the load
 * base; variant PCS symbols are instead resolved eagerly (see comment
 * below).  TLSDESC entries are resolved here; IRELATIVE entries are
 * only flagged for later processing by reloc_iresolve().
 * Returns 0 on success, -1 on error.
 */
int
reloc_plt(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def, *sym;
	bool lazy;

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			lazy = true;
			if (obj->variant_pcs) {
				sym = &obj->symtab[ELF_R_SYM(rela->r_info)];
				/*
				 * Variant PCS functions don't follow the
				 * standard register convention. Because of
				 * this we can't use lazy relocation and
				 * need to set the target address.
				 */
				if ((sym->st_other & STO_AARCH64_VARIANT_PCS) !=
				    0)
					lazy = false;
			}
			if (lazy) {
				/* Relocate the slot; binder fixes it later. */
				*where += (Elf_Addr)obj->relocbase;
			} else {
				def = find_symdef(ELF_R_SYM(rela->r_info), obj,
				    &defobj, SYMLOOK_IN_PLT | flags, NULL,
				    lockstate);
				if (def == NULL)
					return (-1);
				if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC){
					/* Deferred to reloc_gnu_ifunc(). */
					obj->gnu_ifunc = true;
					continue;
				}
				target = (Elf_Addr)(defobj->relocbase +
				    def->st_value);
				/*
				 * Ignore ld_bind_not as it requires lazy
				 * binding
				 */
				*where = target;
			}
			break;
		case R_AARCH64_TLSDESC:
			reloc_tlsdesc(obj, rela, (struct tlsdesc_entry *)where,
			    SYMLOOK_IN_PLT | flags, lockstate);
			break;
		case R_AARCH64_IRELATIVE:
			/* Resolved later, once IRELATIVE processing is safe. */
			obj->irelative = true;
			break;
		case R_AARCH64_NONE:
			break;
		default:
			_rtld_error("Unknown relocation type %u in PLT",
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}
316
/*
 * LD_BIND_NOW was set - force relocation for all jump slots.
 * IFUNC symbols are skipped here and flagged for reloc_gnu_ifunc().
 * Returns 0 on success, -1 if a symbol cannot be resolved.
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;

	/* Bind each object's jump slots at most once. */
	if (obj->jmpslots_done)
		return (0);

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				/* Deferred to reloc_gnu_ifunc(). */
				obj->gnu_ifunc = true;
				continue;
			}
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
			break;
		}
	}
	obj->jmpslots_done = true;

	return (0);
}
357
358 static void
reloc_iresolve_one(Obj_Entry * obj,const Elf_Rela * rela,RtldLockState * lockstate)359 reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
360 RtldLockState *lockstate)
361 {
362 Elf_Addr *where, target, *ptr;
363
364 ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
365 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
366 lock_release(rtld_bind_lock, lockstate);
367 target = call_ifunc_resolver(ptr);
368 wlock_acquire(rtld_bind_lock, lockstate);
369 *where = target;
370 }
371
372 int
reloc_iresolve(Obj_Entry * obj,struct Struct_RtldLockState * lockstate)373 reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
374 {
375 const Elf_Rela *relalim;
376 const Elf_Rela *rela;
377
378 if (!obj->irelative)
379 return (0);
380 obj->irelative = false;
381 relalim = (const Elf_Rela *)((const char *)obj->pltrela +
382 obj->pltrelasize);
383 for (rela = obj->pltrela; rela < relalim; rela++) {
384 if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
385 reloc_iresolve_one(obj, rela, lockstate);
386 }
387 return (0);
388 }
389
390 int
reloc_iresolve_nonplt(Obj_Entry * obj,struct Struct_RtldLockState * lockstate)391 reloc_iresolve_nonplt(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
392 {
393 const Elf_Rela *relalim;
394 const Elf_Rela *rela;
395
396 if (!obj->irelative_nonplt)
397 return (0);
398 obj->irelative_nonplt = false;
399 relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
400 for (rela = obj->rela; rela < relalim; rela++) {
401 if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
402 reloc_iresolve_one(obj, rela, lockstate);
403 }
404 return (0);
405 }
406
/*
 * Resolve jump slots that refer to STT_GNU_IFUNC symbols, as flagged
 * by reloc_plt()/reloc_jmpslots().  The bind lock is dropped while the
 * resolver runs since it executes arbitrary user code.  Returns 0 on
 * success, -1 if a symbol cannot be resolved.
 */
int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	Elf_Addr *where, target;
	const Elf_Sym *def;
	const Obj_Entry *defobj;

	if (!obj->gnu_ifunc)
		return (0);
	relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_JUMP_SLOT) {
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
			    SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			/* Non-IFUNC slots were already handled elsewhere. */
			if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
				continue;
			lock_release(rtld_bind_lock, lockstate);
			target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
			wlock_acquire(rtld_bind_lock, lockstate);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
		}
	}
	obj->gnu_ifunc = false;
	return (0);
}
439
440 Elf_Addr
reloc_jmpslot(Elf_Addr * where,Elf_Addr target,const Obj_Entry * defobj __unused,const Obj_Entry * obj __unused,const Elf_Rel * rel)441 reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
442 const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
443 const Elf_Rel *rel)
444 {
445
446 assert(ELF_R_TYPE(rel->r_info) == R_AARCH64_JUMP_SLOT ||
447 ELF_R_TYPE(rel->r_info) == R_AARCH64_IRELATIVE);
448
449 if (*where != target && !ld_bind_not)
450 *where = target;
451 return (target);
452 }
453
/*
 * Machine-dependent ifunc setup hook.  Nothing is needed on aarch64;
 * the body is intentionally empty.
 */
void
ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)] __unused)
{

}
459
/*
 * Process non-PLT relocations for an object.
 *
 * Symbol-referencing relocations are resolved first; IFUNC symbols are
 * either resolved now (SYMLOOK_IFUNC pass) or flagged for a later
 * pass.  Returns 0 on success, -1 on error (with an rtld error set or
 * printed).
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	SymCache *cache;
	Elf_Addr *where, symval;

	/*
	 * The dynamic loader may be called from a thread, we have
	 * limited amounts of stack available so we cannot use alloca().
	 */
	if (obj == obj_rtld)
		cache = NULL;
	else
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
	/* No need to check for NULL here */

	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		/*
		 * First, resolve symbol for relocations which
		 * reference symbols.
		 */
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_TLS_TPREL64:
		case R_AARCH64_TLS_DTPREL64:
		case R_AARCH64_TLS_DTPMOD64:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, flags, cache, lockstate);
			if (def == NULL)
				return (-1);
			/*
			 * If symbol is IFUNC, only perform relocation
			 * when caller allowed it by passing
			 * SYMLOOK_IFUNC flag. Skip the relocations
			 * otherwise.
			 *
			 * Also error out in case IFUNC relocations
			 * are specified for TLS, which cannot be
			 * usefully interpreted.
			 */
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				switch (ELF_R_TYPE(rela->r_info)) {
				case R_AARCH64_ABS64:
				case R_AARCH64_GLOB_DAT:
					if ((flags & SYMLOOK_IFUNC) == 0) {
						obj->non_plt_gnu_ifunc = true;
						continue;
					}
					symval = (Elf_Addr)rtld_resolve_ifunc(
					    defobj, def);
					break;
				default:
					_rtld_error("%s: IFUNC for TLS reloc",
					    obj->path);
					return (-1);
				}
			} else {
				/* IFUNC pass handles only IFUNC symbols. */
				if ((flags & SYMLOOK_IFUNC) != 0)
					continue;
				symval = (Elf_Addr)defobj->relocbase +
				    def->st_value;
			}
			break;
		default:
			/* Non-symbol relocs were done on the first pass. */
			if ((flags & SYMLOOK_IFUNC) != 0)
				continue;
		}

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

		switch (ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
		case R_AARCH64_GLOB_DAT:
			*where = symval + rela->r_addend;
			break;
		case R_AARCH64_COPY:
			/*
			 * These are deferred until all other relocations have
			 * been done. All we do here is make sure that the
			 * COPY relocation is not in a shared library. They
			 * are allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_AARCH64_COPY "
				    "relocation in shared library", obj->path);
				return (-1);
			}
			break;
		case R_AARCH64_TLSDESC:
			reloc_tlsdesc(obj, rela, (struct tlsdesc_entry *)where,
			    flags, lockstate);
			break;
		case R_AARCH64_TLS_TPREL64:
			/*
			 * We lazily allocate offsets for static TLS as we
			 * see the first relocation that references the
			 * TLS block. This allows us to support (small
			 * amounts of) static TLS in dynamically loaded
			 * modules. If we run out of space, we generate an
			 * error.
			 */
			if (!defobj->tls_static) {
				if (!allocate_tls_offset(
				    __DECONST(Obj_Entry *, defobj))) {
					_rtld_error(
					    "%s: No space available for static "
					    "Thread Local Storage", obj->path);
					return (-1);
				}
			}
			*where = def->st_value + rela->r_addend +
			    defobj->tlsoffset;
			break;

		/*
		 * !!! BEWARE !!!
		 * ARM ELF ABI defines TLS_DTPMOD64 as 1029, and TLS_DTPREL64
		 * as 1028. But actual bfd linker and the glibc RTLD linker
		 * treats TLS_DTPMOD64 as 1028 and TLS_DTPREL64 1029.
		 */
		case R_AARCH64_TLS_DTPREL64: /* effectively is TLS_DTPMOD64 */
			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_AARCH64_TLS_DTPMOD64: /* effectively is TLS_DTPREL64 */
			*where += (Elf_Addr)(def->st_value + rela->r_addend);
			break;
		case R_AARCH64_RELATIVE:
			*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
			break;
		case R_AARCH64_NONE:
			break;
		case R_AARCH64_IRELATIVE:
			/* Resolved later by reloc_iresolve_nonplt(). */
			obj->irelative_nonplt = true;
			break;
		default:
			rtld_printf("%s: Unhandled relocation %lu\n",
			    obj->path, ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}
613
/*
 * Allocate the static TLS block for the initially loaded objects and
 * install it in the current thread's TCB.
 */
void
allocate_initial_tls(Obj_Entry *objs)
{

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    ld_static_tls_extra;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}
628
629 void *
__tls_get_addr(tls_index * ti)630 __tls_get_addr(tls_index* ti)
631 {
632 uintptr_t **dtvp;
633
634 dtvp = &_tcb_get()->tcb_dtv;
635 return (tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset));
636 }
637