/* xref: /freebsd/libexec/rtld-elf/aarch64/reloc.c (revision f5f40dd63bc7acbb5312b26ac1ea1103c12352a6) */
/*-
 * Copyright (c) 2014-2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>

#include <machine/sysarch.h>

#include <stdlib.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_printf.h"

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with the macro below.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

/*
 * These are not the correct prototypes, but we only need them for
 * function pointers to simple asm functions.
 */
void *_rtld_tlsdesc_static(void *);
void *_rtld_tlsdesc_undef(void *);
void *_rtld_tlsdesc_dynamic(void *);

void _exit(int);

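/*
 * Handle AArch64-specific dynamic tags.  DT_AARCH64_VARIANT_PCS records
 * that the object may contain variant PCS (non-standard register
 * convention) symbols, which reloc_plt() checks for later.  Returns true
 * if the tag was consumed.
 */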
bool
arch_digest_dynamic(struct Struct_Obj_Entry *obj, const Elf_Dyn *dynp)
{
	if (dynp->d_tag == DT_AARCH64_VARIANT_PCS) {
		obj->variant_pcs = true;
		return (true);
	}

	return (false);
}

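/*
 * Parse a GNU property note (NT_GNU_PROPERTY_TYPE_0).  If the
 * GNU_PROPERTY_AARCH64_FEATURE_1_BTI flag is present, the object's whole
 * mapping is marked with Branch Target Identification guard pages via
 * sysarch().  Returns true if the note was recognized.
 */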
bool
arch_digest_note(struct Struct_Obj_Entry *obj __unused, const Elf_Note *note)
{
	const char *note_name;
	const uint32_t *note_data;

	note_name = (const char *)(note + 1);
	/* Only handle GNU notes */
	if (note->n_namesz != sizeof(ELF_NOTE_GNU) ||
	    strncmp(note_name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) != 0)
		return (false);

	/* Only handle GNU property notes */
	if (note->n_type != NT_GNU_PROPERTY_TYPE_0)
		return (false);

	/*
	 * note_data[0] - Type
	 * note_data[1] - Length
	 * note_data[2] - Data
	 * note_data[3] - Padding?
	 */
	note_data = (const uint32_t *)(note_name + note->n_namesz);

	/* Only handle AArch64 feature notes */
	if (note_data[0] != GNU_PROPERTY_AARCH64_FEATURE_1_AND)
		return (false);

	/* We expect at least 4 bytes of data */
	if (note_data[1] < 4)
		return (false);

	/* TODO: Only guard if HWCAP2_BTI is set */
	if ((note_data[2] & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) != 0) {
		struct arm64_guard_page_args guard;

		guard.addr = (uintptr_t)obj->mapbase;
		guard.len = obj->mapsize;

		sysarch(ARM64_GUARD_PAGE, &guard);
	}

	return (true);
}

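/*
 * Set up the reserved PLTGOT slots: GOT[1] holds the object pointer and
 * GOT[2] the address of the binder entry point, both consumed by the
 * _rtld_bind_start trampoline on the first (lazy) call through a PLT slot.
 */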
void
init_pltgot(Obj_Entry *obj)
{

	if (obj->pltgot != NULL) {
		obj->pltgot[1] = (Elf_Addr) obj;
		obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
	}
}

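/*
 * Process R_AARCH64_COPY relocations in the main program: look each symbol
 * up in the objects loaded after it and copy the initialized data from the
 * defining object into the main program's copy of the variable.
 */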
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Obj_Entry *srcobj, *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *srcsym;
	const Elf_Sym *dstsym;
	const void *srcaddr;
	const char *name;
	void *dstaddr;
	SymLook req;
	size_t size;
	int res;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) != R_AARCH64_COPY)
			continue;

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;

		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		     srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}
		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" referenced from "
			    "COPY relocation in %s", name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
	}

	return (0);
}

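/*
 * TLS descriptor relocations fill in a pair of GOT slots: the first slot
 * is a resolver function (one of the _rtld_tlsdesc_* asm stubs) and the
 * second is its argument.  The TLSDESC call sequence emitted by the
 * compiler invokes the resolver, which returns the variable's offset from
 * the thread pointer.  struct tls_data is the argument handed to the
 * dynamic resolver when the offset is not known at load time.
 */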
struct tls_data {
	Elf_Addr	dtv_gen;
	int		tls_index;
	Elf_Addr	tls_offs;
};

static Elf_Addr
reloc_tlsdesc_alloc(int tlsindex, Elf_Addr tlsoffs)
{
	struct tls_data *tlsdesc;

	tlsdesc = xmalloc(sizeof(struct tls_data));
	tlsdesc->dtv_gen = tls_dtv_generation;
	tlsdesc->tls_index = tlsindex;
	tlsdesc->tls_offs = tlsoffs;

	return ((Elf_Addr)tlsdesc);
}

static void
reloc_tlsdesc(const Obj_Entry *obj, const Elf_Rela *rela, Elf_Addr *where,
    int flags, RtldLockState *lockstate)
{
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr offs;

	offs = 0;
	if (ELF_R_SYM(rela->r_info) != 0) {
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
		    NULL, lockstate);
		if (def == NULL)
			rtld_die();
		offs = def->st_value;
		obj = defobj;
		if (def->st_shndx == SHN_UNDEF) {
			/* Weak undefined thread variable */
			where[0] = (Elf_Addr)_rtld_tlsdesc_undef;
			where[1] = rela->r_addend;
			return;
		}
	}
	offs += rela->r_addend;

	if (obj->tlsoffset != 0) {
		/* Variable is in the initially allocated TLS segment */
		where[0] = (Elf_Addr)_rtld_tlsdesc_static;
		where[1] = obj->tlsoffset + offs;
	} else {
		/* TLS offset is unknown at load time, use dynamic resolving */
		where[0] = (Elf_Addr)_rtld_tlsdesc_dynamic;
		where[1] = reloc_tlsdesc_alloc(obj->tlsindex, offs);
	}
}

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def, *sym;
	bool lazy;

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			lazy = true;
			if (obj->variant_pcs) {
				sym = &obj->symtab[ELF_R_SYM(rela->r_info)];
				/*
				 * Variant PCS functions don't follow the
				 * standard register convention. Because of
				 * this we can't use lazy relocation and
				 * need to set the target address.
				 */
				if ((sym->st_other & STO_AARCH64_VARIANT_PCS) !=
				    0)
					lazy = false;
			}
			if (lazy) {
				*where += (Elf_Addr)obj->relocbase;
			} else {
				def = find_symdef(ELF_R_SYM(rela->r_info), obj,
				    &defobj, SYMLOOK_IN_PLT | flags, NULL,
				    lockstate);
				if (def == NULL)
					return (-1);
				if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC){
					obj->gnu_ifunc = true;
					continue;
				}
				target = (Elf_Addr)(defobj->relocbase +
				    def->st_value);
				/*
				 * Ignore ld_bind_not as it requires lazy
				 * binding
				 */
				*where = target;
			}
			break;
		case R_AARCH64_TLSDESC:
			reloc_tlsdesc(obj, rela, where, SYMLOOK_IN_PLT | flags,
			    lockstate);
			break;
		case R_AARCH64_IRELATIVE:
			obj->irelative = true;
			break;
		case R_AARCH64_NONE:
			break;
		default:
			_rtld_error("Unknown relocation type %u in PLT",
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;

	if (obj->jmpslots_done)
		return (0);

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				obj->gnu_ifunc = true;
				continue;
			}
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
			break;
		}
	}
	obj->jmpslots_done = true;

	return (0);
}

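/*
 * Resolve one R_AARCH64_IRELATIVE relocation: the addend points at the
 * ifunc resolver, which is called with the bind lock dropped, and whose
 * return value becomes the final target address.
 */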
static void
reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
    RtldLockState *lockstate)
{
	Elf_Addr *where, target, *ptr;

	ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
	where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	lock_release(rtld_bind_lock, lockstate);
	target = call_ifunc_resolver(ptr);
	wlock_acquire(rtld_bind_lock, lockstate);
	*where = target;
}

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	if (!obj->irelative)
		return (0);
	obj->irelative = false;
	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
			reloc_iresolve_one(obj, rela, lockstate);
	}
	return (0);
}

int
reloc_iresolve_nonplt(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	if (!obj->irelative_nonplt)
		return (0);
	obj->irelative_nonplt = false;
	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
			reloc_iresolve_one(obj, rela, lockstate);
	}
	return (0);
}

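/*
 * Fix up jump slots whose target symbol is an STT_GNU_IFUNC: run the
 * resolver (with the bind lock dropped) and bind the slot to the address
 * it returns.
 */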
int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
   struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	Elf_Addr *where, target;
	const Elf_Sym *def;
	const Obj_Entry *defobj;

	if (!obj->gnu_ifunc)
		return (0);
	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_JUMP_SLOT) {
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
			    SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
				continue;
			lock_release(rtld_bind_lock, lockstate);
			target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
			wlock_acquire(rtld_bind_lock, lockstate);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
		}
	}
	obj->gnu_ifunc = false;
	return (0);
}

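/*
 * Bind a single jump slot to its resolved target.  The store is skipped
 * when LD_BIND_NOT is in effect, so every call keeps going through the
 * binder.
 */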
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
    const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
    const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_AARCH64_JUMP_SLOT ||
	    ELF_R_TYPE(rel->r_info) == R_AARCH64_IRELATIVE);

	if (*where != target && !ld_bind_not)
		*where = target;
	return (target);
}

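/* No machine-dependent ifunc state to capture from the aux vector here. */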
void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

}

/*
 * Process non-PLT relocations
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	SymCache *cache;
	Elf_Addr *where, symval;

	/*
	 * The dynamic loader may be called from a thread, and we have a
	 * limited amount of stack available, so we cannot use alloca().
	 */
	if (obj == obj_rtld)
		cache = NULL;
	else
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
		/* No need to check for NULL here */

	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		/*
		 * First, resolve symbol for relocations which
		 * reference symbols.
		 */
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_TLS_TPREL64:
		case R_AARCH64_TLS_DTPREL64:
		case R_AARCH64_TLS_DTPMOD64:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, flags, cache, lockstate);
			if (def == NULL)
				return (-1);
			/*
			 * If symbol is IFUNC, only perform relocation
			 * when caller allowed it by passing
			 * SYMLOOK_IFUNC flag.  Skip the relocations
			 * otherwise.
			 *
			 * Also error out in case IFUNC relocations
			 * are specified for TLS, which cannot be
			 * usefully interpreted.
			 */
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				switch (ELF_R_TYPE(rela->r_info)) {
				case R_AARCH64_ABS64:
				case R_AARCH64_GLOB_DAT:
					if ((flags & SYMLOOK_IFUNC) == 0) {
						obj->non_plt_gnu_ifunc = true;
						continue;
					}
					symval = (Elf_Addr)rtld_resolve_ifunc(
					    defobj, def);
					break;
				default:
					_rtld_error("%s: IFUNC for TLS reloc",
					    obj->path);
					return (-1);
				}
			} else {
				if ((flags & SYMLOOK_IFUNC) != 0)
					continue;
				symval = (Elf_Addr)defobj->relocbase +
				    def->st_value;
			}
			break;
		default:
			if ((flags & SYMLOOK_IFUNC) != 0)
				continue;
		}

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

		switch (ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
		case R_AARCH64_GLOB_DAT:
			*where = symval + rela->r_addend;
			break;
		case R_AARCH64_COPY:
			/*
			 * These are deferred until all other relocations have
			 * been done. All we do here is make sure that the
			 * COPY relocation is not in a shared library. They
			 * are allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_AARCH64_COPY "
				    "relocation in shared library", obj->path);
				return (-1);
			}
			break;
		case R_AARCH64_TLSDESC:
			reloc_tlsdesc(obj, rela, where, flags, lockstate);
			break;
		case R_AARCH64_TLS_TPREL64:
			/*
			 * We lazily allocate offsets for static TLS as we
			 * see the first relocation that references the
			 * TLS block. This allows us to support (small
			 * amounts of) static TLS in dynamically loaded
			 * modules. If we run out of space, we generate an
			 * error.
			 */
			if (!defobj->tls_static) {
				if (!allocate_tls_offset(
				    __DECONST(Obj_Entry *, defobj))) {
					_rtld_error(
					    "%s: No space available for static "
					    "Thread Local Storage", obj->path);
					return (-1);
				}
			}
			*where = def->st_value + rela->r_addend +
			    defobj->tlsoffset;
			break;

		/*
		 * !!! BEWARE !!!
		 * The ARM ELF ABI defines TLS_DTPMOD64 as 1029 and
		 * TLS_DTPREL64 as 1028, but the BFD linker and the glibc
		 * RTLD treat TLS_DTPMOD64 as 1028 and TLS_DTPREL64 as 1029.
		 */
		case R_AARCH64_TLS_DTPREL64: /* effectively TLS_DTPMOD64 */
			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_AARCH64_TLS_DTPMOD64: /* effectively TLS_DTPREL64 */
			*where += (Elf_Addr)(def->st_value + rela->r_addend);
			break;
		case R_AARCH64_RELATIVE:
			*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
			break;
		case R_AARCH64_NONE:
			break;
		case R_AARCH64_IRELATIVE:
			obj->irelative_nonplt = true;
			break;
		default:
			rtld_printf("%s: Unhandled relocation %lu\n",
			    obj->path, ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}

void
allocate_initial_tls(Obj_Entry *objs)
{

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    ld_static_tls_extra;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}

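/*
 * Generic TLS lookup entry point (used alongside the TLSDESC path): walk
 * the DTV stored in the TCB to find the block for ti_module and return
 * the address ti_offset bytes into it.
 */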
void *
__tls_get_addr(tls_index* ti)
{
	uintptr_t **dtvp;

	dtvp = &_tcb_get()->tcb_dtv;
	return (tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset));
}
629