xref: /freebsd/libexec/rtld-elf/aarch64/reloc.c (revision 02d06043ba88f931f9debd5aa519fc303ca70d11)
/*-
 * Copyright (c) 2014-2015 The FreeBSD Foundation
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>

#include <machine/sysarch.h>

#include <stdlib.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_printf.h"

/*
 * This is not the correct prototype, but we only need it for
 * a function pointer to a simple asm function.
 */
void *_rtld_tlsdesc_static(void *);
void *_rtld_tlsdesc_undef(void *);
void *_rtld_tlsdesc_dynamic(void *);

void _exit(int);

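/*
 * Handle AArch64-specific dynamic tags.  The only tag of interest here is
 * DT_AARCH64_VARIANT_PCS, which marks the object as possibly containing
 * variant PCS symbols.  Returns true when the tag has been consumed.
 */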
bool
arch_digest_dynamic(struct Struct_Obj_Entry *obj, const Elf_Dyn *dynp)
{
	if (dynp->d_tag == DT_AARCH64_VARIANT_PCS) {
		obj->variant_pcs = true;
		return (true);
	}

	return (false);
}

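/*
 * Parse a GNU property note (NT_GNU_PROPERTY_TYPE_0).  When the note
 * advertises GNU_PROPERTY_AARCH64_FEATURE_1_BTI, ask the kernel via
 * sysarch(ARM64_GUARD_PAGE) to apply the guard to the object's mapping.
 * Returns true when the note has been handled.
 */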
bool
arch_digest_note(struct Struct_Obj_Entry *obj __unused, const Elf_Note *note)
{
	const char *note_name;
	const uint32_t *note_data;

	note_name = (const char *)(note + 1);
	/* Only handle GNU notes */
	if (note->n_namesz != sizeof(ELF_NOTE_GNU) ||
	    strncmp(note_name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) != 0)
		return (false);

	/* Only handle GNU property notes */
	if (note->n_type != NT_GNU_PROPERTY_TYPE_0)
		return (false);

	/*
	 * note_data[0] - Type
	 * note_data[1] - Length
	 * note_data[2] - Data
	 * note_data[3] - Padding?
	 */
	note_data = (const uint32_t *)(note_name + note->n_namesz);

	/* Only handle AArch64 feature notes */
	if (note_data[0] != GNU_PROPERTY_AARCH64_FEATURE_1_AND)
		return (false);

	/* We expect at least 4 bytes of data */
	if (note_data[1] < 4)
		return (false);

	/* TODO: Only guard if HWCAP2_BTI is set */
	if ((note_data[2] & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) != 0) {
		struct arm64_guard_page_args guard;

		guard.addr = (uintptr_t)obj->mapbase;
		guard.len = obj->mapsize;

		sysarch(ARM64_GUARD_PAGE, &guard);
	}

	return (true);
}

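/*
 * Prepare the PLT GOT for lazy binding: GOT[1] holds the Obj_Entry pointer
 * and GOT[2] the address of _rtld_bind_start, which the PLT resolver stub
 * uses to enter the run-time linker.
 */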
void
init_pltgot(Obj_Entry *obj)
{

	if (obj->pltgot != NULL) {
		obj->pltgot[1] = (Elf_Addr) obj;
		obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
	}
}

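/*
 * Process R_AARCH64_COPY relocations in the main program: find the symbol
 * in one of the shared objects loaded after it and copy the initialized
 * data into the program's own storage.
 */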
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Obj_Entry *srcobj, *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *srcsym;
	const Elf_Sym *dstsym;
	const void *srcaddr;
	const char *name;
	void *dstaddr;
	SymLook req;
	size_t size;
	int res;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((const char *)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) != R_AARCH64_COPY)
			continue;

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;

		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		     srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}
		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" referenced from "
			    "COPY relocation in %s", name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
	}

	return (0);
}

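/*
 * Argument block handed to _rtld_tlsdesc_dynamic: the DTV generation at
 * relocation time plus the module index and the offset of the variable
 * within that module's TLS block.
 */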
struct tls_data {
	Elf_Addr	dtv_gen;
	int		tls_index;
	Elf_Addr	tls_offs;
};

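/*
 * Allocate the tls_data argument used by _rtld_tlsdesc_dynamic for a
 * variable whose static TLS offset is not known at load time.
 */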
static struct tls_data *
reloc_tlsdesc_alloc(int tlsindex, Elf_Addr tlsoffs)
{
	struct tls_data *tlsdesc;

	tlsdesc = xmalloc(sizeof(struct tls_data));
	tlsdesc->dtv_gen = tls_dtv_generation;
	tlsdesc->tls_index = tlsindex;
	tlsdesc->tls_offs = tlsoffs;

	return (tlsdesc);
}

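/*
 * In-GOT layout of an AArch64 TLS descriptor: a resolver function pointer
 * followed by one argument word, interpreted as an addend, a static TLS
 * offset, or a tls_data pointer depending on the resolver installed by
 * reloc_tlsdesc() below.
 */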
struct tlsdesc_entry {
	void	*(*func)(void *);
	union {
		Elf_Ssize	addend;
		Elf_Size	offset;
		struct tls_data	*data;
	};
};

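/*
 * Resolve an R_AARCH64_TLSDESC relocation.  Weak undefined symbols get the
 * trivial _rtld_tlsdesc_undef resolver, variables located in the initially
 * allocated TLS block get _rtld_tlsdesc_static with a precomputed offset,
 * and everything else uses _rtld_tlsdesc_dynamic.
 */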
static void
reloc_tlsdesc(const Obj_Entry *obj, const Elf_Rela *rela,
    struct tlsdesc_entry *where, int flags, RtldLockState *lockstate)
{
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr offs;

	offs = 0;
	if (ELF_R_SYM(rela->r_info) != 0) {
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
			    NULL, lockstate);
		if (def == NULL)
			rtld_die();
		offs = def->st_value;
		obj = defobj;
		if (def->st_shndx == SHN_UNDEF) {
			/* Weak undefined thread variable */
			where->func = _rtld_tlsdesc_undef;
			where->addend = rela->r_addend;
			return;
		}
	}
	offs += rela->r_addend;

	if (obj->tlsoffset != 0) {
		/* Variable is in the initially allocated TLS segment */
		where->func = _rtld_tlsdesc_static;
		where->offset = obj->tlsoffset + offs;
	} else {
		/* TLS offset is unknown at load time; use dynamic resolving */
		where->func = _rtld_tlsdesc_dynamic;
		where->data = reloc_tlsdesc_alloc(obj->tlsindex, offs);
	}
}

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def, *sym;
	bool lazy;

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			lazy = true;
			if (obj->variant_pcs) {
				sym = &obj->symtab[ELF_R_SYM(rela->r_info)];
				/*
				 * Variant PCS functions don't follow the
				 * standard register convention. Because of
				 * this we can't use lazy relocation and
				 * need to set the target address.
				 */
				if ((sym->st_other & STO_AARCH64_VARIANT_PCS) !=
				    0)
					lazy = false;
			}
			if (lazy) {
				*where += (Elf_Addr)obj->relocbase;
			} else {
				def = find_symdef(ELF_R_SYM(rela->r_info), obj,
				    &defobj, SYMLOOK_IN_PLT | flags, NULL,
				    lockstate);
				if (def == NULL)
					return (-1);
				if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC){
					obj->gnu_ifunc = true;
					continue;
				}
				target = (Elf_Addr)(defobj->relocbase +
				    def->st_value);
				/*
				 * Ignore ld_bind_not as it requires lazy
				 * binding
				 */
				*where = target;
			}
			break;
		case R_AARCH64_TLSDESC:
			reloc_tlsdesc(obj, rela, (struct tlsdesc_entry *)where,
			    SYMLOOK_IN_PLT | flags, lockstate);
			break;
		case R_AARCH64_IRELATIVE:
			obj->irelative = true;
			break;
		case R_AARCH64_NONE:
			break;
		default:
			_rtld_error("Unknown relocation type %u in PLT",
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;

	if (obj->jmpslots_done)
		return (0);

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				obj->gnu_ifunc = true;
				continue;
			}
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
			break;
		}
	}
	obj->jmpslots_done = true;

	return (0);
}

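/*
 * Resolve a single R_AARCH64_IRELATIVE relocation by calling the ifunc
 * resolver (with the bind lock dropped) and storing its return value in
 * the relocated slot.
 */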
static void
reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
    RtldLockState *lockstate)
{
	Elf_Addr *where, target, *ptr;

	ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
	where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	lock_release(rtld_bind_lock, lockstate);
	target = call_ifunc_resolver(ptr);
	wlock_acquire(rtld_bind_lock, lockstate);
	*where = target;
}

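/*
 * Resolve IRELATIVE relocations found in the PLT relocation table.
 */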
int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	if (!obj->irelative)
		return (0);
	obj->irelative = false;
	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
			reloc_iresolve_one(obj, rela, lockstate);
	}
	return (0);
}

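/*
 * Resolve IRELATIVE relocations found among the non-PLT relocations.
 */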
int
reloc_iresolve_nonplt(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	if (!obj->irelative_nonplt)
		return (0);
	obj->irelative_nonplt = false;
	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
			reloc_iresolve_one(obj, rela, lockstate);
	}
	return (0);
}

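/*
 * Bind jump slots that refer to STT_GNU_IFUNC symbols: run the resolver
 * (with the bind lock dropped) and patch the slot with the returned
 * address.
 */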
int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
   struct Struct_RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	Elf_Addr *where, target;
	const Elf_Sym *def;
	const Obj_Entry *defobj;

	if (!obj->gnu_ifunc)
		return (0);
	relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela;  rela < relalim;  rela++) {
		if (ELF_R_TYPE(rela->r_info) == R_AARCH64_JUMP_SLOT) {
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
			    SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
				continue;
			lock_release(rtld_bind_lock, lockstate);
			target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
			wlock_acquire(rtld_bind_lock, lockstate);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
		}
	}
	obj->gnu_ifunc = false;
	return (0);
}

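/*
 * Write the resolved target into a jump slot, unless LD_BIND_NOT asked us
 * to keep routing calls through the binder.
 */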
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
    const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
    const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_AARCH64_JUMP_SLOT ||
	    ELF_R_TYPE(rel->r_info) == R_AARCH64_IRELATIVE);

	if (*where != target && !ld_bind_not)
		*where = target;
	return (target);
}

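/* Nothing to do; no machine-dependent ifunc initialization is needed here. */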
void
ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)] __unused)
{

}

/*
 * Process non-PLT relocations
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	SymCache *cache;
	Elf_Addr *where, symval;

	/*
	 * The dynamic loader may be called from a thread; we have
	 * limited amounts of stack available, so we cannot use alloca().
	 */
	if (obj == obj_rtld)
		cache = NULL;
	else
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
		/* No need to check for NULL here */

	relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		/*
		 * First, resolve symbol for relocations which
		 * reference symbols.
		 */
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_TLS_TPREL64:
		case R_AARCH64_TLS_DTPREL64:
		case R_AARCH64_TLS_DTPMOD64:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, flags, cache, lockstate);
			if (def == NULL)
				return (-1);
			/*
			 * If symbol is IFUNC, only perform relocation
			 * when caller allowed it by passing
			 * SYMLOOK_IFUNC flag.  Skip the relocations
			 * otherwise.
			 *
			 * Also error out in case IFUNC relocations
			 * are specified for TLS, which cannot be
			 * usefully interpreted.
			 */
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				switch (ELF_R_TYPE(rela->r_info)) {
				case R_AARCH64_ABS64:
				case R_AARCH64_GLOB_DAT:
					if ((flags & SYMLOOK_IFUNC) == 0) {
						obj->non_plt_gnu_ifunc = true;
						continue;
					}
					symval = (Elf_Addr)rtld_resolve_ifunc(
					    defobj, def);
					break;
				default:
					_rtld_error("%s: IFUNC for TLS reloc",
					    obj->path);
					return (-1);
				}
			} else {
				if ((flags & SYMLOOK_IFUNC) != 0)
					continue;
				symval = (Elf_Addr)defobj->relocbase +
				    def->st_value;
			}
			break;
		default:
			if ((flags & SYMLOOK_IFUNC) != 0)
				continue;
		}

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

		switch (ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_ABS64:
		case R_AARCH64_GLOB_DAT:
			*where = symval + rela->r_addend;
			break;
		case R_AARCH64_COPY:
			/*
			 * These are deferred until all other relocations have
			 * been done. All we do here is make sure that the
			 * COPY relocation is not in a shared library. They
			 * are allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_AARCH64_COPY "
				    "relocation in shared library", obj->path);
				return (-1);
			}
			break;
		case R_AARCH64_TLSDESC:
			reloc_tlsdesc(obj, rela, (struct tlsdesc_entry *)where,
			    flags, lockstate);
			break;
		case R_AARCH64_TLS_TPREL64:
			/*
			 * We lazily allocate offsets for static TLS as we
			 * see the first relocation that references the
			 * TLS block. This allows us to support (small
			 * amounts of) static TLS in dynamically loaded
			 * modules. If we run out of space, we generate an
			 * error.
			 */
			if (!defobj->tls_static) {
				if (!allocate_tls_offset(
				    __DECONST(Obj_Entry *, defobj))) {
					_rtld_error(
					    "%s: No space available for static "
					    "Thread Local Storage", obj->path);
					return (-1);
				}
			}
			*where = def->st_value + rela->r_addend +
			    defobj->tlsoffset;
			break;

		/*
		 * !!! BEWARE !!!
		 * The ARM ELF ABI defines TLS_DTPMOD64 as 1029 and
		 * TLS_DTPREL64 as 1028, but the bfd linker and the glibc
		 * RTLD treat TLS_DTPMOD64 as 1028 and TLS_DTPREL64 as 1029.
		 */
		case R_AARCH64_TLS_DTPREL64: /* effectively TLS_DTPMOD64 */
			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_AARCH64_TLS_DTPMOD64: /* effectively TLS_DTPREL64 */
			*where += (Elf_Addr)(def->st_value + rela->r_addend);
			break;
		case R_AARCH64_RELATIVE:
			*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
			break;
		case R_AARCH64_NONE:
			break;
		case R_AARCH64_IRELATIVE:
			obj->irelative_nonplt = true;
			break;
		default:
			rtld_printf("%s: Unhandled relocation %lu\n",
			    obj->path, ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}

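/*
 * Size the static TLS block and install the initial thread's TCB.
 */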
void
allocate_initial_tls(Obj_Entry *objs)
{

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    ld_static_tls_extra;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}

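/*
 * Compute the address of a TLS variable for the current thread from its
 * module index and offset.
 */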
void *
__tls_get_addr(tls_index* ti)
{
	return (tls_get_addr_common(_tcb_get(), ti->ti_module, ti->ti_offset));
}
627