/*	$NetBSD: mdreloc.c,v 1.23 2003/07/26 15:04:38 mrg Exp $	*/

#include <sys/param.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "machine/sysarch.h"

#include "debug.h"
#include "rtld.h"
#include "rtld_paths.h"

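/*
 * Set up the PLTGOT slots that lazy binding relies on: GOT[1] is expected
 * to hold the Obj_Entry pointer for this object and GOT[2] the address of
 * the binder entry point, so that the PLT trampoline can identify the
 * object and enter _rtld_bind_start on the first call through an
 * unresolved PLT entry.
 */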
void
init_pltgot(Obj_Entry *obj)
{
	if (obj->pltgot != NULL) {
		obj->pltgot[1] = (Elf_Addr) obj;
		obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
	}
}

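/*
 * R_ARM_COPY: the main program contains space (typically in .bss) for a
 * datum that is defined in a shared object; the initialized contents are
 * copied from the defining object into the program's copy.  Only the main
 * program may carry COPY relocations, which is why this is asserted below.
 */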
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	assert(dstobj->mainprog);	/* COPY relocations are invalid elsewhere */

	rellim = (const Elf_Rel *)((const char *) dstobj->rel + dstobj->relsize);
	for (rel = dstobj->rel;  rel < rellim;  rel++) {
		if (ELF_R_TYPE(rel->r_info) == R_ARM_COPY) {
			void *dstaddr;
			const Elf_Sym *dstsym;
			const char *name;
			size_t size;
			const void *srcaddr;
			const Elf_Sym *srcsym;
			const Obj_Entry *srcobj, *defobj;
			SymLook req;
			int res;

			dstaddr = (void *)(dstobj->relocbase + rel->r_offset);
			dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info);
			name = dstobj->strtab + dstsym->st_name;
			size = dstsym->st_size;

			symlook_init(&req, name);
			req.ventry = fetch_ventry(dstobj,
			    ELF_R_SYM(rel->r_info));
			req.flags = SYMLOOK_EARLY;

			for (srcobj = globallist_next(dstobj); srcobj != NULL;
			    srcobj = globallist_next(srcobj)) {
				res = symlook_obj(&req, srcobj);
				if (res == 0) {
					srcsym = req.sym_out;
					defobj = req.defobj_out;
					break;
				}
			}
			if (srcobj == NULL) {
				_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
				    name, dstobj->path);
				return (-1);
			}

			srcaddr = (const void *)(defobj->relocbase +
			    srcsym->st_value);
			memcpy(dstaddr, srcaddr, size);
		}
	}
	return 0;
}

void _rtld_bind_start(void);
void _rtld_relocate_nonplt_self(Elf_Dyn *, Elf_Addr);

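/*
 * Relocate the dynamic linker itself before any of its own data can be
 * trusted: walk _DYNAMIC for DT_REL/DT_RELSZ and add the load base to the
 * word at each relocation target.  No symbol lookup is possible this early,
 * so every entry is treated as if it were R_ARM_RELATIVE, which is all rtld
 * needs for its own startup.
 */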
void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rel *rel = NULL, *rellim;
	Elf_Addr relsz = 0;
	Elf_Addr *where;

	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELSZ:
			relsz = dynp->d_un.d_val;
			break;
		}
	}
	rellim = (const Elf_Rel *)((const char *)rel + relsz);
	for (; rel < rellim; rel++) {
		where = (Elf_Addr *)(relocbase + rel->r_offset);

		*where += (Elf_Addr)relocbase;
	}
}

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(void *where)
{
	Elf_Addr res;

	memcpy(&res, where, sizeof(res));

	return (res);
}

static __inline void
store_ptr(void *where, Elf_Addr val)
{

	memcpy(where, &val, sizeof(val));
}

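/*
 * Apply a single non-PLT relocation.  ARM uses the Elf_Rel format, so the
 * addend is the value already stored at the relocation target; unaligned
 * targets are read and written through load_ptr()/store_ptr() above.
 */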
static int
reloc_nonplt_object(Obj_Entry *obj, const Elf_Rel *rel, SymCache *cache,
    int flags, RtldLockState *lockstate)
{
	Elf_Addr        *where;
	const Elf_Sym   *def;
	const Obj_Entry *defobj;
	Elf_Addr         tmp;
	unsigned long	 symnum;

	where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	symnum = ELF_R_SYM(rel->r_info);

	switch (ELF_R_TYPE(rel->r_info)) {
	case R_ARM_NONE:
		break;

#if 1 /* XXX should not occur */
	case R_ARM_PC24: {	/* word32 S - P + A */
		Elf32_Sword addend;

		/*
		 * Extract addend and sign-extend if needed.
		 */
		addend = *where;
		if (addend & 0x00800000)
			addend |= 0xff000000;

		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
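		/*
		 * The addend counts instruction words, so shift it left by
		 * two to get bytes, form the PC-relative displacement, and
		 * check that it still fits in the signed 26-bit byte range
		 * a branch can encode (the 0xfe000000 test); the 24-bit
		 * word offset is then written back into the instruction.
		 */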
		tmp = (Elf_Addr)obj->relocbase + def->st_value
		    - (Elf_Addr)where + (addend << 2);
		if ((tmp & 0xfe000000) != 0xfe000000 &&
		    (tmp & 0xfe000000) != 0) {
			_rtld_error(
			"%s: R_ARM_PC24 relocation @ %p to %s failed "
			"(displacement %ld (%#lx) out of range)",
			    obj->path, where,
			    obj->strtab + obj->symtab[symnum].st_name,
			    (long) tmp, (long) tmp);
			return -1;
		}
		tmp >>= 2;
		*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
		dbg("PC24 %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)*where, where, defobj->path);
		break;
	}
#endif

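	/*
	 * ABS32/GLOB_DAT: the word at the target already holds the addend;
	 * add the defining object's load base and the symbol value to it.
	 * The unaligned path goes through load_ptr()/store_ptr().
	 */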
	case R_ARM_ABS32:	/* word32 B + S + A */
	case R_ARM_GLOB_DAT:	/* word32 B + S */
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)defobj->relocbase +
			    def->st_value;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)defobj->relocbase +
			    def->st_value;
			store_ptr(where, tmp);
		}
		dbg("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp, where, defobj->path);
		break;

	case R_ARM_RELATIVE:	/* word32 B + A */
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)obj->relocbase;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)obj->relocbase;
			store_ptr(where, tmp);
		}
		dbg("RELATIVE in %s --> %p", obj->path,
		    (void *)tmp);
		break;

	case R_ARM_COPY:
		/*
		 * These are deferred until all other relocations have
		 * been done.  All we do here is make sure that the
		 * COPY relocation is not in a shared library.  They
		 * are allowed only in executable files.
		 */
		if (!obj->mainprog) {
			_rtld_error(
			"%s: Unexpected R_COPY relocation in shared library",
			    obj->path);
			return -1;
		}
		dbg("COPY (avoid in main)");
		break;

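	/*
	 * The two TLS_DTP* relocations fill in the (module index, offset)
	 * pair that __tls_get_addr() consumes: DTPMOD32 stores the defining
	 * object's TLS module index and DTPOFF32 the symbol's offset within
	 * that module's TLS block.
	 */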
	case R_ARM_TLS_DTPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(def->st_value);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;
	case R_ARM_TLS_DTPMOD32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(defobj->tlsindex);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPMOD32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;

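	/*
	 * TLS_TPOFF32: static TLS model.  The result is an offset from the
	 * thread pointer, so a static TLS offset must already be reserved;
	 * allocate_tls_offset() reserves one if that has not happened yet.
	 */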
	case R_ARM_TLS_TPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		if (!defobj->tls_static && !allocate_tls_offset(obj))
			return -1;

		tmp = (Elf_Addr)def->st_value + defobj->tlsoffset;
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp += *where;
			*where = tmp;
		} else {
			tmp += load_ptr(where);
			store_ptr(where, tmp);
		}
		dbg("TLS_TPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);
		break;

	default:
		dbg("sym = %lu, type = %lu, offset = %p, "
		    "contents = %p, symbol = %s",
		    symnum, (u_long)ELF_R_TYPE(rel->r_info),
		    (void *)rel->r_offset, (void *)load_ptr(where),
		    obj->strtab + obj->symtab[symnum].st_name);
		_rtld_error("%s: Unsupported relocation type %ld "
		    "in non-PLT relocations\n",
		    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
		return -1;
	}
	return 0;
}

/*
 * Process non-PLT relocations.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	SymCache *cache;
	int r = -1;

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);
	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread; we have only a
	 * limited amount of stack available, so we cannot use alloca().
	 */
	cache = calloc(obj->dynsymcount, sizeof(SymCache));
	/* No need to check for NULL here */

	rellim = (const Elf_Rel *)((const char *)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		if (reloc_nonplt_object(obj, rel, cache, flags, lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache != NULL)
		free(cache);
	return (r);
}

/*
 * Process the PLT relocations.
 */
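/*
 * Only the load base is added to each jump slot here, so an unresolved
 * slot still routes calls through the PLT stub into _rtld_bind_start;
 * the real target is filled in later by reloc_jmpslot(), either lazily on
 * first call or eagerly from reloc_jmpslots() below.
 */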
int
reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel +
	    obj->pltrelsize);
	for (rel = obj->pltrel;  rel < rellim;  rel++) {
		Elf_Addr *where;

		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots.
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel + obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);
		reloc_jmpslot(where, target, defobj, obj,
		    (const Elf_Rel *) rel);
	}

	obj->jmpslots_done = true;

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

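/*
 * Resolve a single jump slot: store the target address into the GOT entry
 * unless LD_BIND_NOT is in effect, in which case the slot is left alone so
 * that every call keeps going through the binder.
 */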
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
    const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
    const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

	if (*where != target && !ld_bind_not)
		*where = target;
	return (target);
}

void
ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)] __unused)
{

}

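/*
 * Set up TLS for the main thread: size the static TLS block from what has
 * been allocated so far (plus some slack for later dlopen()ed modules) and
 * install the freshly built TCB with _tcb_set().
 */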
void
allocate_initial_tls(Obj_Entry *objs)
{
	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */

	tls_static_space = tls_last_offset + tls_last_size +
	    ld_static_tls_extra;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}

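/*
 * Look up a TLS variable from the (module, offset) pair produced by the
 * TLS_DTPMOD32/TLS_DTPOFF32 relocations above.  tls_get_addr_common() is
 * expected to consult the current thread's DTV and allocate the module's
 * TLS block on first use.
 */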
void *
__tls_get_addr(tls_index* ti)
{
	uintptr_t **dtvp;

	dtvp = &_tcb_get()->tcb_dtv;
	return (tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset));
}
473