xref: /freebsd/libexec/rtld-elf/arm/reloc.c (revision 031beb4e239bfce798af17f5fe8dba8bcaf13d99)
/*	$NetBSD: mdreloc.c,v 1.23 2003/07/26 15:04:38 mrg Exp $	*/

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "machine/sysarch.h"

#include "debug.h"
#include "rtld.h"
#include "rtld_paths.h"

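/*
 * Initialize the special PLT GOT entries: GOT[1] holds the Obj_Entry
 * pointer for this object and GOT[2] the address of the lazy-binding
 * entry point, _rtld_bind_start, which the PLT stubs jump through the
 * first time a function is called.
 */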
void
init_pltgot(Obj_Entry *obj)
{
	if (obj->pltgot != NULL) {
		obj->pltgot[1] = (Elf_Addr) obj;
		obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
	}
}

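/*
 * Process the R_ARM_COPY relocations of the main program: look each
 * symbol up in the shared objects that follow the executable in the
 * global list and copy the initialized data from the defining object
 * into the space reserved in the executable.
 */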
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	assert(dstobj->mainprog);	/* COPY relocations are invalid elsewhere */

	rellim = (const Elf_Rel *)((const char *) dstobj->rel + dstobj->relsize);
	for (rel = dstobj->rel;  rel < rellim;  rel++) {
		if (ELF_R_TYPE(rel->r_info) == R_ARM_COPY) {
			void *dstaddr;
			const Elf_Sym *dstsym;
			const char *name;
			size_t size;
			const void *srcaddr;
			const Elf_Sym *srcsym;
			const Obj_Entry *srcobj, *defobj;
			SymLook req;
			int res;

			dstaddr = (void *)(dstobj->relocbase + rel->r_offset);
			dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info);
			name = dstobj->strtab + dstsym->st_name;
			size = dstsym->st_size;

			symlook_init(&req, name);
			req.ventry = fetch_ventry(dstobj,
			    ELF_R_SYM(rel->r_info));
			req.flags = SYMLOOK_EARLY;

			for (srcobj = globallist_next(dstobj); srcobj != NULL;
			    srcobj = globallist_next(srcobj)) {
				res = symlook_obj(&req, srcobj);
				if (res == 0) {
					srcsym = req.sym_out;
					defobj = req.defobj_out;
					break;
				}
			}
			if (srcobj == NULL) {
				_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
				    name, dstobj->path);
				return (-1);
			}

			srcaddr = (const void *)(defobj->relocbase +
			    srcsym->st_value);
			memcpy(dstaddr, srcaddr, size);
		}
	}
	return 0;
}

void _rtld_bind_start(void);
void _rtld_relocate_nonplt_self(Elf_Dyn *, Elf_Addr);

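/*
 * Self-relocation of the dynamic loader, called before any of its other
 * initialization has run.  Only the DT_REL/DT_RELSZ table is scanned,
 * and every entry is assumed to be a base-relative fixup, applied by
 * adding the load base to the word at the target address.
 */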
void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rel *rel = NULL, *rellim;
	Elf_Addr relsz = 0;
	Elf_Addr *where;

	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELSZ:
			relsz = dynp->d_un.d_val;
			break;
		}
	}
	rellim = (const Elf_Rel *)((const char *)rel + relsz);
	for (; rel < rellim; rel++) {
		where = (Elf_Addr *)(relocbase + rel->r_offset);

		*where += (Elf_Addr)relocbase;
	}
}

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(void *where)
{
	Elf_Addr res;

	memcpy(&res, where, sizeof(res));

	return (res);
}

static __inline void
store_ptr(void *where, Elf_Addr val)
{

	memcpy(where, &val, sizeof(val));
}

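/*
 * Apply a single non-PLT relocation, dispatching on its type.
 * Returns 0 on success and -1 on error.
 */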
static int
reloc_nonplt_object(Obj_Entry *obj, const Elf_Rel *rel, SymCache *cache,
    int flags, RtldLockState *lockstate)
{
	Elf_Addr        *where;
	const Elf_Sym   *def;
	const Obj_Entry *defobj;
	Elf_Addr         tmp;
	unsigned long	 symnum;

	where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	symnum = ELF_R_SYM(rel->r_info);

	switch (ELF_R_TYPE(rel->r_info)) {
	case R_ARM_NONE:
		break;

#if 1 /* XXX should not occur */
	case R_ARM_PC24: {	/* word32 S - P + A */
		Elf32_Sword addend;

		/*
		 * Extract addend and sign-extend if needed.
		 */
		addend = *where;
		if (addend & 0x00800000)
			addend |= 0xff000000;

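		/*
		 * The addend is the signed 24-bit word offset taken from the
		 * branch instruction, hence the shift left by two when it is
		 * applied below; the resulting displacement must stay within
		 * the +/-32MB reach of an ARM branch, which the range check
		 * enforces.
		 */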
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		tmp = (Elf_Addr)obj->relocbase + def->st_value
		    - (Elf_Addr)where + (addend << 2);
		if ((tmp & 0xfe000000) != 0xfe000000 &&
		    (tmp & 0xfe000000) != 0) {
			_rtld_error(
			    "%s: R_ARM_PC24 relocation @ %p to %s failed "
			    "(displacement %ld (%#lx) out of range)",
			    obj->path, where,
			    obj->strtab + obj->symtab[symnum].st_name,
			    (long) tmp, (long) tmp);
			return -1;
		}
		tmp >>= 2;
		*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
		dbg("PC24 %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)*where, where, defobj->path);
		break;
	}
#endif

	case R_ARM_ABS32:	/* word32 B + S + A */
	case R_ARM_GLOB_DAT:	/* word32 B + S */
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)defobj->relocbase +
			    def->st_value;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)defobj->relocbase +
			    def->st_value;
			store_ptr(where, tmp);
		}
		dbg("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp, where, defobj->path);
		break;

	case R_ARM_RELATIVE:	/* word32 B + A */
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)obj->relocbase;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)obj->relocbase;
			store_ptr(where, tmp);
		}
		dbg("RELATIVE in %s --> %p", obj->path,
		    (void *)tmp);
		break;

	case R_ARM_COPY:
		/*
		 * These are deferred until all other relocations have
		 * been done.  All we do here is make sure that the
		 * COPY relocation is not in a shared library.  They
		 * are allowed only in executable files.
		 */
		if (!obj->mainprog) {
			_rtld_error(
			    "%s: Unexpected R_COPY relocation in shared library",
			    obj->path);
			return -1;
		}
		dbg("COPY (avoid in main)");
		break;

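	/*
	 * TLS relocations: TLS_DTPOFF32 stores the symbol's offset within
	 * its module's TLS block, TLS_DTPMOD32 stores the module's index
	 * used for DTV lookups, and TLS_TPOFF32 stores the symbol's offset
	 * into the static TLS area (the object's tlsoffset plus the symbol
	 * value).
	 */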
	case R_ARM_TLS_DTPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(def->st_value);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);
		break;

	case R_ARM_TLS_DTPMOD32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(defobj->tlsindex);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPMOD32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);
		break;

	case R_ARM_TLS_TPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		if (!defobj->tls_static && !allocate_tls_offset(obj))
			return -1;

		tmp = (Elf_Addr)def->st_value + defobj->tlsoffset;
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);
		dbg("TLS_TPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);
		break;

	default:
		dbg("sym = %lu, type = %lu, offset = %p, "
		    "contents = %p, symbol = %s",
		    symnum, (u_long)ELF_R_TYPE(rel->r_info),
		    (void *)rel->r_offset, (void *)load_ptr(where),
		    obj->strtab + obj->symtab[symnum].st_name);
		_rtld_error("%s: Unsupported relocation type %ld "
		    "in non-PLT relocations\n",
		    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
		return -1;
	}
	return 0;
}

/*
 * Process non-PLT relocations.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	SymCache *cache;
	int r = -1;

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);
	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread; only a limited
	 * amount of stack is available, so we cannot use alloca().
	 */
	cache = calloc(obj->dynsymcount, sizeof(SymCache));
	/* No need to check for NULL here; the lookups tolerate a NULL cache. */

	rellim = (const Elf_Rel *)((const char *)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		if (reloc_nonplt_object(obj, rel, cache, flags, lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache != NULL)
		free(cache);
	return (r);
}

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel +
	    obj->pltrelsize);
	for (rel = obj->pltrel;  rel < rellim;  rel++) {
		Elf_Addr *where;

		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

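		/*
		 * The static linker stores an unrelocated address back into
		 * the PLT in each jump-slot GOT entry, so lazy binding only
		 * requires adding the load base here; the real target is
		 * filled in via _rtld_bind_start on the first call.
		 */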
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots.
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel + obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);
		reloc_jmpslot(where, target, defobj, obj,
		    (const Elf_Rel *) rel);
	}

	obj->jmpslots_done = true;

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
    const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
    const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

	if (*where != target && !ld_bind_not)
		*where = target;
	return (target);
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

}

void
allocate_initial_tls(Obj_Entry *objs)
{
	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}

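/*
 * Entry point for dynamic TLS references: the tls_index generated by the
 * compiler carries a module index and an offset, which are resolved
 * through the thread's DTV (reached via the TCB) by tls_get_addr_common().
 */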
void *
__tls_get_addr(tls_index* ti)
{
	uintptr_t **dtvp;

	dtvp = &_tcb_get()->tcb_dtv;
	return (tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset));
}