/*      $NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $   */

/*-
 * Copyright (C) 1998   Tsubai Masanari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include "debug.h"
#include "rtld.h"

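/*
 * In the 64-bit PowerPC (ELFv1) ABI a function pointer refers to a
 * function descriptor rather than to code: three doublewords holding
 * the entry address, the TOC (r2) value for the callee, and an
 * environment pointer (unused by C).  The PLT slots manipulated below
 * are whole descriptors of this shape.
 */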
struct funcdesc {
	Elf_Addr addr;
	Elf_Addr toc;
	Elf_Addr env;
};

/*
 * Process the R_PPC_COPY relocations
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela;  rela < relalim;  rela++) {
		void *dstaddr;
		const Elf_Sym *dstsym;
		const char *name;
		unsigned long hash;
		size_t size;
		const void *srcaddr;
		const Elf_Sym *srcsym = NULL;
		Obj_Entry *srcobj;
		const Ver_Entry *ve;

		if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
			continue;
		}

		dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		hash = elf_hash(name);
		size = dstsym->st_size;
		ve = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));

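		/*
		 * Find the shared object that actually defines this
		 * symbol; its data is then copied into the main
		 * program's copy addressed by the COPY relocation.
		 */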
		for (srcobj = dstobj->next;  srcobj != NULL;
		     srcobj = srcobj->next) {
			if ((srcsym = symlook_obj(name, hash, srcobj, ve, 0))
			    != NULL) {
				break;
			}
		}

		if (srcobj == NULL) {
			_rtld_error("Undefined symbol \"%s\" referenced from"
			    " COPY relocation in %s", name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
		dbg("copy_reloc: src=%p,dst=%p,size=%zd\n",srcaddr,dstaddr,size);
	}

	return (0);
}


/*
 * Perform early relocation of the run-time linker image
 */
void
reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rela *rela = 0, *relalim;
	Elf_Addr relasz = 0;
	Elf_Addr *where;

	/*
	 * Extract the rela/relasz values from the dynamic section
	 */
	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_RELA:
			rela = (const Elf_Rela *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELASZ:
			relasz = dynp->d_un.d_val;
			break;
		}
	}

	/*
	 * Relocate these values.  At this early stage the only
	 * relocations in rtld's own image are RELATIVE ones, so each
	 * entry is simply relocbase + addend and no symbol lookup is
	 * required.
	 */
	relalim = (const Elf_Rela *)((caddr_t)rela + relasz);
	for (; rela < relalim; rela++) {
		where = (Elf_Addr *)(relocbase + rela->r_offset);
		*where = (Elf_Addr)(relocbase + rela->r_addend);
	}
}


/*
 * Relocate a non-PLT object with addend.
 */
static int
reloc_nonplt_object(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
		    SymCache *cache)
{
	Elf_Addr        *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	const Elf_Sym   *def;
	const Obj_Entry *defobj;
	Elf_Addr         tmp;

	switch (ELF_R_TYPE(rela->r_info)) {

	case R_PPC_NONE:
		break;

	case R_PPC64_ADDR64:	/* doubleword64 S + A */
	case R_PPC_GLOB_DAT:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
				  false, cache);
		if (def == NULL) {
			return (-1);
		}

		tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
		    rela->r_addend);

		/* Don't issue write if unnecessary; avoid COW page fault */
		if (*where != tmp) {
			*where = tmp;
		}
		break;

	case R_PPC_RELATIVE:	/* doubleword64 B + A */
		tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);

		/* As above, don't issue write unnecessarily */
		if (*where != tmp) {
			*where = tmp;
		}
		break;

	case R_PPC_COPY:
		/*
		 * These are deferred until all other relocations
		 * have been done.  All we do here is make sure
		 * that the COPY relocation is not in a shared
		 * library.  They are allowed only in executable
		 * files.
		 */
		if (!obj->mainprog) {
			_rtld_error("%s: Unexpected R_COPY relocation"
			    " in shared library", obj->path);
			return (-1);
		}
		break;

	case R_PPC_JMP_SLOT:
		/*
		 * These will be handled by the plt/jmpslot routines
		 */
		break;

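	/*
	 * TLS relocations: DTPMOD64 supplies the module index used to
	 * key the thread's dynamic thread vector, DTPREL64 an offset
	 * within that module's TLS block (biased by TLS_DTV_OFFSET),
	 * and TPREL64 an offset from the thread pointer into the
	 * static TLS block (biased by TLS_TP_OFFSET).
	 */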
	case R_PPC64_DTPMOD64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    false, cache);

		if (def == NULL)
			return (-1);

		*where = (Elf_Addr) defobj->tlsindex;

		break;

	case R_PPC64_TPREL64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    false, cache);

		if (def == NULL)
			return (-1);

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset((Obj_Entry*) defobj)) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return (-1);
			}
		}

		*(Elf_Addr **)where = *where * sizeof(Elf_Addr)
		    + (Elf_Addr *)(def->st_value + rela->r_addend
		    + defobj->tlsoffset - TLS_TP_OFFSET);

		break;

	case R_PPC64_DTPREL64:
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    false, cache);

		if (def == NULL)
			return (-1);

		*where += (Elf_Addr)(def->st_value + rela->r_addend
		    - TLS_DTV_OFFSET);

		break;

	default:
		_rtld_error("%s: Unsupported relocation type %ld"
		    " in non-PLT relocations\n", obj->path,
		    ELF_R_TYPE(rela->r_info));
		return (-1);
	}
	return (0);
}


/*
 * Process non-PLT relocations
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	int bytes = obj->nchains * sizeof(SymCache);
	int r = -1;

	/*
	 * The dynamic loader may be called from a thread; only a
	 * limited amount of stack is available, so we cannot use
	 * alloca() for the symbol cache.
	 */
	if (obj != obj_rtld) {
		cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
		    -1, 0);
		if (cache == MAP_FAILED)
			cache = NULL;
	} else
		cache = NULL;

	/*
	 * From the SVR4 PPC ABI:
	 * "The PowerPC family uses only the Elf32_Rela relocation
	 *  entries with explicit addends."
	 */
	relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		if (reloc_nonplt_object(obj_rtld, obj, rela, cache) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache) {
		munmap(cache, bytes);
	}
	return (r);
}


/*
 * Initialise a PLT slot to the resolving trampoline
 */
static int
reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
{
	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	Elf_Addr *glink;
	long reloff;

	reloff = rela - obj->pltrela;

	if (obj->priv == NULL)
		obj->priv = malloc(obj->pltrelasize);
	glink = obj->priv + reloff*sizeof(Elf_Addr)*2;

	if ((reloff < 0) || (reloff >= 0x8000)) {
		return (-1);
	}

	dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%p",
	    (void *)where, reloff, glink);

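	/*
	 * Fill the slot with a copy of _rtld_bind_start's function
	 * descriptor, but point its env field at a private two-word
	 * "glink" cookie recording the object and the byte offset of
	 * this relocation, for use when the call is later resolved.
	 */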
	memcpy(where, _rtld_bind_start, sizeof(struct funcdesc));
	((struct funcdesc *)(where))->env = (Elf_Addr)glink;
	*(glink++) = (Elf_Addr)obj;
	*(glink++) = reloff*sizeof(Elf_Rela);

	return (0);
}


/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	if (obj->pltrelasize != 0) {
		relalim = (const Elf_Rela *)((char *)obj->pltrela +
		    obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);

			if (reloc_plt_object(obj, rela) < 0) {
				return (-1);
			}
		}
	}

	return (0);
}


/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    true, NULL);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);

#if 0
		/* PG XXX */
		dbg("\"%s\" in \"%s\" --> %p in \"%s\"",
		    defobj->strtab + def->st_name, basename(obj->path),
		    (void *)target, basename(defobj->path));
#endif

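		/*
		 * Each jump slot here is a full function descriptor,
		 * so an undefined weak symbol must have all three
		 * doublewords cleared, not just its entry address.
		 */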
		if (def == &sym_zero) {
			/* Zero undefined weak symbols */
			bzero(where, sizeof(struct funcdesc));
		} else {
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *) rela);
		}
	}

	obj->jmpslots_done = true;

	return (0);
}


/*
 * Update the value of a PLT jump slot.
 */
Elf_Addr
reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
	      const Obj_Entry *obj, const Elf_Rel *rel)
{
	dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
	    (void *)wherep, (void *)target, *(Elf_Addr *)target,
	    (Elf_Addr)defobj->relocbase);

	/*
	 * At the PLT entry pointed at by `wherep', construct
	 * a direct transfer to the now fully resolved function
	 * address.
	 */

	memcpy(wherep, (void *)target, sizeof(struct funcdesc));
	if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
		/*
		 * XXX: It is possible (e.g. LD_BIND_NOW) that the function
		 * descriptor we are copying has not yet been relocated.
		 * If this happens, fix it.
		 */

		((struct funcdesc *)(wherep))->addr +=
		    (Elf_Addr)defobj->relocbase;
		((struct funcdesc *)(wherep))->toc +=
		    (Elf_Addr)defobj->relocbase;
	}

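	/*
	 * Flush the updated descriptor out of the data cache so the
	 * change is visible in memory before anything calls through
	 * this slot.
	 */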
	__asm __volatile("dcbst 0,%0; sync" :: "r"(wherep) : "memory");

	return (target);
}

void
init_pltgot(Obj_Entry *obj)
{
	/* Nothing to do here; PLT slots are fully set up in reloc_plt(). */
}

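/*
 * Set up the static TLS block for the initial thread and install the
 * thread pointer (r13 in the 64-bit PowerPC ABI) so that it addresses
 * the block with the usual TLS_TP_OFFSET/TCB bias.
 */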
void
allocate_initial_tls(Obj_Entry *list)
{
	register Elf_Addr **tp __asm__("r13");
	Elf_Addr **_tp;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	_tp = (Elf_Addr **) ((char *)allocate_tls(list, NULL, TLS_TCB_SIZE, 16)
	    + TLS_TP_OFFSET + TLS_TCB_SIZE);

	/*
	 * XXX gcc seems to ignore 'tp = _tp;'
	 */
	__asm __volatile("mr %0,%1" : "=r"(tp) : "r"(_tp));
}

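/*
 * Look up a dynamic-model TLS variable via this thread's DTV; the
 * TLS_DTV_OFFSET added to the result undoes the bias applied by the
 * R_PPC64_DTPREL64 relocation above.
 */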
void*
__tls_get_addr(tls_index* ti)
{
	register Elf_Addr **tp __asm__("r13");
	char *p;

	p = tls_get_addr_common((Elf_Addr**)((Elf_Addr)tp - TLS_TP_OFFSET
	    - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);

	return (p + TLS_DTV_OFFSET);
}
499