xref: /freebsd/libexec/rtld-elf/riscv/reloc.c (revision d9f0ce31900a48d1a2bfc1c8c86f79d1e831451a)
/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory as part of the CTSRD Project, with support from the UK Higher
 * Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>

#include <stdlib.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_printf.h"

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with this macro.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

void _exit(int);

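/*
 * Switch the global pointer to obj's "_gp" symbol, if the object defines
 * one, and return the previous gp so the caller can restore it.  On RISC-V,
 * gp-relative (linker-relaxed) accesses within an object generally expect
 * gp to hold that object's global pointer while its code runs.
 */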
uint64_t
set_gp(Obj_Entry *obj)
{
	uint64_t old;
	SymLook req;
	uint64_t gp;
	int res;

	__asm __volatile("mv    %0, gp" : "=r"(old));

	symlook_init(&req, "_gp");
	req.ventry = NULL;
	req.flags = SYMLOOK_EARLY;
	res = symlook_obj(&req, obj);

	if (res == 0) {
		gp = req.sym_out->st_value;
		__asm __volatile("mv    gp, %0" :: "r"(gp));
	}

	return (old);
}

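/*
 * Prime the PLT GOT for lazy binding: by convention the first slot holds
 * the binder entry point (_rtld_bind_start) and the second a back-pointer
 * to the Obj_Entry, which the PLT stubs pass along so the binder can tell
 * which object a lazily bound call came from.
 */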
void
init_pltgot(Obj_Entry *obj)
{

	if (obj->pltgot != NULL) {
		obj->pltgot[0] = (Elf_Addr)&_rtld_bind_start;
		obj->pltgot[1] = (Elf_Addr)obj;
	}
}

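/*
 * Resolve R_RISCV_COPY relocations in the main program: look the symbol up
 * in the objects that follow the main program in the global search list and
 * copy the defining object's initialized data into the program's own storage
 * for that variable.
 */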
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Obj_Entry *srcobj, *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *srcsym;
	const Elf_Sym *dstsym;
	const void *srcaddr;
	const char *name;
	void *dstaddr;
	SymLook req;
	size_t size;
	int res;

	/*
	 * COPY relocs are invalid outside of the main program
	 */
	assert(dstobj->mainprog);

	relalim = (const Elf_Rela *)((char *)dstobj->rela +
	    dstobj->relasize);
	for (rela = dstobj->rela; rela < relalim; rela++) {
		if (ELF_R_TYPE(rela->r_info) != R_RISCV_COPY)
			continue;

		dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
		name = dstobj->strtab + dstsym->st_name;
		size = dstsym->st_size;

		symlook_init(&req, name);
		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
		req.flags = SYMLOOK_EARLY;

		for (srcobj = globallist_next(dstobj); srcobj != NULL;
		     srcobj = globallist_next(srcobj)) {
			res = symlook_obj(&req, srcobj);
			if (res == 0) {
				srcsym = req.sym_out;
				defobj = req.defobj_out;
				break;
			}
		}
		if (srcobj == NULL) {
			_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
			    name, dstobj->path);
			return (-1);
		}

		srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
		memcpy(dstaddr, srcaddr, size);
	}

	return (0);
}

/*
 * Process the PLT relocations.
 */
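/*
 * For lazy binding only the load base needs to be applied here: the
 * R_RISCV_JUMP_SLOT GOT entries are expected to start out holding the
 * link-time address of the PLT header, so after rebasing, the first call
 * through a slot enters _rtld_bind_start and is resolved on demand.
 */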
int
reloc_plt(Obj_Entry *obj)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;

	relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where;

		assert(ELF_R_TYPE(rela->r_info) == R_RISCV_JUMP_SLOT);

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;

	relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_RISCV_JUMP_SLOT:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL) {
				dbg("reloc_jmpslots: sym not found");
				return (-1);
			}

			*where = (Elf_Addr)(defobj->relocbase + def->st_value);
			break;
		default:
			_rtld_error("Unknown relocation type %x in jmpslot",
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

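/*
 * Fix up a single jump slot; called from the binder once a lazily bound
 * call has been resolved to `target' in defobj.
 */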
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *defobj,
    const Obj_Entry *obj, const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_RISCV_JUMP_SLOT);

	if (*where != target)
		*where = target;

	return (target);
}

/*
 * Process non-PLT relocations
 */
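/*
 * Roughly, with S the symbol value, A the addend and B the load base of
 * the defining object, the cases below compute:
 *
 *	R_RISCV_64		*where  = B + S + A
 *	R_RISCV_RELATIVE	*where  = B(obj) + A
 *	R_RISCV_TLS_DTPMOD64	*where += TLS module index of the definer
 *	R_RISCV_TLS_DTPREL64	*where += S + A - TLS_DTV_OFFSET
 *	R_RISCV_TLS_TPREL64	*where  = S + A + tlsoffset - TLS_TP_OFFSET
 */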
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;
	SymCache *cache;
	Elf_Addr *where;
	unsigned long symnum;

	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread; we have limited
	 * amounts of stack available, so we cannot use alloca().
	 */
	if (obj == obj_rtld)
		cache = NULL;
	else
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
		/* No need to check for NULL here */

	relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		symnum = ELF_R_SYM(rela->r_info);

		switch (ELF_R_TYPE(rela->r_info)) {
		case R_RISCV_JUMP_SLOT:
			/* This will be handled by the plt/jmpslot routines */
			break;
		case R_RISCV_NONE:
			break;
		case R_RISCV_64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				return (-1);

			*where = (Elf_Addr)(defobj->relocbase + def->st_value +
			    rela->r_addend);
			break;
		case R_RISCV_TLS_DTPMOD64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				return (-1);

			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_RISCV_COPY:
			/*
			 * These are deferred until all other relocations have
			 * been done. All we do here is make sure that the
			 * COPY relocation is not in a shared library. They
			 * are allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_RISCV_COPY "
				    "relocation in shared library", obj->path);
				return (-1);
			}
			break;
		case R_RISCV_TLS_DTPREL64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				return (-1);
			/*
			 * We lazily allocate offsets for static TLS as we
			 * see the first relocation that references the
			 * TLS block. This allows us to support (small
			 * amounts of) static TLS in dynamically loaded
			 * modules. If we run out of space, we generate an
			 * error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error(
					    "%s: No space available for static "
					    "Thread Local Storage", obj->path);
					return (-1);
				}
			}

			*where += (Elf_Addr)(def->st_value + rela->r_addend
			    - TLS_DTV_OFFSET);
			break;
		case R_RISCV_TLS_TPREL64:
			def = find_symdef(symnum, obj, &defobj, flags, cache,
			    lockstate);
			if (def == NULL)
				return (-1);

			/*
			 * We lazily allocate offsets for static TLS as we
			 * see the first relocation that references the
			 * TLS block. This allows us to support (small
			 * amounts of) static TLS in dynamically loaded
			 * modules. If we run out of space, we generate an
			 * error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error(
					    "%s: No space available for static "
					    "Thread Local Storage", obj->path);
					return (-1);
				}
			}

			*where = (def->st_value + rela->r_addend +
			    defobj->tlsoffset - TLS_TP_OFFSET);
			break;
		case R_RISCV_RELATIVE:
			*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
			break;
		default:
			rtld_printf("%s: Unhandled relocation %lu\n",
			    obj->path, ELF_R_TYPE(rela->r_info));
			return (-1);
		}
	}

	return (0);
}

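/*
 * RISC-V uses TLS variant I: the thread pointer sits just past the TCB,
 * biased by TLS_TP_OFFSET, which is why the value written to tp below is
 * the allocation address plus TLS_TP_OFFSET + TLS_TCB_SIZE.
 */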
void
allocate_initial_tls(Obj_Entry *objs)
{
	Elf_Addr **tp;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	tp = (Elf_Addr **) ((char *)allocate_tls(objs, NULL, TLS_TCB_SIZE, 16)
	    + TLS_TP_OFFSET + TLS_TCB_SIZE);

	__asm __volatile("mv  tp, %0" :: "r"(tp));
}

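/*
 * Compute the address of a dynamic TLS variable.  The TCB pointer is
 * recovered from tp by undoing the bias applied in allocate_initial_tls(),
 * and the result is re-biased by TLS_DTV_OFFSET to match the DTPREL
 * relocations handled above.
 */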
void *
__tls_get_addr(tls_index* ti)
{
	char *_tp;
	void *p;

	__asm __volatile("mv %0, tp" : "=r" (_tp));

	p = tls_get_addr_common((Elf_Addr**)((Elf_Addr)_tp - TLS_TP_OFFSET
	    - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);

	return (p + TLS_DTV_OFFSET);
}