xref: /freebsd/sys/arm/arm/elf_machdep.c (revision 85007872d1227006adf2ce119fe30de856cbe12d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/linker.h>
#include <sys/reg.h>
#include <sys/sysent.h>
#include <sys/imgact_elf.h>
#include <sys/proc.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>

#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/stack.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
#include "opt_ddb.h"            /* for DDB */
#include "opt_global.h"         /* for KDTRACE_HOOKS */
#include "opt_stack.h"          /* for STACK */

static bool elf32_arm_abi_supported(const struct image_params *,
    const int32_t *, const uint32_t *);

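/*
 * CPU feature bits advertised to user space via the AT_HWCAP and
 * AT_HWCAP2 ELF auxiliary vector entries (through sv_hwcap/sv_hwcap2
 * in the sysvec below).
 */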
u_long elf_hwcap;
u_long elf_hwcap2;

struct sysentvec elf32_freebsd_sysvec = {
	.sv_size	= SYS_MAXSYSCALL,
	.sv_table	= sysent,
	.sv_fixup	= __elfN(freebsd_fixup),
	.sv_sendsig	= sendsig,
	.sv_sigcode	= sigcode,
	.sv_szsigcode	= &szsigcode,
	.sv_name	= "FreeBSD ELF32",
	.sv_coredump	= __elfN(coredump),
	.sv_elf_core_osabi = ELFOSABI_FREEBSD,
	.sv_elf_core_abi_vendor = FREEBSD_ABI_VENDOR,
	.sv_elf_core_prepare_notes = __elfN(prepare_notes),
	.sv_minsigstksz	= MINSIGSTKSZ,
	.sv_minuser	= VM_MIN_ADDRESS,
	.sv_maxuser	= VM_MAXUSER_ADDRESS,
	.sv_usrstack	= USRSTACK,
	.sv_psstrings	= PS_STRINGS,
	.sv_psstringssz	= sizeof(struct ps_strings),
	.sv_stackprot	= VM_PROT_ALL,
	.sv_copyout_auxargs = __elfN(freebsd_copyout_auxargs),
	.sv_copyout_strings = exec_copyout_strings,
	.sv_setregs	= exec_setregs,
	.sv_fixlimit	= NULL,
	.sv_maxssiz	= NULL,
	.sv_flags	=
			  SV_ASLR | SV_SHP | SV_TIMEKEEP | SV_RNG_SEED_VER |
			  SV_ABI_FREEBSD | SV_ILP32 | SV_SIGSYS,
	.sv_set_syscall_retval = cpu_set_syscall_retval,
	.sv_fetch_syscall_args = cpu_fetch_syscall_args,
	.sv_syscallnames = syscallnames,
	.sv_shared_page_base = SHAREDPAGE,
	.sv_shared_page_len = PAGE_SIZE,
	.sv_schedtail	= NULL,
	.sv_thread_detach = NULL,
	.sv_trap	= NULL,
	.sv_hwcap	= &elf_hwcap,
	.sv_hwcap2	= &elf_hwcap2,
	.sv_hwcap3	= NULL,
	.sv_hwcap4	= NULL,
	.sv_onexec_old	= exec_onexec_old,
	.sv_onexit	= exit_onexit,
	.sv_regset_begin = SET_BEGIN(__elfN(regset)),
	.sv_regset_end  = SET_LIMIT(__elfN(regset)),
};
INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec);

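/*
 * Brand information used by the generic ELF image activator to recognize
 * FreeBSD/ARM executables and to select the sysvec and run-time linker.
 */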
static Elf32_Brandinfo freebsd_brand_info = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_ARM,
	.compat_3_brand	= "FreeBSD",
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &elf32_freebsd_sysvec,
	.interp_newpath	= NULL,
	.brand_note	= &elf32_freebsd_brandnote,
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE,
	.header_supported= elf32_arm_abi_supported,
};

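/* Register the FreeBSD/ARM brand with the ELF image activator at boot. */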
SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST,
	(sysinit_cfunc_t) elf32_insert_brand_entry,
	&freebsd_brand_info);

static bool
elf32_arm_abi_supported(const struct image_params *imgp,
    const int32_t *osrel __unused, const uint32_t *fctl0 __unused)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;

	/*
	 * When configured for EABI, FreeBSD supports EABI versions 4 and 5.
	 */
	if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) {
		if (bootverbose)
			uprintf("Attempting to execute non-EABI binary "
			    "(rev %d) image %s\n",
			    EF_ARM_EABI_VERSION(hdr->e_flags),
			    imgp->args->fname);
		return (false);
	}
	return (true);
}

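/*
 * ARM emits no additional machine-dependent note data for core dumps
 * here; register notes come from the generic note and regset code.
 */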
void
elf32_dump_thread(struct thread *td __unused, void *dst __unused,
    size_t *off __unused)
{
}

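/* ifunc relocations are not supported for 32-bit ARM kernel modules. */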
bool
elf_is_ifunc_reloc(Elf_Size r_info __unused)
{

	return (false);
}

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(Elf_Addr *where)
{
	Elf_Addr res;

	if (RELOC_ALIGNED_P(where))
		return (*where);
	memcpy(&res, where, sizeof(res));
	return (res);
}

static __inline void
store_ptr(Elf_Addr *where, Elf_Addr val)
{
	if (RELOC_ALIGNED_P(where))
		*where = val;
	else
		memcpy(where, &val, sizeof(val));
}
#undef RELOC_ALIGNED_P

/* Process one elf relocation with addend. */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf_Addr *where;
	Elf_Addr addr;
	Elf_Addr addend;
	Elf_Word rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;
	int error;

	switch (type) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)data;
		where = (Elf_Addr *) (relocbase + rel->r_offset);
		addend = load_ptr(where);
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf_Addr *) (relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("unknown reloc type %d\n", type);
	}

	if (local) {
		if (rtype == R_ARM_RELATIVE) {	/* A + B */
			addr = elf_relocaddr(lf, relocbase + addend);
			if (load_ptr(where) != addr)
				store_ptr(where, addr);
		}
		return (0);
	}

	switch (rtype) {
		case R_ARM_NONE:	/* none */
			break;

		case R_ARM_ABS32:
			error = lookup(lf, symidx, 1, &addr);
			if (error != 0)
				return (-1);
			store_ptr(where, addr + load_ptr(where));
			break;

		case R_ARM_COPY:	/* none */
			/*
			 * There shouldn't be copy relocations in kernel
			 * objects.
			 */
			printf("kldload: unexpected R_COPY relocation, "
			    "symbol index %d\n", symidx);
			return (-1);
			break;

		case R_ARM_JUMP_SLOT:
			error = lookup(lf, symidx, 1, &addr);
			if (error == 0) {
				store_ptr(where, addr);
				return (0);
			}
			return (-1);
		case R_ARM_RELATIVE:
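			/* Handled above in the local relocation pass. */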
			break;

		default:
			printf("kldload: unexpected relocation type %d, "
			    "symbol index %d\n", rtype, symidx);
			return (-1);
	}
	return (0);
}

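/* Process one relocation that may reference an external symbol. */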
int
elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
    elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
}

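/*
 * Process one local relocation; only R_ARM_RELATIVE entries are
 * acted on in this pass.
 */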
int
elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
}

int
elf_cpu_load_file(linker_file_t lf)
{

	/*
	 * The pmap code does not do an icache sync upon establishing executable
	 * mappings in the kernel pmap.  It's an optimization based on the fact
	 * that kernel memory allocations always have EXECUTABLE protection even
	 * when the memory isn't going to hold executable code.  The only time
	 * kernel memory holding instructions does need a sync is after loading
	 * a kernel module, and that's when this function gets called.
	 *
	 * This syncs data and instruction caches after loading a module.  We
	 * don't worry about the kernel itself (lf->id is 1) as locore.S did
	 * that on entry.  Even if data cache maintenance was done by IO code,
	 * the relocation fixup process creates dirty cache entries that we must
	 * write back before doing icache sync.  The instruction cache sync also
	 * invalidates the branch predictor cache on platforms that have one.
	 */
	if (lf->id == 1)
		return (0);
	dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size);
	icache_inv_all();

#if defined(DDB) || defined(KDTRACE_HOOKS) || defined(STACK)
	/*
	 * Inform the stack(9) code of the new module, so it can acquire its
	 * per-module unwind data.
	 */
	unwind_module_loaded(lf);
#endif

	return (0);
}

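/* No ARM-specific dynamic section entries require extra processing. */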
int
elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused)
{

	return (0);
}

int
elf_cpu_unload_file(linker_file_t lf)
{

#if defined(DDB) || defined(KDTRACE_HOOKS) || defined(STACK)
	/* Inform the stack(9) code that this module is gone. */
	unwind_module_unloaded(lf);
#endif
	return (0);
}