xref: /freebsd/sys/kern/imgact_elf.c (revision a3e8fd0b7f663db7eafff527d5c3ca3bcfa8a537)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 Søren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 #include <sys/param.h>
34 #include <sys/exec.h>
35 #include <sys/fcntl.h>
36 #include <sys/imgact.h>
37 #include <sys/imgact_elf.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/mman.h>
43 #include <sys/namei.h>
44 #include <sys/pioctl.h>
45 #include <sys/proc.h>
46 #include <sys/procfs.h>
47 #include <sys/resourcevar.h>
48 #include <sys/systm.h>
49 #include <sys/signalvar.h>
50 #include <sys/stat.h>
51 #include <sys/sx.h>
52 #include <sys/syscall.h>
53 #include <sys/sysctl.h>
54 #include <sys/sysent.h>
55 #include <sys/vnode.h>
56 
57 #include <vm/vm.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_param.h>
60 #include <vm/pmap.h>
61 #include <vm/vm_map.h>
62 #include <vm/vm_object.h>
63 #include <vm/vm_extern.h>
64 
65 #include <machine/elf.h>
66 #include <machine/md_var.h>
67 
68 #define OLD_EI_BRAND	8
69 
70 __ElfType(Brandinfo);
71 __ElfType(Auxargs);
72 
73 static int __elfN(check_header)(const Elf_Ehdr *hdr);
74 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
75     const char *interp);
76 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
77     u_long *entry, size_t pagesize);
78 static int __elfN(load_section)(struct proc *p,
79     struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
80     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
81     vm_prot_t prot, size_t pagesize);
82 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
83 
84 static int elf_trace = 0;
85 #if __ELF_WORD_SIZE == 32
86 SYSCTL_INT(_debug, OID_AUTO, elf32_trace, CTLFLAG_RW, &elf_trace, 0, "");
87 #else
88 SYSCTL_INT(_debug, OID_AUTO, elf64_trace, CTLFLAG_RW, &elf_trace, 0, "");
89 #endif
90 
91 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
92 extern int fallback_elf_brand;
93 
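/*
 * Register an ELF brand in the first free slot of elf_brand_list[].
 * Returns 0 on success or -1 if the table is full.
 */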
94 int
95 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
96 {
97 	int i;
98 
99 	for (i = 0; i < MAX_BRANDS; i++) {
100 		if (elf_brand_list[i] == NULL) {
101 			elf_brand_list[i] = entry;
102 			break;
103 		}
104 	}
105 	if (i == MAX_BRANDS)
106 		return (-1);
107 	return (0);
108 }
109 
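/*
 * Remove a previously registered brand from elf_brand_list[].
 * Returns 0 on success or -1 if the entry was not found.
 */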
110 int
111 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
112 {
113 	int i;
114 
115 	for (i = 0; i < MAX_BRANDS; i++) {
116 		if (elf_brand_list[i] == entry) {
117 			elf_brand_list[i] = NULL;
118 			break;
119 		}
120 	}
121 	if (i == MAX_BRANDS)
122 		return (-1);
123 	return (0);
124 }
125 
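/*
 * Return TRUE if any process is still running on the syscall vector
 * belonging to the given brand, FALSE otherwise.
 */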
126 int
127 __elfN(brand_inuse)(Elf_Brandinfo *entry)
128 {
129 	struct proc *p;
130 	int rval = FALSE;
131 
132 	sx_slock(&allproc_lock);
133 	LIST_FOREACH(p, &allproc, p_list) {
134 		if (p->p_sysent == entry->sysvec) {
135 			rval = TRUE;
136 			break;
137 		}
138 	}
139 	sx_sunlock(&allproc_lock);
140 
141 	return (rval);
142 }
143 
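/*
 * Find the brand entry that matches this executable: first by the
 * EI_OSABI byte or old-style string brand in e_ident, then by the
 * interpreter path, and finally by the global fallback_elf_brand.
 */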
144 static Elf_Brandinfo *
145 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
146 {
147 	Elf_Brandinfo *bi;
148 	int i;
149 
150 	/*
151 	 * We support three types of branding -- (1) the ELF EI_OSABI field
152 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
153 	 * branding w/in the ELF header, and (3) the path given in the
154 	 * `interp_path' field.  We should also look for an ".note.ABI-tag" ELF
155 	 * section, now in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
156 	 */
157 
158 	/* If the executable has a brand, search for it in the brand list. */
159 	for (i = 0; i < MAX_BRANDS; i++) {
160 		bi = elf_brand_list[i];
161 		if (bi != NULL && hdr->e_machine == bi->machine &&
162 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
163 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
164 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
165 			return (bi);
166 	}
167 
168 	/* Lacking a known brand, search for a recognized interpreter. */
169 	if (interp != NULL) {
170 		for (i = 0; i < MAX_BRANDS; i++) {
171 			bi = elf_brand_list[i];
172 			if (bi != NULL && hdr->e_machine == bi->machine &&
173 			    strcmp(interp, bi->interp_path) == 0)
174 				return (bi);
175 		}
176 	}
177 
178 	/* Lacking a recognized interpreter, try the default brand */
179 	for (i = 0; i < MAX_BRANDS; i++) {
180 		bi = elf_brand_list[i];
181 		if (bi != NULL && hdr->e_machine == bi->machine &&
182 		    fallback_elf_brand == bi->brand)
183 			return (bi);
184 	}
185 	return (NULL);
186 }
187 
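/*
 * Sanity-check an ELF header: verify the magic number, class, data
 * encoding and version, and make sure at least one registered brand
 * supports the target machine.  Returns 0 or ENOEXEC.
 */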
188 static int
189 __elfN(check_header)(const Elf_Ehdr *hdr)
190 {
191 	Elf_Brandinfo *bi;
192 	int i;
193 
194 	if (!IS_ELF(*hdr) ||
195 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
196 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
197 	    hdr->e_ident[EI_VERSION] != EV_CURRENT)
198 		return (ENOEXEC);
199 
200 	/*
201 	 * Make sure we have at least one brand for this machine.
202 	 */
203 
204 	for (i = 0; i < MAX_BRANDS; i++) {
205 		bi = elf_brand_list[i];
206 		if (bi != NULL && bi->machine == hdr->e_machine)
207 			break;
208 	}
209 	if (i == MAX_BRANDS)
210 		return (ENOEXEC);
211 
212 	if (hdr->e_version != ELF_TARG_VER)
213 		return (ENOEXEC);
214 
215 	return (0);
216 }
217 
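/*
 * Map a partial (sub-page) piece of a segment: back the range with an
 * anonymous page in the target map, then copy the file data for
 * [start, end) into place via a temporary exec_map mapping.
 */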
218 static int
219 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
220 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
221 	vm_prot_t max)
222 {
223 	int error, rv;
224 	vm_offset_t off;
225 	vm_offset_t data_buf = 0;
226 
227 	/*
228 	 * Create the page if it doesn't exist yet. Ignore errors.
229 	 */
230 	vm_map_lock(map);
231 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
232 	    max, 0);
233 	vm_map_unlock(map);
234 
235 	/*
236 	 * Find the page from the underlying object.
237 	 */
238 	if (object) {
239 		vm_object_reference(object);
240 		rv = vm_map_find(exec_map,
241 				 object,
242 				 trunc_page(offset),
243 				 &data_buf,
244 				 PAGE_SIZE,
245 				 TRUE,
246 				 VM_PROT_READ,
247 				 VM_PROT_ALL,
248 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
249 		if (rv != KERN_SUCCESS) {
250 			vm_object_deallocate(object);
251 			return (rv);
252 		}
253 
254 		off = offset - trunc_page(offset);
255 		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
256 		    end - start);
257 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
258 		if (error) {
259 			return (KERN_FAILURE);
260 		}
261 	}
262 
263 	return (KERN_SUCCESS);
264 }
265 
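/*
 * Insert [start, end) of the given object into the target map with the
 * requested protection.  Unaligned head and tail fragments are handled
 * by __elfN(map_partial)(); if the file offset itself is not page
 * aligned, the data is copied through exec_map instead of being mapped
 * directly.
 */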
266 static int
267 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
268 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
269 	vm_prot_t max, int cow)
270 {
271 	int rv;
272 
273 	if (start != trunc_page(start)) {
274 		rv = __elfN(map_partial)(map, object, offset, start,
275 		    round_page(start), prot, max);
276 		if (rv)
277 			return (rv);
278 		offset += round_page(start) - start;
279 		start = round_page(start);
280 	}
281 	if (end != round_page(end)) {
282 		rv = __elfN(map_partial)(map, object, offset +
283 		    trunc_page(end) - start, trunc_page(end), end, prot, max);
284 		if (rv)
285 			return (rv);
286 		end = trunc_page(end);
287 	}
288 	if (end > start) {
289 		if (offset & PAGE_MASK) {
290 			vm_offset_t data_buf, off;
291 			vm_size_t sz;
292 			int error;
293 
294 			/*
295 			 * The mapping is not page aligned. This means we have
296 			 * to copy the data. Sigh.
297 			 */
298 			rv = vm_map_find(map, 0, 0, &start, end - start,
299 			    FALSE, prot, max, 0);
300 			if (rv)
301 				return (rv);
302 			while (start < end) {
303 				vm_object_reference(object);
304 				rv = vm_map_find(exec_map,
305 						 object,
306 						 trunc_page(offset),
307 						 &data_buf,
308 						 2 * PAGE_SIZE,
309 						 TRUE,
310 						 VM_PROT_READ,
311 						 VM_PROT_ALL,
312 						 (MAP_COPY_ON_WRITE
313 						  | MAP_PREFAULT_PARTIAL));
314 				if (rv != KERN_SUCCESS) {
315 					vm_object_deallocate(object);
316 					return (rv);
317 				}
318 				off = offset - trunc_page(offset);
319 				sz = end - start;
320 				if (sz > PAGE_SIZE)
321 					sz = PAGE_SIZE;
322 				error = copyout((caddr_t)data_buf + off,
323 				    (caddr_t)start, sz);
324 				vm_map_remove(exec_map, data_buf,
325 				    data_buf + 2 * PAGE_SIZE);
326 				if (error) {
327 					return (KERN_FAILURE);
328 				}
329 				start += sz;
330 			}
331 			rv = KERN_SUCCESS;
332 		} else {
333 			vm_map_lock(map);
334 			rv = vm_map_insert(map, object, offset, start, end,
335 			    prot, max, cow);
336 			vm_map_unlock(map);
337 		}
338 		return (rv);
339 	} else {
340 		return (KERN_SUCCESS);
341 	}
342 }
343 
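/*
 * Map one PT_LOAD segment into the process address space: the
 * file-backed portion is mapped copy-on-write, the remainder (bss) is
 * backed by zero-filled anonymous memory, and any trailing partial
 * page of file data is copied in by hand.
 */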
344 static int
345 __elfN(load_section)(struct proc *p, struct vmspace *vmspace,
346 	struct vnode *vp, vm_object_t object, vm_offset_t offset,
347 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
348 	size_t pagesize)
349 {
350 	size_t map_len;
351 	vm_offset_t map_addr;
352 	int error, rv;
353 	size_t copy_len;
354 	vm_offset_t file_addr;
355 	vm_offset_t data_buf = 0;
356 
357 	GIANT_REQUIRED;
358 
359 	error = 0;
360 
361 	/*
362 	 * It's necessary to fail if the filsz + offset taken from the
363 	 * header is greater than the actual file pager object's size.
364 	 * If we were to allow this, then the vm_map_find() below would
365 	 * walk right off the end of the file object and into the ether.
366 	 *
367 	 * While I'm here, might as well check for something else that
368 	 * is invalid: filsz cannot be greater than memsz.
369 	 */
370 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
371 	    filsz > memsz) {
372 		uprintf("elf_load_section: truncated ELF file\n");
373 		return (ENOEXEC);
374 	}
375 
376 #define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
377 #define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
378 
379 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
380 	file_addr = trunc_page_ps(offset, pagesize);
381 
382 	/*
383 	 * We have two choices.  We can either clear the data in the last page
384 	 * of an oversized mapping, or we can start the anon mapping a page
385 	 * early and copy the initialized data into that first page.  We
386 	 * choose the second.
387 	 */
388 	if (memsz > filsz)
389 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
390 	else
391 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
392 
393 	if (map_len != 0) {
394 		vm_object_reference(object);
395 		rv = __elfN(map_insert)(&vmspace->vm_map,
396 				      object,
397 				      file_addr,	/* file offset */
398 				      map_addr,		/* virtual start */
399 				      map_addr + map_len,/* virtual end */
400 				      prot,
401 				      VM_PROT_ALL,
402 				      MAP_COPY_ON_WRITE | MAP_PREFAULT);
403 		if (rv != KERN_SUCCESS) {
404 			vm_object_deallocate(object);
405 			return (EINVAL);
406 		}
407 
408 		/* we can stop now if we've covered it all */
409 		if (memsz == filsz) {
410 			return (0);
411 		}
412 	}
413 
414 
415 	/*
416 	 * We have to get the remaining bit of the file into the first part
417 	 * of the oversized map segment.  This is normally because the .data
418 	 * segment in the file is extended to provide bss.  It's a neat idea
419 	 * to try and save a page, but it's a pain in the behind to implement.
420 	 */
421 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
422 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
423 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
424 	    map_addr;
425 
426 	/* This had damn well better be true! */
427 	if (map_len != 0) {
428 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
429 		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
430 		if (rv != KERN_SUCCESS) {
431 			return (EINVAL);
432 		}
433 	}
434 
435 	if (copy_len != 0) {
436 		vm_offset_t off;
437 		vm_object_reference(object);
438 		rv = vm_map_find(exec_map,
439 				 object,
440 				 trunc_page(offset + filsz),
441 				 &data_buf,
442 				 PAGE_SIZE,
443 				 TRUE,
444 				 VM_PROT_READ,
445 				 VM_PROT_ALL,
446 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
447 		if (rv != KERN_SUCCESS) {
448 			vm_object_deallocate(object);
449 			return (EINVAL);
450 		}
451 
452 		/* send the page fragment to user space */
453 		off = trunc_page_ps(offset + filsz, pagesize) -
454 		    trunc_page(offset + filsz);
455 		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
456 		    copy_len);
457 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
458 		if (error) {
459 			return (error);
460 		}
461 	}
462 
463 	/*
464 	 * set it to the specified protection.
465 	 * XXX had better undo the damage from pasting over the cracks here!
466 	 */
467 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
468 	    round_page(map_addr + map_len),  prot, FALSE);
469 
470 	return (error);
471 }
472 
473 /*
474  * Load the file "file" into memory.  It may be either a shared object
475  * or an executable.
476  *
477  * The "addr" reference parameter is in/out.  On entry, it specifies
478  * the address where a shared object should be loaded.  If the file is
479  * an executable, this value is ignored.  On exit, "addr" specifies
480  * where the file was actually loaded.
481  *
482  * The "entry" reference parameter is out only.  On exit, it specifies
483  * the entry point for the loaded file.
484  */
485 static int
486 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
487 	u_long *entry, size_t pagesize)
488 {
489 	struct {
490 		struct nameidata nd;
491 		struct vattr attr;
492 		struct image_params image_params;
493 	} *tempdata;
494 	const Elf_Ehdr *hdr = NULL;
495 	const Elf_Phdr *phdr = NULL;
496 	struct nameidata *nd;
497 	struct vmspace *vmspace = p->p_vmspace;
498 	struct vattr *attr;
499 	struct image_params *imgp;
500 	vm_prot_t prot;
501 	u_long rbase;
502 	u_long base_addr = 0;
503 	int error, i, numsegs;
504 
505 	if (curthread->td_proc != p)
506 		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */
507 
508 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
509 	nd = &tempdata->nd;
510 	attr = &tempdata->attr;
511 	imgp = &tempdata->image_params;
512 
513 	/*
514 	 * Initialize part of the common data
515 	 */
516 	imgp->proc = p;
517 	imgp->uap = NULL;
518 	imgp->attr = attr;
519 	imgp->firstpage = NULL;
520 	imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);
521 	imgp->object = NULL;
522 
523 	if (imgp->image_header == NULL) {
524 		nd->ni_vp = NULL;
525 		error = ENOMEM;
526 		goto fail;
527 	}
528 
529 	/* XXXKSE */
530 	NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
531 
532 	if ((error = namei(nd)) != 0) {
533 		nd->ni_vp = NULL;
534 		goto fail;
535 	}
536 	NDFREE(nd, NDF_ONLY_PNBUF);
537 	imgp->vp = nd->ni_vp;
538 
539 	/*
540 	 * Check permissions, modes, uid, etc on the file, and "open" it.
541 	 */
542 	error = exec_check_permissions(imgp);
543 	if (error) {
544 		VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
545 		goto fail;
546 	}
547 
548 	error = exec_map_first_page(imgp);
549 	/*
550 	 * Also make certain that the interpreter stays the same, so set
551 	 * its VV_TEXT flag, too.
552 	 */
553 	if (error == 0)
554 		nd->ni_vp->v_vflag |= VV_TEXT;
555 
556 	VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
557 	vm_object_reference(imgp->object);
558 
559 	VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
560 	if (error)
561 		goto fail;
562 
563 	hdr = (const Elf_Ehdr *)imgp->image_header;
564 	if ((error = __elfN(check_header)(hdr)) != 0)
565 		goto fail;
566 	if (hdr->e_type == ET_DYN)
567 		rbase = *addr;
568 	else if (hdr->e_type == ET_EXEC)
569 		rbase = 0;
570 	else {
571 		error = ENOEXEC;
572 		goto fail;
573 	}
574 
575 	/* Only support headers that fit within first page for now */
576 	if ((hdr->e_phoff > PAGE_SIZE) ||
577 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
578 		error = ENOEXEC;
579 		goto fail;
580 	}
581 
582 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
583 
584 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
585 		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
586 			prot = 0;
587 			if (phdr[i].p_flags & PF_X)
588   				prot |= VM_PROT_EXECUTE;
589 			if (phdr[i].p_flags & PF_W)
590   				prot |= VM_PROT_WRITE;
591 			if (phdr[i].p_flags & PF_R)
592   				prot |= VM_PROT_READ;
593 
594 			if ((error = __elfN(load_section)(p, vmspace,
595 			    nd->ni_vp, imgp->object, phdr[i].p_offset,
596 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
597 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
598 			    pagesize)) != 0)
599 				goto fail;
600 			/*
601 			 * Establish the base address if this is the
602 			 * first segment.
603 			 */
604 			if (numsegs == 0)
605   				base_addr = trunc_page(phdr[i].p_vaddr +
606 				    rbase);
607 			numsegs++;
608 		}
609 	}
610 	*addr = base_addr;
611 	*entry = (unsigned long)hdr->e_entry + rbase;
612 
613 fail:
614 	if (imgp->firstpage)
615 		exec_unmap_first_page(imgp);
616 	if (imgp->image_header)
617 		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
618 		    PAGE_SIZE);
619 	if (imgp->object)
620 		vm_object_deallocate(imgp->object);
621 
622 	if (nd->ni_vp)
623 		vrele(nd->ni_vp);
624 
625 	free(tempdata, M_TEMP);
626 
627 	return (error);
628 }
629 
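/*
 * The ELF image activator: validate the header, select a brand, build
 * a new vmspace, load all PT_LOAD segments, load the interpreter named
 * by PT_INTERP (if any), and construct the auxargs consumed later by
 * __elfN(freebsd_fixup)().
 */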
630 static int
631 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
632 {
633 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
634 	const Elf_Phdr *phdr;
635 	Elf_Auxargs *elf_auxargs = NULL;
636 	struct vmspace *vmspace;
637 	vm_prot_t prot;
638 	u_long text_size = 0, data_size = 0, total_size = 0;
639 	u_long text_addr = 0, data_addr = 0;
640 	u_long seg_size, seg_addr;
641 	u_long addr, entry = 0, proghdr = 0;
642 	int error, i;
643 	const char *interp = NULL;
644 	Elf_Brandinfo *brand_info;
645 	char *path;
646 	struct thread *td = curthread;
647 	struct sysentvec *sv;
648 
649 	GIANT_REQUIRED;
650 
651 	/*
652 	 * Do we have a valid ELF header ?
653 	 */
654 	if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
655 		return (-1);
656 
657 	/*
658 	 * From here on down, we return an errno, not -1, as we've
659 	 * detected an ELF file.
660 	 */
661 
662 	if ((hdr->e_phoff > PAGE_SIZE) ||
663 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
664 		/* Only support headers in first page for now */
665 		return (ENOEXEC);
666 	}
667 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
668 
669 	/*
670 	 * From this point on, we may have resources that need to be freed.
671 	 */
672 
673 	VOP_UNLOCK(imgp->vp, 0, td);
674 
675 	for (i = 0; i < hdr->e_phnum; i++) {
676 		switch (phdr[i].p_type) {
677 	  	case PT_INTERP:	/* Path to interpreter */
678 			if (phdr[i].p_filesz > MAXPATHLEN ||
679 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
680 				error = ENOEXEC;
681 				goto fail;
682 			}
683 			interp = imgp->image_header + phdr[i].p_offset;
684 			break;
685 		default:
686 			break;
687 		}
688 	}
689 
690 	brand_info = __elfN(get_brandinfo)(hdr, interp);
691 	if (brand_info == NULL) {
692 		uprintf("ELF binary type \"%u\" not known.\n",
693 		    hdr->e_ident[EI_OSABI]);
694 		error = ENOEXEC;
695 		goto fail;
696 	}
697 	sv = brand_info->sysvec;
698 
699 	if ((error = exec_extract_strings(imgp)) != 0)
700 		goto fail;
701 
702 	exec_new_vmspace(imgp, sv);
703 
704 	vmspace = imgp->proc->p_vmspace;
705 
706 	for (i = 0; i < hdr->e_phnum; i++) {
707 		switch (phdr[i].p_type) {
708 		case PT_LOAD:	/* Loadable segment */
709 			prot = 0;
710 			if (phdr[i].p_flags & PF_X)
711   				prot |= VM_PROT_EXECUTE;
712 			if (phdr[i].p_flags & PF_W)
713   				prot |= VM_PROT_WRITE;
714 			if (phdr[i].p_flags & PF_R)
715   				prot |= VM_PROT_READ;
716 
717 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
718 			/*
719 			 * Some x86 binaries assume read == executable,
720 			 * notably the M3 runtime, and therefore cvsup.
721 			 */
722 			if (prot & VM_PROT_READ)
723 				prot |= VM_PROT_EXECUTE;
724 #endif
725 
726 			if ((error = __elfN(load_section)(imgp->proc, vmspace,
727 			    imgp->vp, imgp->object, phdr[i].p_offset,
728 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
729 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
730 			    sv->sv_pagesize)) != 0)
731   				goto fail;
732 
733 			seg_addr = trunc_page(phdr[i].p_vaddr);
734 			seg_size = round_page(phdr[i].p_memsz +
735 			    phdr[i].p_vaddr - seg_addr);
736 
737 			/*
738 			 * Is this .text or .data?  We can't use
739 			 * VM_PROT_WRITE or VM_PROT_EXEC; it breaks the
740 			 * alpha terribly and possibly does other bad
741 			 * things, so we stick to the old way of figuring
742 			 * it out:  if the segment contains the program
743 			 * entry point, it's a text segment; otherwise it
744 			 * is a data segment.
745 			 *
746 			 * Note that obreak() assumes that data_addr +
747 			 * data_size == end of data load area, and the ELF
748 			 * file format expects segments to be sorted by
749 			 * address.  If multiple data segments exist, the
750 			 * last one will be used.
751 			 */
752 			if (hdr->e_entry >= phdr[i].p_vaddr &&
753 			    hdr->e_entry < (phdr[i].p_vaddr +
754 			    phdr[i].p_memsz)) {
755 				text_size = seg_size;
756 				text_addr = seg_addr;
757 				entry = (u_long)hdr->e_entry;
758 			} else {
759 				data_size = seg_size;
760 				data_addr = seg_addr;
761 			}
762 			total_size += seg_size;
763 			break;
764 		case PT_PHDR: 	/* Program header table info */
765 			proghdr = phdr[i].p_vaddr;
766 			break;
767 		default:
768 			break;
769 		}
770 	}
771 
772 	if (data_addr == 0 && data_size == 0) {
773 		data_addr = text_addr;
774 		data_size = text_size;
775 	}
776 
777 	/*
778 	 * Check limits.  It should be safe to check the
779 	 * limits after loading the segments since we do
780 	 * not actually fault in all the segments' pages.
781 	 */
782 	if (data_size >
783 	    imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
784 	    text_size > maxtsiz ||
785 	    total_size >
786 	    imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
787 		error = ENOMEM;
788 		goto fail;
789 	}
790 
791 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
792 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
793 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
794 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
795 
796 	addr = ELF_RTLD_ADDR(vmspace);
797 
798 	imgp->entry_addr = entry;
799 
800 	imgp->proc->p_sysent = sv;
801 	if (interp != NULL) {
802 		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
803 		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
804 		    interp);
805 		if ((error = __elfN(load_file)(imgp->proc, path, &addr,
806 		    &imgp->entry_addr, sv->sv_pagesize)) != 0) {
807 			if ((error = __elfN(load_file)(imgp->proc, interp,
808 			    &addr, &imgp->entry_addr, sv->sv_pagesize)) != 0) {
809 				uprintf("ELF interpreter %s not found\n",
810 				    path);
811 				free(path, M_TEMP);
812 				goto fail;
813 			}
814 		}
815 		free(path, M_TEMP);
816 	}
817 
818 	/*
819 	 * Construct auxargs table (used by the fixup routine)
820 	 */
821 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
822 	elf_auxargs->execfd = -1;
823 	elf_auxargs->phdr = proghdr;
824 	elf_auxargs->phent = hdr->e_phentsize;
825 	elf_auxargs->phnum = hdr->e_phnum;
826 	elf_auxargs->pagesz = PAGE_SIZE;
827 	elf_auxargs->base = addr;
828 	elf_auxargs->flags = 0;
829 	elf_auxargs->entry = entry;
830 	elf_auxargs->trace = elf_trace;
831 
832 	imgp->auxargs = elf_auxargs;
833 	imgp->interpreted = 0;
834 
835 fail:
836 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
837 	return (error);
838 }
839 
840 #if __ELF_WORD_SIZE == 32
841 #define suword	suword32
842 #define stacktype u_int32_t
843 #else
844 #define suword	suword64
845 #define stacktype u_int64_t
846 #endif
847 
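/*
 * Write the ELF auxiliary argument vector onto the new process's stack,
 * just past the argument and environment pointer arrays, and push argc.
 */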
848 int
849 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
850 {
851 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
852 	stacktype *base;
853 	stacktype *pos;
854 
855 	base = (stacktype *)*stack_base;
856 	pos = base + (imgp->argc + imgp->envc + 2);
857 
858 	if (args->trace) {
859 		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
860 	}
861 	if (args->execfd != -1) {
862 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
863 	}
864 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
865 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
866 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
867 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
868 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
869 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
870 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
871 	AUXARGS_ENTRY(pos, AT_NULL, 0);
872 
873 	free(imgp->auxargs, M_TEMP);
874 	imgp->auxargs = NULL;
875 
876 	base--;
877 	suword(base, (long)imgp->argc);
878 	*stack_base = (register_t *)base;
879 	return (0);
880 }
881 
882 /*
883  * Code for generating ELF core dumps.
884  */
885 
886 typedef void (*segment_callback)(vm_map_entry_t, void *);
887 
888 /* Closure for cb_put_phdr(). */
889 struct phdr_closure {
890 	Elf_Phdr *phdr;		/* Program header to fill in */
891 	Elf_Off offset;		/* Offset of segment in core file */
892 };
893 
894 /* Closure for cb_size_segment(). */
895 struct sseg_closure {
896 	int count;		/* Count of writable segments. */
897 	size_t size;		/* Total size of all writable segments. */
898 };
899 
900 static void cb_put_phdr(vm_map_entry_t, void *);
901 static void cb_size_segment(vm_map_entry_t, void *);
902 static void each_writable_segment(struct proc *, segment_callback, void *);
903 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
904     int, void *, size_t);
905 static void __elfN(puthdr)(struct proc *, void *, size_t *,
906     const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
907 static void __elfN(putnote)(void *, size_t *, const char *, int,
908     const void *, size_t);
909 
910 extern int osreldate;
911 
912 int
913 __elfN(coredump)(td, vp, limit)
914 	struct thread *td;
915 	register struct vnode *vp;
916 	off_t limit;
917 {
918 	register struct proc *p = td->td_proc;
919 	register struct ucred *cred = td->td_ucred;
920 	int error = 0;
921 	struct sseg_closure seginfo;
922 	void *hdr;
923 	size_t hdrsize;
924 
925 	/* Size the program segments. */
926 	seginfo.count = 0;
927 	seginfo.size = 0;
928 	each_writable_segment(p, cb_size_segment, &seginfo);
929 
930 	/*
931 	 * Calculate the size of the core file header area by making
932 	 * a dry run of generating it.  Nothing is written, but the
933 	 * size is calculated.
934 	 */
935 	hdrsize = 0;
936 	__elfN(puthdr)((struct proc *)NULL, (void *)NULL, &hdrsize,
937 	    (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
938 	    (const prpsinfo_t *)NULL, seginfo.count);
939 
940 	if (hdrsize + seginfo.size >= limit)
941 		return (EFAULT);
942 
943 	/*
944 	 * Allocate memory for building the header, fill it up,
945 	 * and write it out.
946 	 */
947 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
948 	if (hdr == NULL) {
949 		return (EINVAL);
950 	}
951 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
952 
953 	/* Write the contents of all of the writable segments. */
954 	if (error == 0) {
955 		Elf_Phdr *php;
956 		off_t offset;
957 		int i;
958 
959 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
960 		offset = hdrsize;
961 		for (i = 0; i < seginfo.count; i++) {
962 			error = vn_rdwr_inchunks(UIO_WRITE, vp,
963 			    (caddr_t)(uintptr_t)php->p_vaddr,
964 			    php->p_filesz, offset, UIO_USERSPACE,
965 			    IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
966 			    curthread); /* XXXKSE */
967 			if (error != 0)
968 				break;
969 			offset += php->p_filesz;
970 			php++;
971 		}
972 	}
973 	free(hdr, M_TEMP);
974 
975 	return (error);
976 }
977 
978 /*
979  * A callback for each_writable_segment() to write out the segment's
980  * program header entry.
981  */
982 static void
983 cb_put_phdr(entry, closure)
984 	vm_map_entry_t entry;
985 	void *closure;
986 {
987 	struct phdr_closure *phc = (struct phdr_closure *)closure;
988 	Elf_Phdr *phdr = phc->phdr;
989 
990 	phc->offset = round_page(phc->offset);
991 
992 	phdr->p_type = PT_LOAD;
993 	phdr->p_offset = phc->offset;
994 	phdr->p_vaddr = entry->start;
995 	phdr->p_paddr = 0;
996 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
997 	phdr->p_align = PAGE_SIZE;
998 	phdr->p_flags = 0;
999 	if (entry->protection & VM_PROT_READ)
1000 		phdr->p_flags |= PF_R;
1001 	if (entry->protection & VM_PROT_WRITE)
1002 		phdr->p_flags |= PF_W;
1003 	if (entry->protection & VM_PROT_EXECUTE)
1004 		phdr->p_flags |= PF_X;
1005 
1006 	phc->offset += phdr->p_filesz;
1007 	phc->phdr++;
1008 }
1009 
1010 /*
1011  * A callback for each_writable_segment() to gather information about
1012  * the number of segments and their total size.
1013  */
1014 static void
1015 cb_size_segment(entry, closure)
1016 	vm_map_entry_t entry;
1017 	void *closure;
1018 {
1019 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1020 
1021 	ssc->count++;
1022 	ssc->size += entry->end - entry->start;
1023 }
1024 
1025 /*
1026  * For each writable segment in the process's memory map, call the given
1027  * function with a pointer to the map entry and some arbitrary
1028  * caller-supplied data.
1029  */
1030 static void
1031 each_writable_segment(p, func, closure)
1032 	struct proc *p;
1033 	segment_callback func;
1034 	void *closure;
1035 {
1036 	vm_map_t map = &p->p_vmspace->vm_map;
1037 	vm_map_entry_t entry;
1038 
1039 	for (entry = map->header.next; entry != &map->header;
1040 	    entry = entry->next) {
1041 		vm_object_t obj;
1042 
1043 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
1044 		    (entry->protection & (VM_PROT_READ|VM_PROT_WRITE)) !=
1045 		    (VM_PROT_READ|VM_PROT_WRITE))
1046 			continue;
1047 
1048 		/*
1049 		** Don't include a memory segment in the coredump if
1050 		** MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1051 		** madvise(2).
1052 		*/
1053 		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
1054 			continue;
1055 
1056 		if ((obj = entry->object.vm_object) == NULL)
1057 			continue;
1058 
1059 		/* Find the deepest backing object. */
1060 		while (obj->backing_object != NULL)
1061 			obj = obj->backing_object;
1062 
1063 		/* Ignore memory-mapped devices and such things. */
1064 		if (obj->type != OBJT_DEFAULT &&
1065 		    obj->type != OBJT_SWAP &&
1066 		    obj->type != OBJT_VNODE)
1067 			continue;
1068 
1069 		(*func)(entry, closure);
1070 	}
1071 }
1072 
1073 /*
1074  * Write the core file header to the file, including padding up to
1075  * the page boundary.
1076  */
1077 static int
1078 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1079 	struct thread *td;
1080 	struct vnode *vp;
1081 	struct ucred *cred;
1082 	int numsegs;
1083 	size_t hdrsize;
1084 	void *hdr;
1085 {
1086 	struct {
1087 		prstatus_t status;
1088 		prfpregset_t fpregset;
1089 		prpsinfo_t psinfo;
1090 	} *tempdata;
1091 	struct proc *p = td->td_proc;
1092 	size_t off;
1093 	prstatus_t *status;
1094 	prfpregset_t *fpregset;
1095 	prpsinfo_t *psinfo;
1096 
1097 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
1098 	status = &tempdata->status;
1099 	fpregset = &tempdata->fpregset;
1100 	psinfo = &tempdata->psinfo;
1101 
1102 	/* Gather the information for the header. */
1103 	status->pr_version = PRSTATUS_VERSION;
1104 	status->pr_statussz = sizeof(prstatus_t);
1105 	status->pr_gregsetsz = sizeof(gregset_t);
1106 	status->pr_fpregsetsz = sizeof(fpregset_t);
1107 	status->pr_osreldate = osreldate;
1108 	status->pr_cursig = p->p_sig;
1109 	status->pr_pid = p->p_pid;
1110 	fill_regs(td, &status->pr_reg);
1111 
1112 	fill_fpregs(td, fpregset);
1113 
1114 	psinfo->pr_version = PRPSINFO_VERSION;
1115 	psinfo->pr_psinfosz = sizeof(prpsinfo_t);
1116 	strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1117 
1118 	/* XXX - We don't fill in the command line arguments properly yet. */
1119 	strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs));
1120 
1121 	/* Fill in the header. */
1122 	bzero(hdr, hdrsize);
1123 	off = 0;
1124 	__elfN(puthdr)(p, hdr, &off, status, fpregset, psinfo, numsegs);
1125 
1126 	free(tempdata, M_TEMP);
1127 
1128 	/* Write it to the core file. */
1129 	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1130 	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1131 	    td)); /* XXXKSE */
1132 }
1133 
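/*
 * Lay out the core file header at *off in dst: the ELF header, the
 * note segment (prstatus, fpregset, psinfo) and the program header
 * table.  When dst is NULL nothing is written and only *off is
 * advanced, which is how the caller computes the header size.
 */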
1134 static void
1135 __elfN(puthdr)(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
1136     const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
1137 {
1138 	size_t ehoff;
1139 	size_t phoff;
1140 	size_t noteoff;
1141 	size_t notesz;
1142 
1143 	ehoff = *off;
1144 	*off += sizeof(Elf_Ehdr);
1145 
1146 	phoff = *off;
1147 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
1148 
1149 	noteoff = *off;
1150 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1151 	    sizeof *status);
1152 	__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1153 	    sizeof *fpregset);
1154 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1155 	    sizeof *psinfo);
1156 	notesz = *off - noteoff;
1157 
1158 	/* Align up to a page boundary for the program segments. */
1159 	*off = round_page(*off);
1160 
1161 	if (dst != NULL) {
1162 		Elf_Ehdr *ehdr;
1163 		Elf_Phdr *phdr;
1164 		struct phdr_closure phc;
1165 
1166 		/*
1167 		 * Fill in the ELF header.
1168 		 */
1169 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1170 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1171 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1172 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1173 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1174 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1175 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1176 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1177 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1178 		ehdr->e_ident[EI_ABIVERSION] = 0;
1179 		ehdr->e_ident[EI_PAD] = 0;
1180 		ehdr->e_type = ET_CORE;
1181 		ehdr->e_machine = ELF_ARCH;
1182 		ehdr->e_version = EV_CURRENT;
1183 		ehdr->e_entry = 0;
1184 		ehdr->e_phoff = phoff;
1185 		ehdr->e_flags = 0;
1186 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1187 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1188 		ehdr->e_phnum = numsegs + 1;
1189 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1190 		ehdr->e_shnum = 0;
1191 		ehdr->e_shstrndx = SHN_UNDEF;
1192 
1193 		/*
1194 		 * Fill in the program header entries.
1195 		 */
1196 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1197 
1198 		/* The note segment. */
1199 		phdr->p_type = PT_NOTE;
1200 		phdr->p_offset = noteoff;
1201 		phdr->p_vaddr = 0;
1202 		phdr->p_paddr = 0;
1203 		phdr->p_filesz = notesz;
1204 		phdr->p_memsz = 0;
1205 		phdr->p_flags = 0;
1206 		phdr->p_align = 0;
1207 		phdr++;
1208 
1209 		/* All the writable segments from the program. */
1210 		phc.phdr = phdr;
1211 		phc.offset = *off;
1212 		each_writable_segment(p, cb_put_phdr, &phc);
1213 	}
1214 }
1215 
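/*
 * Append one ELF note (name, type, descriptor) at *off in dst, padding
 * the name and descriptor to Elf_Size alignment.  When dst is NULL only
 * *off is advanced.
 */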
1216 static void
1217 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1218     const void *desc, size_t descsz)
1219 {
1220 	Elf_Note note;
1221 
1222 	note.n_namesz = strlen(name) + 1;
1223 	note.n_descsz = descsz;
1224 	note.n_type = type;
1225 	if (dst != NULL)
1226 		bcopy(&note, (char *)dst + *off, sizeof note);
1227 	*off += sizeof note;
1228 	if (dst != NULL)
1229 		bcopy(name, (char *)dst + *off, note.n_namesz);
1230 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1231 	if (dst != NULL)
1232 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1233 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1234 }
1235 
1236 /*
1237  * Tell kern_execve.c about it, with a little help from the linker.
1238  */
1239 #if __ELF_WORD_SIZE == 32
1240 static struct execsw elf_execsw = {exec_elf32_imgact, "ELF32"};
1241 EXEC_SET(elf32, elf_execsw);
1242 #else
1243 static struct execsw elf_execsw = {exec_elf64_imgact, "ELF64"};
1244 EXEC_SET(elf64, elf_execsw);
1245 #endif
1246