xref: /freebsd/sys/kern/imgact_elf.c (revision 5773cccf19ef7b97e56c1101aa481c43149224da)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 S�ren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 #include <sys/param.h>
34 #include <sys/exec.h>
35 #include <sys/fcntl.h>
36 #include <sys/imgact.h>
37 #include <sys/imgact_elf.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/mman.h>
43 #include <sys/namei.h>
44 #include <sys/pioctl.h>
45 #include <sys/proc.h>
46 #include <sys/procfs.h>
47 #include <sys/resourcevar.h>
48 #include <sys/systm.h>
49 #include <sys/signalvar.h>
50 #include <sys/stat.h>
51 #include <sys/sx.h>
52 #include <sys/syscall.h>
53 #include <sys/sysctl.h>
54 #include <sys/sysent.h>
55 #include <sys/vnode.h>
56 
57 #include <vm/vm.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_param.h>
60 #include <vm/pmap.h>
61 #include <vm/vm_map.h>
62 #include <vm/vm_object.h>
63 #include <vm/vm_extern.h>
64 
65 #include <machine/elf.h>
66 #include <machine/md_var.h>
67 
68 #define OLD_EI_BRAND	8
69 
70 __ElfType(Brandinfo);
71 __ElfType(Auxargs);
72 
73 static int __elfN(check_header)(const Elf_Ehdr *hdr);
74 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
75     const char *interp);
76 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
77     u_long *entry, size_t pagesize);
78 static int __elfN(load_section)(struct proc *p,
79     struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
80     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
81     vm_prot_t prot, size_t pagesize);
82 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
83 
84 static int elf_trace = 0;
85 #if __ELF_WORD_SIZE == 32
86 SYSCTL_INT(_debug, OID_AUTO, elf32_trace, CTLFLAG_RW, &elf_trace, 0, "");
87 #else
88 SYSCTL_INT(_debug, OID_AUTO, elf64_trace, CTLFLAG_RW, &elf_trace, 0, "");
89 #endif
90 
91 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
92 extern int fallback_elf_brand;
93 
94 int
95 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
96 {
97 	int i;
98 
99 	for (i = 0; i < MAX_BRANDS; i++) {
100 		if (elf_brand_list[i] == NULL) {
101 			elf_brand_list[i] = entry;
102 			break;
103 		}
104 	}
105 	if (i == MAX_BRANDS)
106 		return (-1);
107 	return (0);
108 }
109 
110 int
111 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
112 {
113 	int i;
114 
115 	for (i = 0; i < MAX_BRANDS; i++) {
116 		if (elf_brand_list[i] == entry) {
117 			elf_brand_list[i] = NULL;
118 			break;
119 		}
120 	}
121 	if (i == MAX_BRANDS)
122 		return (-1);
123 	return (0);
124 }
125 
126 int
127 __elfN(brand_inuse)(Elf_Brandinfo *entry)
128 {
129 	struct proc *p;
130 	int rval = FALSE;
131 
132 	sx_slock(&allproc_lock);
133 	LIST_FOREACH(p, &allproc, p_list) {
134 		if (p->p_sysent == entry->sysvec) {
135 			rval = TRUE;
136 			break;
137 		}
138 	}
139 	sx_sunlock(&allproc_lock);
140 
141 	return (rval);
142 }
143 
144 static Elf_Brandinfo *
145 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
146 {
147 	Elf_Brandinfo *bi;
148 	int i;
149 
150 	/*
151 	 * We support three types of branding -- (1) the ELF EI_OSABI field
152 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
153 	 * branding w/in the ELF header, and (3) path of the `interp_path'
154 	 * field.  We should also look for an ".note.ABI-tag" ELF section now
155 	 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
156 	 */
157 
158 	/* If the executable has a brand, search for it in the brand list. */
159 	for (i = 0; i < MAX_BRANDS; i++) {
160 		bi = elf_brand_list[i];
161 		if (bi != NULL && hdr->e_machine == bi->machine &&
162 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
163 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
164 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
165 			return (bi);
166 	}
167 
168 	/* Lacking a known brand, search for a recognized interpreter. */
169 	if (interp != NULL) {
170 		for (i = 0; i < MAX_BRANDS; i++) {
171 			bi = elf_brand_list[i];
172 			if (bi != NULL && hdr->e_machine == bi->machine &&
173 			    strcmp(interp, bi->interp_path) == 0)
174 				return (bi);
175 		}
176 	}
177 
178 	/* Lacking a recognized interpreter, try the default brand */
179 	for (i = 0; i < MAX_BRANDS; i++) {
180 		bi = elf_brand_list[i];
181 		if (bi != NULL && hdr->e_machine == bi->machine &&
182 		    fallback_elf_brand == bi->brand)
183 			return (bi);
184 	}
185 	return (NULL);
186 }
187 
188 static int
189 __elfN(check_header)(const Elf_Ehdr *hdr)
190 {
191 	Elf_Brandinfo *bi;
192 	int i;
193 
194 	if (!IS_ELF(*hdr) ||
195 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
196 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
197 	    hdr->e_ident[EI_VERSION] != EV_CURRENT)
198 		return (ENOEXEC);
199 
200 	/*
201 	 * Make sure we have at least one brand for this machine.
202 	 */
203 
204 	for (i = 0; i < MAX_BRANDS; i++) {
205 		bi = elf_brand_list[i];
206 		if (bi != NULL && bi->machine == hdr->e_machine)
207 			break;
208 	}
209 	if (i == MAX_BRANDS)
210 		return (ENOEXEC);
211 
212 	if (hdr->e_version != ELF_TARG_VER)
213 		return (ENOEXEC);
214 
215 	return (0);
216 }
217 
218 static int
219 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
220 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
221 	vm_prot_t max)
222 {
223 	int error, rv;
224 	vm_offset_t off;
225 	vm_offset_t data_buf = 0;
226 
227 	/*
228 	 * Create the page if it doesn't exist yet. Ignore errors.
229 	 */
230 	vm_map_lock(map);
231 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
232 	    max, 0);
233 	vm_map_unlock(map);
234 
235 	/*
236 	 * Find the page from the underlying object.
237 	 */
238 	if (object) {
239 		vm_object_reference(object);
240 		rv = vm_map_find(exec_map,
241 				 object,
242 				 trunc_page(offset),
243 				 &data_buf,
244 				 PAGE_SIZE,
245 				 TRUE,
246 				 VM_PROT_READ,
247 				 VM_PROT_ALL,
248 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
249 		if (rv != KERN_SUCCESS) {
250 			vm_object_deallocate(object);
251 			return (rv);
252 		}
253 
254 		off = offset - trunc_page(offset);
255 		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
256 		    end - start);
257 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
258 		if (error) {
259 			return (KERN_FAILURE);
260 		}
261 	}
262 
263 	return (KERN_SUCCESS);
264 }
265 
266 static int
267 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
268 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
269 	vm_prot_t max, int cow)
270 {
271 	int rv;
272 
273 	if (start != trunc_page(start)) {
274 		rv = __elfN(map_partial)(map, object, offset, start,
275 		    round_page(start), prot, max);
276 		if (rv)
277 			return (rv);
278 		offset += round_page(start) - start;
279 		start = round_page(start);
280 	}
281 	if (end != round_page(end)) {
282 		rv = __elfN(map_partial)(map, object, offset +
283 		    trunc_page(end) - start, trunc_page(end), end, prot, max);
284 		if (rv)
285 			return (rv);
286 		end = trunc_page(end);
287 	}
288 	if (end > start) {
289 		if (offset & PAGE_MASK) {
290 			vm_offset_t data_buf, off;
291 			vm_size_t sz;
292 			int error;
293 
294 			/*
295 			 * The mapping is not page aligned. This means we have
296 			 * to copy the data. Sigh.
297 			 */
298 			rv = vm_map_find(map, 0, 0, &start, end - start,
299 			    FALSE, prot, max, 0);
300 			if (rv)
301 				return (rv);
302 			while (start < end) {
303 				vm_object_reference(object);
304 				rv = vm_map_find(exec_map,
305 						 object,
306 						 trunc_page(offset),
307 						 &data_buf,
308 						 2 * PAGE_SIZE,
309 						 TRUE,
310 						 VM_PROT_READ,
311 						 VM_PROT_ALL,
312 						 (MAP_COPY_ON_WRITE
313 						  | MAP_PREFAULT_PARTIAL));
314 				if (rv != KERN_SUCCESS) {
315 					vm_object_deallocate(object);
316 					return (rv);
317 				}
318 				off = offset - trunc_page(offset);
319 				sz = end - start;
320 				if (sz > PAGE_SIZE)
321 					sz = PAGE_SIZE;
322 				error = copyout((caddr_t)data_buf + off,
323 				    (caddr_t)start, sz);
324 				vm_map_remove(exec_map, data_buf,
325 				    data_buf + 2 * PAGE_SIZE);
326 				if (error) {
327 					return (KERN_FAILURE);
328 				}
329 				start += sz;
330 			}
331 			rv = KERN_SUCCESS;
332 		} else {
333 			vm_map_lock(map);
334 			rv = vm_map_insert(map, object, offset, start, end,
335 			    prot, max, cow);
336 			vm_map_unlock(map);
337 		}
338 		return (rv);
339 	} else {
340 		return (KERN_SUCCESS);
341 	}
342 }
343 
344 static int
345 __elfN(load_section)(struct proc *p, struct vmspace *vmspace,
346 	struct vnode *vp, vm_object_t object, vm_offset_t offset,
347 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
348 	size_t pagesize)
349 {
350 	size_t map_len;
351 	vm_offset_t map_addr;
352 	int error, rv;
353 	size_t copy_len;
354 	vm_offset_t file_addr;
355 	vm_offset_t data_buf = 0;
356 
357 	GIANT_REQUIRED;
358 
359 	error = 0;
360 
361 	/*
362 	 * It's necessary to fail if the filsz + offset taken from the
363 	 * header is greater than the actual file pager object's size.
364 	 * If we were to allow this, then the vm_map_find() below would
365 	 * walk right off the end of the file object and into the ether.
366 	 *
367 	 * While I'm here, might as well check for something else that
368 	 * is invalid: filsz cannot be greater than memsz.
369 	 */
370 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
371 	    filsz > memsz) {
372 		uprintf("elf_load_section: truncated ELF file\n");
373 		return (ENOEXEC);
374 	}
375 
376 #define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
377 #define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
378 
379 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
380 	file_addr = trunc_page_ps(offset, pagesize);
381 
382 	/*
383 	 * We have two choices.  We can either clear the data in the last page
384 	 * of an oversized mapping, or we can start the anon mapping a page
385 	 * early and copy the initialized data into that first page.  We
386 	 * choose the second..
387 	 */
388 	if (memsz > filsz)
389 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
390 	else
391 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
392 
393 	if (map_len != 0) {
394 		vm_object_reference(object);
395 		rv = __elfN(map_insert)(&vmspace->vm_map,
396 				      object,
397 				      file_addr,	/* file offset */
398 				      map_addr,		/* virtual start */
399 				      map_addr + map_len,/* virtual end */
400 				      prot,
401 				      VM_PROT_ALL,
402 				      MAP_COPY_ON_WRITE | MAP_PREFAULT);
403 		if (rv != KERN_SUCCESS) {
404 			vm_object_deallocate(object);
405 			return (EINVAL);
406 		}
407 
408 		/* we can stop now if we've covered it all */
409 		if (memsz == filsz) {
410 			return (0);
411 		}
412 	}
413 
414 
415 	/*
416 	 * We have to get the remaining bit of the file into the first part
417 	 * of the oversized map segment.  This is normally because the .data
418 	 * segment in the file is extended to provide bss.  It's a neat idea
419 	 * to try and save a page, but it's a pain in the behind to implement.
420 	 */
421 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
422 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
423 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
424 	    map_addr;
425 
426 	/* This had damn well better be true! */
427 	if (map_len != 0) {
428 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
429 		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
430 		if (rv != KERN_SUCCESS) {
431 			return (EINVAL);
432 		}
433 	}
434 
435 	if (copy_len != 0) {
436 		vm_offset_t off;
437 		vm_object_reference(object);
438 		rv = vm_map_find(exec_map,
439 				 object,
440 				 trunc_page(offset + filsz),
441 				 &data_buf,
442 				 PAGE_SIZE,
443 				 TRUE,
444 				 VM_PROT_READ,
445 				 VM_PROT_ALL,
446 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
447 		if (rv != KERN_SUCCESS) {
448 			vm_object_deallocate(object);
449 			return (EINVAL);
450 		}
451 
452 		/* send the page fragment to user space */
453 		off = trunc_page_ps(offset + filsz, pagesize) -
454 		    trunc_page(offset + filsz);
455 		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
456 		    copy_len);
457 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
458 		if (error) {
459 			return (error);
460 		}
461 	}
462 
463 	/*
464 	 * set it to the specified protection.
465 	 * XXX had better undo the damage from pasting over the cracks here!
466 	 */
467 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
468 	    round_page(map_addr + map_len),  prot, FALSE);
469 
470 	return (error);
471 }
472 
473 /*
474  * Load the file "file" into memory.  It may be either a shared object
475  * or an executable.
476  *
477  * The "addr" reference parameter is in/out.  On entry, it specifies
478  * the address where a shared object should be loaded.  If the file is
479  * an executable, this value is ignored.  On exit, "addr" specifies
480  * where the file was actually loaded.
481  *
482  * The "entry" reference parameter is out only.  On exit, it specifies
483  * the entry point for the loaded file.
484  */
485 static int
486 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
487 	u_long *entry, size_t pagesize)
488 {
489 	struct {
490 		struct nameidata nd;
491 		struct vattr attr;
492 		struct image_params image_params;
493 	} *tempdata;
494 	const Elf_Ehdr *hdr = NULL;
495 	const Elf_Phdr *phdr = NULL;
496 	struct nameidata *nd;
497 	struct vmspace *vmspace = p->p_vmspace;
498 	struct vattr *attr;
499 	struct image_params *imgp;
500 	vm_prot_t prot;
501 	u_long rbase;
502 	u_long base_addr = 0;
503 	int error, i, numsegs;
504 
505 	if (curthread->td_proc != p)
506 		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */
507 
508 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
509 	nd = &tempdata->nd;
510 	attr = &tempdata->attr;
511 	imgp = &tempdata->image_params;
512 
513 	/*
514 	 * Initialize part of the common data
515 	 */
516 	imgp->proc = p;
517 	imgp->userspace_argv = NULL;
518 	imgp->userspace_envv = NULL;
519 	imgp->attr = attr;
520 	imgp->firstpage = NULL;
521 	imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);
522 	imgp->object = NULL;
523 	imgp->execlabel = NULL;
524 
525 	if (imgp->image_header == NULL) {
526 		nd->ni_vp = NULL;
527 		error = ENOMEM;
528 		goto fail;
529 	}
530 
531 	/* XXXKSE */
532 	NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
533 
534 	if ((error = namei(nd)) != 0) {
535 		nd->ni_vp = NULL;
536 		goto fail;
537 	}
538 	NDFREE(nd, NDF_ONLY_PNBUF);
539 	imgp->vp = nd->ni_vp;
540 
541 	/*
542 	 * Check permissions, modes, uid, etc on the file, and "open" it.
543 	 */
544 	error = exec_check_permissions(imgp);
545 	if (error) {
546 		VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
547 		goto fail;
548 	}
549 
550 	error = exec_map_first_page(imgp);
551 	/*
552 	 * Also make certain that the interpreter stays the same, so set
553 	 * its VV_TEXT flag, too.
554 	 */
555 	if (error == 0)
556 		nd->ni_vp->v_vflag |= VV_TEXT;
557 
558 	VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
559 	vm_object_reference(imgp->object);
560 
561 	VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
562 	if (error)
563 		goto fail;
564 
565 	hdr = (const Elf_Ehdr *)imgp->image_header;
566 	if ((error = __elfN(check_header)(hdr)) != 0)
567 		goto fail;
568 	if (hdr->e_type == ET_DYN)
569 		rbase = *addr;
570 	else if (hdr->e_type == ET_EXEC)
571 		rbase = 0;
572 	else {
573 		error = ENOEXEC;
574 		goto fail;
575 	}
576 
577 	/* Only support headers that fit within first page for now */
578 	if ((hdr->e_phoff > PAGE_SIZE) ||
579 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
580 		error = ENOEXEC;
581 		goto fail;
582 	}
583 
584 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
585 
586 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
587 		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
588 			prot = 0;
589 			if (phdr[i].p_flags & PF_X)
590   				prot |= VM_PROT_EXECUTE;
591 			if (phdr[i].p_flags & PF_W)
592   				prot |= VM_PROT_WRITE;
593 			if (phdr[i].p_flags & PF_R)
594   				prot |= VM_PROT_READ;
595 
596 			if ((error = __elfN(load_section)(p, vmspace,
597 			    nd->ni_vp, imgp->object, phdr[i].p_offset,
598 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
599 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
600 			    pagesize)) != 0)
601 				goto fail;
602 			/*
603 			 * Establish the base address if this is the
604 			 * first segment.
605 			 */
606 			if (numsegs == 0)
607   				base_addr = trunc_page(phdr[i].p_vaddr +
608 				    rbase);
609 			numsegs++;
610 		}
611 	}
612 	*addr = base_addr;
613 	*entry = (unsigned long)hdr->e_entry + rbase;
614 
615 fail:
616 	if (imgp->firstpage)
617 		exec_unmap_first_page(imgp);
618 	if (imgp->image_header)
619 		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
620 		    PAGE_SIZE);
621 	if (imgp->object)
622 		vm_object_deallocate(imgp->object);
623 
624 	if (nd->ni_vp)
625 		vrele(nd->ni_vp);
626 
627 	free(tempdata, M_TEMP);
628 
629 	return (error);
630 }
631 
632 static int
633 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
634 {
635 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
636 	const Elf_Phdr *phdr;
637 	Elf_Auxargs *elf_auxargs = NULL;
638 	struct vmspace *vmspace;
639 	vm_prot_t prot;
640 	u_long text_size = 0, data_size = 0, total_size = 0;
641 	u_long text_addr = 0, data_addr = 0;
642 	u_long seg_size, seg_addr;
643 	u_long addr, entry = 0, proghdr = 0;
644 	int error, i;
645 	const char *interp = NULL;
646 	Elf_Brandinfo *brand_info;
647 	char *path;
648 	struct thread *td = curthread;
649 	struct sysentvec *sv;
650 
651 	GIANT_REQUIRED;
652 
653 	/*
654 	 * Do we have a valid ELF header ?
655 	 */
656 	if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
657 		return (-1);
658 
659 	/*
660 	 * From here on down, we return an errno, not -1, as we've
661 	 * detected an ELF file.
662 	 */
663 
664 	if ((hdr->e_phoff > PAGE_SIZE) ||
665 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
666 		/* Only support headers in first page for now */
667 		return (ENOEXEC);
668 	}
669 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
670 
671 	/*
672 	 * From this point on, we may have resources that need to be freed.
673 	 */
674 
675 	VOP_UNLOCK(imgp->vp, 0, td);
676 
677 	for (i = 0; i < hdr->e_phnum; i++) {
678 		switch (phdr[i].p_type) {
679 	  	case PT_INTERP:	/* Path to interpreter */
680 			if (phdr[i].p_filesz > MAXPATHLEN ||
681 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
682 				error = ENOEXEC;
683 				goto fail;
684 			}
685 			interp = imgp->image_header + phdr[i].p_offset;
686 			break;
687 		default:
688 			break;
689 		}
690 	}
691 
692 	brand_info = __elfN(get_brandinfo)(hdr, interp);
693 	if (brand_info == NULL) {
694 		uprintf("ELF binary type \"%u\" not known.\n",
695 		    hdr->e_ident[EI_OSABI]);
696 		error = ENOEXEC;
697 		goto fail;
698 	}
699 	sv = brand_info->sysvec;
700 
701 	if ((error = exec_extract_strings(imgp)) != 0)
702 		goto fail;
703 
704 	exec_new_vmspace(imgp, sv);
705 
706 	vmspace = imgp->proc->p_vmspace;
707 
708 	for (i = 0; i < hdr->e_phnum; i++) {
709 		switch (phdr[i].p_type) {
710 		case PT_LOAD:	/* Loadable segment */
711 			prot = 0;
712 			if (phdr[i].p_flags & PF_X)
713   				prot |= VM_PROT_EXECUTE;
714 			if (phdr[i].p_flags & PF_W)
715   				prot |= VM_PROT_WRITE;
716 			if (phdr[i].p_flags & PF_R)
717   				prot |= VM_PROT_READ;
718 
719 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
720 			/*
721 			 * Some x86 binaries assume read == executable,
722 			 * notably the M3 runtime and therefore cvsup
723 			 */
724 			if (prot & VM_PROT_READ)
725 				prot |= VM_PROT_EXECUTE;
726 #endif
727 
728 			if ((error = __elfN(load_section)(imgp->proc, vmspace,
729 			    imgp->vp, imgp->object, phdr[i].p_offset,
730 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
731 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
732 			    sv->sv_pagesize)) != 0)
733   				goto fail;
734 
735 			seg_addr = trunc_page(phdr[i].p_vaddr);
736 			seg_size = round_page(phdr[i].p_memsz +
737 			    phdr[i].p_vaddr - seg_addr);
738 
739 			/*
740 			 * Is this .text or .data?  We can't use
741 			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
742 			 * alpha terribly and possibly does other bad
743 			 * things so we stick to the old way of figuring
744 			 * it out:  If the segment contains the program
745 			 * entry point, it's a text segment, otherwise it
746 			 * is a data segment.
747 			 *
748 			 * Note that obreak() assumes that data_addr +
749 			 * data_size == end of data load area, and the ELF
750 			 * file format expects segments to be sorted by
751 			 * address.  If multiple data segments exist, the
752 			 * last one will be used.
753 			 */
754 			if (hdr->e_entry >= phdr[i].p_vaddr &&
755 			    hdr->e_entry < (phdr[i].p_vaddr +
756 			    phdr[i].p_memsz)) {
757 				text_size = seg_size;
758 				text_addr = seg_addr;
759 				entry = (u_long)hdr->e_entry;
760 			} else {
761 				data_size = seg_size;
762 				data_addr = seg_addr;
763 			}
764 			total_size += seg_size;
765 			break;
766 		case PT_PHDR: 	/* Program header table info */
767 			proghdr = phdr[i].p_vaddr;
768 			break;
769 		default:
770 			break;
771 		}
772 	}
773 
774 	if (data_addr == 0 && data_size == 0) {
775 		data_addr = text_addr;
776 		data_size = text_size;
777 	}
778 
779 	/*
780 	 * Check limits.  It should be safe to check the
781 	 * limits after loading the segments since we do
782 	 * not actually fault in all the segments pages.
783 	 */
784 	if (data_size >
785 	    imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
786 	    text_size > maxtsiz ||
787 	    total_size >
788 	    imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
789 		error = ENOMEM;
790 		goto fail;
791 	}
792 
793 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
794 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
795 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
796 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
797 
798 	addr = ELF_RTLD_ADDR(vmspace);
799 
800 	imgp->entry_addr = entry;
801 
802 	imgp->proc->p_sysent = sv;
803 	if (interp != NULL) {
804 		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
805 		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
806 		    interp);
807 		if ((error = __elfN(load_file)(imgp->proc, path, &addr,
808 		    &imgp->entry_addr, sv->sv_pagesize)) != 0) {
809 			if ((error = __elfN(load_file)(imgp->proc, interp,
810 			    &addr, &imgp->entry_addr, sv->sv_pagesize)) != 0) {
811 				uprintf("ELF interpreter %s not found\n",
812 				    path);
813 				free(path, M_TEMP);
814 				goto fail;
815 			}
816 		}
817 		free(path, M_TEMP);
818 	}
819 
820 	/*
821 	 * Construct auxargs table (used by the fixup routine)
822 	 */
823 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
824 	elf_auxargs->execfd = -1;
825 	elf_auxargs->phdr = proghdr;
826 	elf_auxargs->phent = hdr->e_phentsize;
827 	elf_auxargs->phnum = hdr->e_phnum;
828 	elf_auxargs->pagesz = PAGE_SIZE;
829 	elf_auxargs->base = addr;
830 	elf_auxargs->flags = 0;
831 	elf_auxargs->entry = entry;
832 	elf_auxargs->trace = elf_trace;
833 
834 	imgp->auxargs = elf_auxargs;
835 	imgp->interpreted = 0;
836 
837 fail:
838 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
839 	return (error);
840 }
841 
842 #if __ELF_WORD_SIZE == 32
843 #define suword	suword32
844 #define stacktype u_int32_t
845 #else
846 #define suword	suword64
847 #define stacktype u_int64_t
848 #endif
849 
850 int
851 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
852 {
853 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
854 	stacktype *base;
855 	stacktype *pos;
856 
857 	base = (stacktype *)*stack_base;
858 	pos = base + (imgp->argc + imgp->envc + 2);
859 
860 	if (args->trace) {
861 		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
862 	}
863 	if (args->execfd != -1) {
864 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
865 	}
866 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
867 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
868 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
869 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
870 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
871 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
872 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
873 	AUXARGS_ENTRY(pos, AT_NULL, 0);
874 
875 	free(imgp->auxargs, M_TEMP);
876 	imgp->auxargs = NULL;
877 
878 	base--;
879 	suword(base, (long)imgp->argc);
880 	*stack_base = (register_t *)base;
881 	return (0);
882 }
883 
884 /*
885  * Code for generating ELF core dumps.
886  */
887 
888 typedef void (*segment_callback)(vm_map_entry_t, void *);
889 
890 /* Closure for cb_put_phdr(). */
891 struct phdr_closure {
892 	Elf_Phdr *phdr;		/* Program header to fill in */
893 	Elf_Off offset;		/* Offset of segment in core file */
894 };
895 
896 /* Closure for cb_size_segment(). */
897 struct sseg_closure {
898 	int count;		/* Count of writable segments. */
899 	size_t size;		/* Total size of all writable segments. */
900 };
901 
902 static void cb_put_phdr(vm_map_entry_t, void *);
903 static void cb_size_segment(vm_map_entry_t, void *);
904 static void each_writable_segment(struct proc *, segment_callback, void *);
905 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
906     int, void *, size_t);
907 static void __elfN(puthdr)(struct proc *, void *, size_t *,
908     const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
909 static void __elfN(putnote)(void *, size_t *, const char *, int,
910     const void *, size_t);
911 
912 extern int osreldate;
913 
914 int
915 __elfN(coredump)(td, vp, limit)
916 	struct thread *td;
917 	register struct vnode *vp;
918 	off_t limit;
919 {
920 	register struct proc *p = td->td_proc;
921 	register struct ucred *cred = td->td_ucred;
922 	int error = 0;
923 	struct sseg_closure seginfo;
924 	void *hdr;
925 	size_t hdrsize;
926 
927 	/* Size the program segments. */
928 	seginfo.count = 0;
929 	seginfo.size = 0;
930 	each_writable_segment(p, cb_size_segment, &seginfo);
931 
932 	/*
933 	 * Calculate the size of the core file header area by making
934 	 * a dry run of generating it.  Nothing is written, but the
935 	 * size is calculated.
936 	 */
937 	hdrsize = 0;
938 	__elfN(puthdr)((struct proc *)NULL, (void *)NULL, &hdrsize,
939 	    (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
940 	    (const prpsinfo_t *)NULL, seginfo.count);
941 
942 	if (hdrsize + seginfo.size >= limit)
943 		return (EFAULT);
944 
945 	/*
946 	 * Allocate memory for building the header, fill it up,
947 	 * and write it out.
948 	 */
949 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
950 	if (hdr == NULL) {
951 		return (EINVAL);
952 	}
953 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
954 
955 	/* Write the contents of all of the writable segments. */
956 	if (error == 0) {
957 		Elf_Phdr *php;
958 		off_t offset;
959 		int i;
960 
961 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
962 		offset = hdrsize;
963 		for (i = 0; i < seginfo.count; i++) {
964 			error = vn_rdwr_inchunks(UIO_WRITE, vp,
965 			    (caddr_t)(uintptr_t)php->p_vaddr,
966 			    php->p_filesz, offset, UIO_USERSPACE,
967 			    IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
968 			    curthread); /* XXXKSE */
969 			if (error != 0)
970 				break;
971 			offset += php->p_filesz;
972 			php++;
973 		}
974 	}
975 	free(hdr, M_TEMP);
976 
977 	return (error);
978 }
979 
980 /*
981  * A callback for each_writable_segment() to write out the segment's
982  * program header entry.
983  */
984 static void
985 cb_put_phdr(entry, closure)
986 	vm_map_entry_t entry;
987 	void *closure;
988 {
989 	struct phdr_closure *phc = (struct phdr_closure *)closure;
990 	Elf_Phdr *phdr = phc->phdr;
991 
992 	phc->offset = round_page(phc->offset);
993 
994 	phdr->p_type = PT_LOAD;
995 	phdr->p_offset = phc->offset;
996 	phdr->p_vaddr = entry->start;
997 	phdr->p_paddr = 0;
998 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
999 	phdr->p_align = PAGE_SIZE;
1000 	phdr->p_flags = 0;
1001 	if (entry->protection & VM_PROT_READ)
1002 		phdr->p_flags |= PF_R;
1003 	if (entry->protection & VM_PROT_WRITE)
1004 		phdr->p_flags |= PF_W;
1005 	if (entry->protection & VM_PROT_EXECUTE)
1006 		phdr->p_flags |= PF_X;
1007 
1008 	phc->offset += phdr->p_filesz;
1009 	phc->phdr++;
1010 }
1011 
1012 /*
1013  * A callback for each_writable_segment() to gather information about
1014  * the number of segments and their total size.
1015  */
1016 static void
1017 cb_size_segment(entry, closure)
1018 	vm_map_entry_t entry;
1019 	void *closure;
1020 {
1021 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1022 
1023 	ssc->count++;
1024 	ssc->size += entry->end - entry->start;
1025 }
1026 
1027 /*
1028  * For each writable segment in the process's memory map, call the given
1029  * function with a pointer to the map entry and some arbitrary
1030  * caller-supplied data.
1031  */
1032 static void
1033 each_writable_segment(p, func, closure)
1034 	struct proc *p;
1035 	segment_callback func;
1036 	void *closure;
1037 {
1038 	vm_map_t map = &p->p_vmspace->vm_map;
1039 	vm_map_entry_t entry;
1040 
1041 	for (entry = map->header.next; entry != &map->header;
1042 	    entry = entry->next) {
1043 		vm_object_t obj;
1044 
1045 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
1046 		    (entry->protection & (VM_PROT_READ|VM_PROT_WRITE)) !=
1047 		    (VM_PROT_READ|VM_PROT_WRITE))
1048 			continue;
1049 
1050 		/*
1051 		** Dont include memory segment in the coredump if
1052 		** MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1053 		** madvise(2).
1054 		*/
1055 		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
1056 			continue;
1057 
1058 		if ((obj = entry->object.vm_object) == NULL)
1059 			continue;
1060 
1061 		/* Find the deepest backing object. */
1062 		while (obj->backing_object != NULL)
1063 			obj = obj->backing_object;
1064 
1065 		/* Ignore memory-mapped devices and such things. */
1066 		if (obj->type != OBJT_DEFAULT &&
1067 		    obj->type != OBJT_SWAP &&
1068 		    obj->type != OBJT_VNODE)
1069 			continue;
1070 
1071 		(*func)(entry, closure);
1072 	}
1073 }
1074 
1075 /*
1076  * Write the core file header to the file, including padding up to
1077  * the page boundary.
1078  */
1079 static int
1080 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1081 	struct thread *td;
1082 	struct vnode *vp;
1083 	struct ucred *cred;
1084 	int numsegs;
1085 	size_t hdrsize;
1086 	void *hdr;
1087 {
1088 	struct {
1089 		prstatus_t status;
1090 		prfpregset_t fpregset;
1091 		prpsinfo_t psinfo;
1092 	} *tempdata;
1093 	struct proc *p = td->td_proc;
1094 	size_t off;
1095 	prstatus_t *status;
1096 	prfpregset_t *fpregset;
1097 	prpsinfo_t *psinfo;
1098 
1099 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
1100 	status = &tempdata->status;
1101 	fpregset = &tempdata->fpregset;
1102 	psinfo = &tempdata->psinfo;
1103 
1104 	/* Gather the information for the header. */
1105 	status->pr_version = PRSTATUS_VERSION;
1106 	status->pr_statussz = sizeof(prstatus_t);
1107 	status->pr_gregsetsz = sizeof(gregset_t);
1108 	status->pr_fpregsetsz = sizeof(fpregset_t);
1109 	status->pr_osreldate = osreldate;
1110 	status->pr_cursig = p->p_sig;
1111 	status->pr_pid = p->p_pid;
1112 	fill_regs(td, &status->pr_reg);
1113 
1114 	fill_fpregs(td, fpregset);
1115 
1116 	psinfo->pr_version = PRPSINFO_VERSION;
1117 	psinfo->pr_psinfosz = sizeof(prpsinfo_t);
1118 	strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1119 
1120 	/* XXX - We don't fill in the command line arguments properly yet. */
1121 	strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs));
1122 
1123 	/* Fill in the header. */
1124 	bzero(hdr, hdrsize);
1125 	off = 0;
1126 	__elfN(puthdr)(p, hdr, &off, status, fpregset, psinfo, numsegs);
1127 
1128 	free(tempdata, M_TEMP);
1129 
1130 	/* Write it to the core file. */
1131 	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1132 	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1133 	    td)); /* XXXKSE */
1134 }
1135 
1136 static void
1137 __elfN(puthdr)(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
1138     const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
1139 {
1140 	size_t ehoff;
1141 	size_t phoff;
1142 	size_t noteoff;
1143 	size_t notesz;
1144 
1145 	ehoff = *off;
1146 	*off += sizeof(Elf_Ehdr);
1147 
1148 	phoff = *off;
1149 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
1150 
1151 	noteoff = *off;
1152 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1153 	    sizeof *status);
1154 	__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1155 	    sizeof *fpregset);
1156 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1157 	    sizeof *psinfo);
1158 	notesz = *off - noteoff;
1159 
1160 	/* Align up to a page boundary for the program segments. */
1161 	*off = round_page(*off);
1162 
1163 	if (dst != NULL) {
1164 		Elf_Ehdr *ehdr;
1165 		Elf_Phdr *phdr;
1166 		struct phdr_closure phc;
1167 
1168 		/*
1169 		 * Fill in the ELF header.
1170 		 */
1171 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1172 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1173 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1174 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1175 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1176 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1177 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1178 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1179 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1180 		ehdr->e_ident[EI_ABIVERSION] = 0;
1181 		ehdr->e_ident[EI_PAD] = 0;
1182 		ehdr->e_type = ET_CORE;
1183 		ehdr->e_machine = ELF_ARCH;
1184 		ehdr->e_version = EV_CURRENT;
1185 		ehdr->e_entry = 0;
1186 		ehdr->e_phoff = phoff;
1187 		ehdr->e_flags = 0;
1188 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1189 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1190 		ehdr->e_phnum = numsegs + 1;
1191 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1192 		ehdr->e_shnum = 0;
1193 		ehdr->e_shstrndx = SHN_UNDEF;
1194 
1195 		/*
1196 		 * Fill in the program header entries.
1197 		 */
1198 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1199 
1200 		/* The note segement. */
1201 		phdr->p_type = PT_NOTE;
1202 		phdr->p_offset = noteoff;
1203 		phdr->p_vaddr = 0;
1204 		phdr->p_paddr = 0;
1205 		phdr->p_filesz = notesz;
1206 		phdr->p_memsz = 0;
1207 		phdr->p_flags = 0;
1208 		phdr->p_align = 0;
1209 		phdr++;
1210 
1211 		/* All the writable segments from the program. */
1212 		phc.phdr = phdr;
1213 		phc.offset = *off;
1214 		each_writable_segment(p, cb_put_phdr, &phc);
1215 	}
1216 }
1217 
1218 static void
1219 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1220     const void *desc, size_t descsz)
1221 {
1222 	Elf_Note note;
1223 
1224 	note.n_namesz = strlen(name) + 1;
1225 	note.n_descsz = descsz;
1226 	note.n_type = type;
1227 	if (dst != NULL)
1228 		bcopy(&note, (char *)dst + *off, sizeof note);
1229 	*off += sizeof note;
1230 	if (dst != NULL)
1231 		bcopy(name, (char *)dst + *off, note.n_namesz);
1232 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1233 	if (dst != NULL)
1234 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1235 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1236 }
1237 
1238 /*
1239  * Tell kern_execve.c about it, with a little help from the linker.
1240  */
1241 #if __ELF_WORD_SIZE == 32
1242 static struct execsw elf_execsw = {exec_elf32_imgact, "ELF32"};
1243 EXEC_SET(elf32, elf_execsw);
1244 #else
1245 static struct execsw elf_execsw = {exec_elf64_imgact, "ELF64"};
1246 EXEC_SET(elf64, elf_execsw);
1247 #endif
1248