xref: /freebsd/sys/kern/imgact_elf.c (revision 262e143bd46171a6415a5b28af260a5efa2a3db8)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 Søren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_compat.h"
35 
36 #include <sys/param.h>
37 #include <sys/exec.h>
38 #include <sys/fcntl.h>
39 #include <sys/imgact.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/mutex.h>
46 #include <sys/mman.h>
47 #include <sys/namei.h>
48 #include <sys/pioctl.h>
49 #include <sys/proc.h>
50 #include <sys/procfs.h>
51 #include <sys/resourcevar.h>
52 #include <sys/systm.h>
53 #include <sys/signalvar.h>
54 #include <sys/stat.h>
55 #include <sys/sx.h>
56 #include <sys/syscall.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysent.h>
59 #include <sys/vnode.h>
60 
61 #include <vm/vm.h>
62 #include <vm/vm_kern.h>
63 #include <vm/vm_param.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_map.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_extern.h>
68 
69 #include <machine/elf.h>
70 #include <machine/md_var.h>
71 
72 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
73 #include <machine/fpu.h>
74 #include <compat/ia32/ia32_reg.h>
75 #endif
76 
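/*
 * Byte offset into e_ident[] at which FreeBSD 3.x-style string branding
 * begins; get_brandinfo() compares the bytes there against each brand's
 * compat_3_brand.
 */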
77 #define OLD_EI_BRAND	8
78 
79 static int __elfN(check_header)(const Elf_Ehdr *hdr);
80 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
81     const char *interp);
82 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
83     u_long *entry, size_t pagesize);
84 static int __elfN(load_section)(struct proc *p,
85     struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
86     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
87     vm_prot_t prot, size_t pagesize);
88 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
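/*
 * The __elfN() macro (see <sys/imgact_elf.h>) splices __ELF_WORD_SIZE into
 * a name, so the functions declared above become elf32_* or elf64_*
 * depending on which image activator this file is compiled as.
 */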
89 
90 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
91     "");
92 
93 int __elfN(fallback_brand) = -1;
94 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
95     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
96     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
97 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
98     &__elfN(fallback_brand));
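/*
 * Usage sketch (assuming the standard ELFOSABI_* brand values): setting
 * the tunable in loader.conf(5), e.g. kern.elf64.fallback_brand=3, or
 * adjusting the sysctl at run time makes unbranded binaries of that word
 * size fall back to the brand with that value (3 == ELFOSABI_LINUX,
 * 9 == ELFOSABI_FREEBSD).
 */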
99 
100 int __elfN(can_exec_dyn) = 0;
101 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
102 	can_exec_dyn, CTLFLAG_RW, &__elfN(can_exec_dyn), 0,
103 	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " can exec shared libraries");
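/*
 * When non-zero, the image activator below accepts ET_DYN objects
 * (shared objects) for direct execution instead of rejecting them.
 */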
104 
105 static int elf_trace = 0;
106 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
107 
108 static int elf_legacy_coredump = 0;
109 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
110     &elf_legacy_coredump, 0, "");
111 
112 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
113 
114 int
115 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
116 {
117 	int i;
118 
119 	for (i = 0; i < MAX_BRANDS; i++) {
120 		if (elf_brand_list[i] == NULL) {
121 			elf_brand_list[i] = entry;
122 			break;
123 		}
124 	}
125 	if (i == MAX_BRANDS)
126 		return (-1);
127 	return (0);
128 }
129 
130 int
131 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
132 {
133 	int i;
134 
135 	for (i = 0; i < MAX_BRANDS; i++) {
136 		if (elf_brand_list[i] == entry) {
137 			elf_brand_list[i] = NULL;
138 			break;
139 		}
140 	}
141 	if (i == MAX_BRANDS)
142 		return (-1);
143 	return (0);
144 }
145 
146 int
147 __elfN(brand_inuse)(Elf_Brandinfo *entry)
148 {
149 	struct proc *p;
150 	int rval = FALSE;
151 
152 	sx_slock(&allproc_lock);
153 	LIST_FOREACH(p, &allproc, p_list) {
154 		if (p->p_sysent == entry->sysvec) {
155 			rval = TRUE;
156 			break;
157 		}
158 	}
159 	sx_sunlock(&allproc_lock);
160 
161 	return (rval);
162 }
163 
164 static Elf_Brandinfo *
165 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
166 {
167 	Elf_Brandinfo *bi;
168 	int i;
169 
170 	/*
171 	 * We support three types of branding -- (1) the ELF EI_OSABI field
172 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
173 	 * branding within the ELF header, and (3) a match against a brand's
174 	 * `interp_path' field.  We should also look for the ".note.ABI-tag"
175 	 * ELF section now in Linux, FreeBSD 4.1+, and some NetBSD binaries.
176 	 */
177 
178 	/* If the executable has a brand, search for it in the brand list. */
179 	for (i = 0; i < MAX_BRANDS; i++) {
180 		bi = elf_brand_list[i];
181 		if (bi != NULL && hdr->e_machine == bi->machine &&
182 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
183 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
184 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
185 			return (bi);
186 	}
187 
188 	/* Lacking a known brand, search for a recognized interpreter. */
189 	if (interp != NULL) {
190 		for (i = 0; i < MAX_BRANDS; i++) {
191 			bi = elf_brand_list[i];
192 			if (bi != NULL && hdr->e_machine == bi->machine &&
193 			    strcmp(interp, bi->interp_path) == 0)
194 				return (bi);
195 		}
196 	}
197 
198 	/* Lacking a recognized interpreter, try the default brand. */
199 	for (i = 0; i < MAX_BRANDS; i++) {
200 		bi = elf_brand_list[i];
201 		if (bi != NULL && hdr->e_machine == bi->machine &&
202 		    __elfN(fallback_brand) == bi->brand)
203 			return (bi);
204 	}
205 	return (NULL);
206 }
207 
208 static int
209 __elfN(check_header)(const Elf_Ehdr *hdr)
210 {
211 	Elf_Brandinfo *bi;
212 	int i;
213 
214 	if (!IS_ELF(*hdr) ||
215 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
216 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
217 	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
218 	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
219 	    hdr->e_version != ELF_TARG_VER)
220 		return (ENOEXEC);
221 
222 	/*
223 	 * Make sure we have at least one brand for this machine.
224 	 */
225 
226 	for (i = 0; i < MAX_BRANDS; i++) {
227 		bi = elf_brand_list[i];
228 		if (bi != NULL && bi->machine == hdr->e_machine)
229 			break;
230 	}
231 	if (i == MAX_BRANDS)
232 		return (ENOEXEC);
233 
234 	return (0);
235 }
236 
237 static int
238 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
239 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
240 	vm_prot_t max)
241 {
242 	int error, rv;
243 	vm_offset_t off;
244 	vm_offset_t data_buf = 0;
245 
246 	/*
247 	 * Create the page if it doesn't exist yet. Ignore errors.
248 	 */
249 	vm_map_lock(map);
250 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
251 	    max, 0);
252 	vm_map_unlock(map);
253 
254 	/*
255 	 * Find the page from the underlying object.
256 	 */
257 	if (object) {
258 		vm_object_reference(object);
259 		rv = vm_map_find(exec_map,
260 				 object,
261 				 trunc_page(offset),
262 				 &data_buf,
263 				 PAGE_SIZE,
264 				 TRUE,
265 				 VM_PROT_READ,
266 				 VM_PROT_ALL,
267 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
268 		if (rv != KERN_SUCCESS) {
269 			vm_object_deallocate(object);
270 			return (rv);
271 		}
272 
273 		off = offset - trunc_page(offset);
274 		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
275 		    end - start);
276 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
277 		if (error) {
278 			return (KERN_FAILURE);
279 		}
280 	}
281 
282 	return (KERN_SUCCESS);
283 }
284 
285 static int
286 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
287 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
288 	vm_prot_t max, int cow)
289 {
290 	vm_offset_t data_buf, off;
291 	vm_size_t sz;
292 	int error, rv;
293 
294 	if (start != trunc_page(start)) {
295 		rv = __elfN(map_partial)(map, object, offset, start,
296 		    round_page(start), prot, max);
297 		if (rv)
298 			return (rv);
299 		offset += round_page(start) - start;
300 		start = round_page(start);
301 	}
302 	if (end != round_page(end)) {
303 		rv = __elfN(map_partial)(map, object, offset +
304 		    trunc_page(end) - start, trunc_page(end), end, prot, max);
305 		if (rv)
306 			return (rv);
307 		end = trunc_page(end);
308 	}
309 	if (end > start) {
310 		if (offset & PAGE_MASK) {
311 			/*
312 			 * The mapping is not page aligned. This means we have
313 			 * to copy the data. Sigh.
314 			 */
315 			rv = vm_map_find(map, 0, 0, &start, end - start,
316 			    FALSE, prot, max, 0);
317 			if (rv)
318 				return (rv);
319 			data_buf = 0;
320 			while (start < end) {
321 				vm_object_reference(object);
322 				rv = vm_map_find(exec_map,
323 						 object,
324 						 trunc_page(offset),
325 						 &data_buf,
326 						 2 * PAGE_SIZE,
327 						 TRUE,
328 						 VM_PROT_READ,
329 						 VM_PROT_ALL,
330 						 (MAP_COPY_ON_WRITE
331 						  | MAP_PREFAULT_PARTIAL));
332 				if (rv != KERN_SUCCESS) {
333 					vm_object_deallocate(object);
334 					return (rv);
335 				}
336 				off = offset - trunc_page(offset);
337 				sz = end - start;
338 				if (sz > PAGE_SIZE)
339 					sz = PAGE_SIZE;
340 				error = copyout((caddr_t)data_buf + off,
341 				    (caddr_t)start, sz);
342 				vm_map_remove(exec_map, data_buf,
343 				    data_buf + 2 * PAGE_SIZE);
344 				if (error) {
345 					return (KERN_FAILURE);
346 				}
347 				start += sz;
348 			}
349 			rv = KERN_SUCCESS;
350 		} else {
351 			vm_map_lock(map);
352 			rv = vm_map_insert(map, object, offset, start, end,
353 			    prot, max, cow);
354 			vm_map_unlock(map);
355 		}
356 		return (rv);
357 	} else {
358 		return (KERN_SUCCESS);
359 	}
360 }
361 
362 static int
363 __elfN(load_section)(struct proc *p, struct vmspace *vmspace,
364 	struct vnode *vp, vm_object_t object, vm_offset_t offset,
365 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
366 	size_t pagesize)
367 {
368 	size_t map_len;
369 	vm_offset_t map_addr;
370 	int error, rv, cow;
371 	size_t copy_len;
372 	vm_offset_t file_addr;
373 	vm_offset_t data_buf = 0;
374 
375 	error = 0;
376 
377 	/*
378 	 * It's necessary to fail if the filsz + offset taken from the
379 	 * header is greater than the actual file pager object's size.
380 	 * If we were to allow this, then the vm_map_find() below would
381 	 * walk right off the end of the file object and into the ether.
382 	 *
383 	 * While I'm here, might as well check for something else that
384 	 * is invalid: filsz cannot be greater than memsz.
385 	 */
386 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
387 	    filsz > memsz) {
388 		uprintf("elf_load_section: truncated ELF file\n");
389 		return (ENOEXEC);
390 	}
391 
392 #define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
393 #define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
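/*
 * For example, with pagesize == 0x2000 (an 8K page):
 * trunc_page_ps(0x2345, 0x2000) == 0x2000 and
 * round_page_ps(0x2345, 0x2000) == 0x4000.
 */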
394 
395 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
396 	file_addr = trunc_page_ps(offset, pagesize);
397 
398 	/*
399 	 * We have two choices.  We can either clear the data in the last page
400 	 * of an oversized mapping, or we can start the anon mapping a page
401 	 * early and copy the initialized data into that first page.  We
402 	 * choose the second.
403 	 */
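	/*
	 * Worked example (illustrative values): with offset == 0, filsz ==
	 * 0x1800, memsz == 0x4000 and pagesize == 0x1000, the file-backed
	 * mapping below covers one page, and the remaining 0x800 initialized
	 * bytes are copied later into the first page of the anonymous
	 * mapping that also provides the bss.
	 */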
404 	if (memsz > filsz)
405 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
406 	else
407 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
408 
409 	if (map_len != 0) {
410 		vm_object_reference(object);
411 
412 		/* cow flags: don't dump readonly sections in core */
413 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
414 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
415 
416 		rv = __elfN(map_insert)(&vmspace->vm_map,
417 				      object,
418 				      file_addr,	/* file offset */
419 				      map_addr,		/* virtual start */
420 				      map_addr + map_len,/* virtual end */
421 				      prot,
422 				      VM_PROT_ALL,
423 				      cow);
424 		if (rv != KERN_SUCCESS) {
425 			vm_object_deallocate(object);
426 			return (EINVAL);
427 		}
428 
429 		/* we can stop now if we've covered it all */
430 		if (memsz == filsz) {
431 			return (0);
432 		}
433 	}
434 
435 
436 	/*
437 	 * We have to get the remaining bit of the file into the first part
438 	 * of the oversized map segment.  This is normally because the .data
439 	 * segment in the file is extended to provide bss.  It's a neat idea
440 	 * to try and save a page, but it's a pain in the behind to implement.
441 	 */
442 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
443 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
444 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
445 	    map_addr;
446 
447 	/* This had damn well better be true! */
448 	if (map_len != 0) {
449 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
450 		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
451 		if (rv != KERN_SUCCESS) {
452 			return (EINVAL);
453 		}
454 	}
455 
456 	if (copy_len != 0) {
457 		vm_offset_t off;
458 		vm_object_reference(object);
459 		rv = vm_map_find(exec_map,
460 				 object,
461 				 trunc_page(offset + filsz),
462 				 &data_buf,
463 				 PAGE_SIZE,
464 				 TRUE,
465 				 VM_PROT_READ,
466 				 VM_PROT_ALL,
467 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
468 		if (rv != KERN_SUCCESS) {
469 			vm_object_deallocate(object);
470 			return (EINVAL);
471 		}
472 
473 		/* send the page fragment to user space */
474 		off = trunc_page_ps(offset + filsz, pagesize) -
475 		    trunc_page(offset + filsz);
476 		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
477 		    copy_len);
478 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
479 		if (error) {
480 			return (error);
481 		}
482 	}
483 
484 	/*
485 	 * set it to the specified protection.
486 	 * XXX had better undo the damage from pasting over the cracks here!
487 	 */
488 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
489 	    round_page(map_addr + map_len),  prot, FALSE);
490 
491 	return (error);
492 }
493 
494 /*
495  * Load the file "file" into memory.  It may be either a shared object
496  * or an executable.
497  *
498  * The "addr" reference parameter is in/out.  On entry, it specifies
499  * the address where a shared object should be loaded.  If the file is
500  * an executable, this value is ignored.  On exit, "addr" specifies
501  * where the file was actually loaded.
502  *
503  * The "entry" reference parameter is out only.  On exit, it specifies
504  * the entry point for the loaded file.
505  */
506 static int
507 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
508 	u_long *entry, size_t pagesize)
509 {
510 	struct {
511 		struct nameidata nd;
512 		struct vattr attr;
513 		struct image_params image_params;
514 	} *tempdata;
515 	const Elf_Ehdr *hdr = NULL;
516 	const Elf_Phdr *phdr = NULL;
517 	struct nameidata *nd;
518 	struct vmspace *vmspace = p->p_vmspace;
519 	struct vattr *attr;
520 	struct image_params *imgp;
521 	vm_prot_t prot;
522 	u_long rbase;
523 	u_long base_addr = 0;
524 	int vfslocked, error, i, numsegs;
525 
526 	if (curthread->td_proc != p)
527 		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */
528 
529 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
530 	nd = &tempdata->nd;
531 	attr = &tempdata->attr;
532 	imgp = &tempdata->image_params;
533 
534 	/*
535 	 * Initialize part of the common data
536 	 */
537 	imgp->proc = p;
538 	imgp->attr = attr;
539 	imgp->firstpage = NULL;
540 	imgp->image_header = NULL;
541 	imgp->object = NULL;
542 	imgp->execlabel = NULL;
543 
544 	/* XXXKSE */
545 	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
546 	    curthread);
547 	vfslocked = 0;
548 	if ((error = namei(nd)) != 0) {
549 		nd->ni_vp = NULL;
550 		goto fail;
551 	}
552 	vfslocked = NDHASGIANT(nd);
553 	NDFREE(nd, NDF_ONLY_PNBUF);
554 	imgp->vp = nd->ni_vp;
555 
556 	/*
557 	 * Check permissions, modes, uid, etc on the file, and "open" it.
558 	 */
559 	error = exec_check_permissions(imgp);
560 	if (error) {
561 		VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
562 		goto fail;
563 	}
564 
565 	error = exec_map_first_page(imgp);
566 	/*
567 	 * Also make certain that the interpreter cannot be modified while
568 	 * it is in use, so set its VV_TEXT flag, too.
569 	 */
570 	if (error == 0)
571 		nd->ni_vp->v_vflag |= VV_TEXT;
572 
573 	imgp->object = nd->ni_vp->v_object;
574 	vm_object_reference(imgp->object);
575 
576 	VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
577 	if (error)
578 		goto fail;
579 
580 	hdr = (const Elf_Ehdr *)imgp->image_header;
581 	if ((error = __elfN(check_header)(hdr)) != 0)
582 		goto fail;
583 	if (hdr->e_type == ET_DYN)
584 		rbase = *addr;
585 	else if (hdr->e_type == ET_EXEC)
586 		rbase = 0;
587 	else {
588 		error = ENOEXEC;
589 		goto fail;
590 	}
591 
592 	/* Only support headers that fit within first page for now      */
593 	/*    (multiplication of two Elf_Half fields will not overflow) */
594 	if ((hdr->e_phoff > PAGE_SIZE) ||
595 	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
596 		error = ENOEXEC;
597 		goto fail;
598 	}
599 
600 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
601 
602 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
603 		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
604 			prot = 0;
605 			if (phdr[i].p_flags & PF_X)
606   				prot |= VM_PROT_EXECUTE;
607 			if (phdr[i].p_flags & PF_W)
608   				prot |= VM_PROT_WRITE;
609 			if (phdr[i].p_flags & PF_R)
610   				prot |= VM_PROT_READ;
611 
612 			if ((error = __elfN(load_section)(p, vmspace,
613 			    nd->ni_vp, imgp->object, phdr[i].p_offset,
614 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
615 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
616 			    pagesize)) != 0)
617 				goto fail;
618 			/*
619 			 * Establish the base address if this is the
620 			 * first segment.
621 			 */
622 			if (numsegs == 0)
623   				base_addr = trunc_page(phdr[i].p_vaddr +
624 				    rbase);
625 			numsegs++;
626 		}
627 	}
628 	*addr = base_addr;
629 	*entry = (unsigned long)hdr->e_entry + rbase;
630 
631 fail:
632 	if (imgp->firstpage)
633 		exec_unmap_first_page(imgp);
634 	if (imgp->object)
635 		vm_object_deallocate(imgp->object);
636 
637 	if (nd->ni_vp)
638 		vrele(nd->ni_vp);
639 
640 	VFS_UNLOCK_GIANT(vfslocked);
641 	free(tempdata, M_TEMP);
642 
643 	return (error);
644 }
645 
646 static int
647 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
648 {
649 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
650 	const Elf_Phdr *phdr;
651 	Elf_Auxargs *elf_auxargs = NULL;
652 	struct vmspace *vmspace;
653 	vm_prot_t prot;
654 	u_long text_size = 0, data_size = 0, total_size = 0;
655 	u_long text_addr = 0, data_addr = 0;
656 	u_long seg_size, seg_addr;
657 	u_long addr, entry = 0, proghdr = 0;
658 	int error = 0, i;
659 	const char *interp = NULL;
660 	Elf_Brandinfo *brand_info;
661 	char *path;
662 	struct thread *td = curthread;
663 	struct sysentvec *sv;
664 
665 	/*
666 	 * Do we have a valid ELF header ?
667 	 */
668 	if (__elfN(check_header)(hdr) != 0 || (hdr->e_type != ET_EXEC
669 	&& (!__elfN(can_exec_dyn) || hdr->e_type != ET_DYN)))
670 		return (-1);
671 
672 	/*
673 	 * From here on down, we return an errno, not -1, as we've
674 	 * detected an ELF file.
675 	 */
676 
677 	if ((hdr->e_phoff > PAGE_SIZE) ||
678 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
679 		/* Only support headers in first page for now */
680 		return (ENOEXEC);
681 	}
682 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
683 
684 	/*
685 	 * From this point on, we may have resources that need to be freed.
686 	 */
687 
688 	VOP_UNLOCK(imgp->vp, 0, td);
689 
690 	for (i = 0; i < hdr->e_phnum; i++) {
691 		switch (phdr[i].p_type) {
692 	  	case PT_INTERP:	/* Path to interpreter */
693 			if (phdr[i].p_filesz > MAXPATHLEN ||
694 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
695 				error = ENOEXEC;
696 				goto fail;
697 			}
698 			interp = imgp->image_header + phdr[i].p_offset;
699 			break;
700 		default:
701 			break;
702 		}
703 	}
704 
705 	brand_info = __elfN(get_brandinfo)(hdr, interp);
706 	if (brand_info == NULL) {
707 		uprintf("ELF binary type \"%u\" not known.\n",
708 		    hdr->e_ident[EI_OSABI]);
709 		error = ENOEXEC;
710 		goto fail;
711 	}
712 	sv = brand_info->sysvec;
713 	if (interp != NULL && brand_info->interp_newpath != NULL)
714 		interp = brand_info->interp_newpath;
715 
716 	exec_new_vmspace(imgp, sv);
717 
718 	vmspace = imgp->proc->p_vmspace;
719 
720 	for (i = 0; i < hdr->e_phnum; i++) {
721 		switch (phdr[i].p_type) {
722 		case PT_LOAD:	/* Loadable segment */
723 			prot = 0;
724 			if (phdr[i].p_flags & PF_X)
725   				prot |= VM_PROT_EXECUTE;
726 			if (phdr[i].p_flags & PF_W)
727   				prot |= VM_PROT_WRITE;
728 			if (phdr[i].p_flags & PF_R)
729   				prot |= VM_PROT_READ;
730 
731 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
732 			/*
733 			 * Some x86 binaries assume read == executable,
734 			 * notably the M3 runtime and therefore cvsup.
735 			 */
736 			if (prot & VM_PROT_READ)
737 				prot |= VM_PROT_EXECUTE;
738 #endif
739 
740 			if ((error = __elfN(load_section)(imgp->proc, vmspace,
741 			    imgp->vp, imgp->object, phdr[i].p_offset,
742 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
743 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
744 			    sv->sv_pagesize)) != 0)
745   				goto fail;
746 
747 			/*
748 			 * If this segment contains the program headers,
749 			 * remember their virtual address for the AT_PHDR
750 			 * aux entry. Static binaries don't usually include
751 			 * a PT_PHDR entry.
752 			 */
753 			if (phdr[i].p_offset == 0 &&
754 			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
755 				<= phdr[i].p_filesz)
756 				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
757 
758 			seg_addr = trunc_page(phdr[i].p_vaddr);
759 			seg_size = round_page(phdr[i].p_memsz +
760 			    phdr[i].p_vaddr - seg_addr);
761 
762 			/*
763 			 * Is this .text or .data?  We can't use
764 			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
765 			 * alpha terribly and possibly does other bad
766 			 * things so we stick to the old way of figuring
767 			 * it out:  If the segment contains the program
768 			 * entry point, it's a text segment, otherwise it
769 			 * is a data segment.
770 			 *
771 			 * Note that obreak() assumes that data_addr +
772 			 * data_size == end of data load area, and the ELF
773 			 * file format expects segments to be sorted by
774 			 * address.  If multiple data segments exist, the
775 			 * last one will be used.
776 			 */
777 			if (hdr->e_entry >= phdr[i].p_vaddr &&
778 			    hdr->e_entry < (phdr[i].p_vaddr +
779 			    phdr[i].p_memsz)) {
780 				text_size = seg_size;
781 				text_addr = seg_addr;
782 				entry = (u_long)hdr->e_entry;
783 			} else {
784 				data_size = seg_size;
785 				data_addr = seg_addr;
786 			}
787 			total_size += seg_size;
788 			break;
789 		case PT_PHDR: 	/* Program header table info */
790 			proghdr = phdr[i].p_vaddr;
791 			break;
792 		default:
793 			break;
794 		}
795 	}
796 
797 	if (data_addr == 0 && data_size == 0) {
798 		data_addr = text_addr;
799 		data_size = text_size;
800 	}
801 
802 	/*
803 	 * Check limits.  It should be safe to check the
804 	 * limits after loading the segments since we do
805 	 * not actually fault in all of the segments' pages.
806 	 */
807 	PROC_LOCK(imgp->proc);
808 	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
809 	    text_size > maxtsiz ||
810 	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
811 		PROC_UNLOCK(imgp->proc);
812 		error = ENOMEM;
813 		goto fail;
814 	}
815 
816 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
817 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
818 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
819 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
820 
821 	/*
822 	 * We load the dynamic linker where a userland call
823 	 * to mmap(0, ...) would put it.  The rationale behind this
824 	 * calculation is that it leaves room for the heap to grow to
825 	 * its maximum allowed size.
826 	 */
827 	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
828 	    lim_max(imgp->proc, RLIMIT_DATA));
829 	PROC_UNLOCK(imgp->proc);
830 
831 	imgp->entry_addr = entry;
832 
833 	imgp->proc->p_sysent = sv;
834 	if (interp != NULL && brand_info->emul_path != NULL &&
835 	    brand_info->emul_path[0] != '\0') {
836 		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
837 		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
838 		    interp);
839 		error = __elfN(load_file)(imgp->proc, path, &addr,
840 		    &imgp->entry_addr, sv->sv_pagesize);
841 		free(path, M_TEMP);
842 		if (error == 0)
843 			interp = NULL;
844 	}
845 	if (interp != NULL) {
846 		error = __elfN(load_file)(imgp->proc, interp, &addr,
847 		    &imgp->entry_addr, sv->sv_pagesize);
848 		if (error != 0) {
849 			uprintf("ELF interpreter %s not found\n", interp);
850 			goto fail;
851 		}
852 	}
853 
854 	/*
855 	 * Construct auxargs table (used by the fixup routine)
856 	 */
857 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
858 	elf_auxargs->execfd = -1;
859 	elf_auxargs->phdr = proghdr;
860 	elf_auxargs->phent = hdr->e_phentsize;
861 	elf_auxargs->phnum = hdr->e_phnum;
862 	elf_auxargs->pagesz = PAGE_SIZE;
863 	elf_auxargs->base = addr;
864 	elf_auxargs->flags = 0;
865 	elf_auxargs->entry = entry;
866 	elf_auxargs->trace = elf_trace;
867 
868 	imgp->auxargs = elf_auxargs;
869 	imgp->interpreted = 0;
870 
871 fail:
872 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
873 	return (error);
874 }
875 
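/*
 * Pick the store-word primitive matching the target ABI's word size
 * (suword32 or suword64) for writing into the new process's stack.
 */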
876 #define	suword __CONCAT(suword, __ELF_WORD_SIZE)
877 
878 int
879 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
880 {
881 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
882 	Elf_Addr *base;
883 	Elf_Addr *pos;
884 
885 	base = (Elf_Addr *)*stack_base;
886 	pos = base + (imgp->args->argc + imgp->args->envc + 2);
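	/*
	 * base points at the argument vector on the new stack; skipping the
	 * argc argv pointers plus their NULL terminator and the envc envp
	 * pointers plus their NULL terminator leaves pos at the slot where
	 * the AT_* auxiliary vector is built below.
	 */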
887 
888 	if (args->trace) {
889 		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
890 	}
891 	if (args->execfd != -1) {
892 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
893 	}
894 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
895 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
896 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
897 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
898 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
899 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
900 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
901 	AUXARGS_ENTRY(pos, AT_NULL, 0);
902 
903 	free(imgp->auxargs, M_TEMP);
904 	imgp->auxargs = NULL;
905 
906 	base--;
907 	suword(base, (long)imgp->args->argc);
908 	*stack_base = (register_t *)base;
909 	return (0);
910 }
911 
912 /*
913  * Code for generating ELF core dumps.
914  */
915 
916 typedef void (*segment_callback)(vm_map_entry_t, void *);
917 
918 /* Closure for cb_put_phdr(). */
919 struct phdr_closure {
920 	Elf_Phdr *phdr;		/* Program header to fill in */
921 	Elf_Off offset;		/* Offset of segment in core file */
922 };
923 
924 /* Closure for cb_size_segment(). */
925 struct sseg_closure {
926 	int count;		/* Count of writable segments. */
927 	size_t size;		/* Total size of all writable segments. */
928 };
929 
930 static void cb_put_phdr(vm_map_entry_t, void *);
931 static void cb_size_segment(vm_map_entry_t, void *);
932 static void each_writable_segment(struct thread *, segment_callback, void *);
933 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
934     int, void *, size_t);
935 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
936 static void __elfN(putnote)(void *, size_t *, const char *, int,
937     const void *, size_t);
938 
939 extern int osreldate;
940 
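/*
 * Write an ELF core dump of td's process to the vnode vp.  "limit" is the
 * largest core file size the caller allows (typically derived from the
 * process's core file size resource limit).
 */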
941 int
942 __elfN(coredump)(td, vp, limit)
943 	struct thread *td;
944 	struct vnode *vp;
945 	off_t limit;
946 {
947 	struct ucred *cred = td->td_ucred;
948 	int error = 0;
949 	struct sseg_closure seginfo;
950 	void *hdr;
951 	size_t hdrsize;
952 
953 	/* Size the program segments. */
954 	seginfo.count = 0;
955 	seginfo.size = 0;
956 	each_writable_segment(td, cb_size_segment, &seginfo);
957 
958 	/*
959 	 * Calculate the size of the core file header area by making
960 	 * a dry run of generating it.  Nothing is written, but the
961 	 * size is calculated.
962 	 */
963 	hdrsize = 0;
964 	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
965 
966 	if (hdrsize + seginfo.size >= limit)
967 		return (EFAULT);
968 
969 	/*
970 	 * Allocate memory for building the header, fill it up,
971 	 * and write it out.
972 	 */
973 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
974 	if (hdr == NULL) {
975 		return (EINVAL);
976 	}
977 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
978 
979 	/* Write the contents of all of the writable segments. */
980 	if (error == 0) {
981 		Elf_Phdr *php;
982 		off_t offset;
983 		int i;
984 
985 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
986 		offset = hdrsize;
987 		for (i = 0; i < seginfo.count; i++) {
988 			error = vn_rdwr_inchunks(UIO_WRITE, vp,
989 			    (caddr_t)(uintptr_t)php->p_vaddr,
990 			    php->p_filesz, offset, UIO_USERSPACE,
991 			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
992 			    curthread); /* XXXKSE */
993 			if (error != 0)
994 				break;
995 			offset += php->p_filesz;
996 			php++;
997 		}
998 	}
999 	free(hdr, M_TEMP);
1000 
1001 	return (error);
1002 }
1003 
1004 /*
1005  * A callback for each_writable_segment() to write out the segment's
1006  * program header entry.
1007  */
1008 static void
1009 cb_put_phdr(entry, closure)
1010 	vm_map_entry_t entry;
1011 	void *closure;
1012 {
1013 	struct phdr_closure *phc = (struct phdr_closure *)closure;
1014 	Elf_Phdr *phdr = phc->phdr;
1015 
1016 	phc->offset = round_page(phc->offset);
1017 
1018 	phdr->p_type = PT_LOAD;
1019 	phdr->p_offset = phc->offset;
1020 	phdr->p_vaddr = entry->start;
1021 	phdr->p_paddr = 0;
1022 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1023 	phdr->p_align = PAGE_SIZE;
1024 	phdr->p_flags = 0;
1025 	if (entry->protection & VM_PROT_READ)
1026 		phdr->p_flags |= PF_R;
1027 	if (entry->protection & VM_PROT_WRITE)
1028 		phdr->p_flags |= PF_W;
1029 	if (entry->protection & VM_PROT_EXECUTE)
1030 		phdr->p_flags |= PF_X;
1031 
1032 	phc->offset += phdr->p_filesz;
1033 	phc->phdr++;
1034 }
1035 
1036 /*
1037  * A callback for each_writable_segment() to gather information about
1038  * the number of segments and their total size.
1039  */
1040 static void
1041 cb_size_segment(entry, closure)
1042 	vm_map_entry_t entry;
1043 	void *closure;
1044 {
1045 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1046 
1047 	ssc->count++;
1048 	ssc->size += entry->end - entry->start;
1049 }
1050 
1051 /*
1052  * For each writable segment in the process's memory map, call the given
1053  * function with a pointer to the map entry and some arbitrary
1054  * caller-supplied data.
1055  */
1056 static void
1057 each_writable_segment(td, func, closure)
1058 	struct thread *td;
1059 	segment_callback func;
1060 	void *closure;
1061 {
1062 	struct proc *p = td->td_proc;
1063 	vm_map_t map = &p->p_vmspace->vm_map;
1064 	vm_map_entry_t entry;
1065 
1066 	for (entry = map->header.next; entry != &map->header;
1067 	    entry = entry->next) {
1068 		vm_object_t obj;
1069 
1070 		/*
1071 		 * Don't dump inaccessible mappings; in legacy coredump
1072 		 * mode, only read/write mappings are dumped.
1073 		 *
1074 		 * Note that read-only segments related to the ELF binary
1075 		 * are now marked MAP_ENTRY_NOCOREDUMP, so we no longer
1076 		 * need to arbitrarily ignore such segments.
1077 		 */
1078 		if (elf_legacy_coredump) {
1079 			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1080 				continue;
1081 		} else {
1082 			if ((entry->protection & VM_PROT_ALL) == 0)
1083 				continue;
1084 		}
1085 
1086 		/*
1087 		 * Don't include a memory segment in the coredump if
1088 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1089 		 * madvise(2).  Do not dump submaps (i.e. parts of the
1090 		 * kernel map).
1091 		 */
1092 		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1093 			continue;
1094 
1095 		if ((obj = entry->object.vm_object) == NULL)
1096 			continue;
1097 
1098 		/* Find the deepest backing object. */
1099 		while (obj->backing_object != NULL)
1100 			obj = obj->backing_object;
1101 
1102 		/* Ignore memory-mapped devices and such things. */
1103 		if (obj->type != OBJT_DEFAULT &&
1104 		    obj->type != OBJT_SWAP &&
1105 		    obj->type != OBJT_VNODE)
1106 			continue;
1107 
1108 		(*func)(entry, closure);
1109 	}
1110 }
1111 
1112 /*
1113  * Write the core file header to the file, including padding up to
1114  * the page boundary.
1115  */
1116 static int
1117 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1118 	struct thread *td;
1119 	struct vnode *vp;
1120 	struct ucred *cred;
1121 	int numsegs;
1122 	size_t hdrsize;
1123 	void *hdr;
1124 {
1125 	size_t off;
1126 
1127 	/* Fill in the header. */
1128 	bzero(hdr, hdrsize);
1129 	off = 0;
1130 	__elfN(puthdr)(td, hdr, &off, numsegs);
1131 
1132 	/* Write it to the core file. */
1133 	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1134 	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1135 	    td)); /* XXXKSE */
1136 }
1137 
1138 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1139 typedef struct prstatus32 elf_prstatus_t;
1140 typedef struct prpsinfo32 elf_prpsinfo_t;
1141 typedef struct fpreg32 elf_prfpregset_t;
1142 typedef struct fpreg32 elf_fpregset_t;
1143 typedef struct reg32 elf_gregset_t;
1144 #else
1145 typedef prstatus_t elf_prstatus_t;
1146 typedef prpsinfo_t elf_prpsinfo_t;
1147 typedef prfpregset_t elf_prfpregset_t;
1148 typedef prfpregset_t elf_fpregset_t;
1149 typedef gregset_t elf_gregset_t;
1150 #endif
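/*
 * When built as the 32-bit image activator on a 64-bit kernel
 * (COMPAT_IA32), core dump notes are written using the 32-bit register
 * and psinfo layouts so 32-bit debuggers can interpret the core file.
 */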
1151 
1152 static void
1153 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1154 {
1155 	struct {
1156 		elf_prstatus_t status;
1157 		elf_prfpregset_t fpregset;
1158 		elf_prpsinfo_t psinfo;
1159 	} *tempdata;
1160 	elf_prstatus_t *status;
1161 	elf_prfpregset_t *fpregset;
1162 	elf_prpsinfo_t *psinfo;
1163 	struct proc *p;
1164 	struct thread *thr;
1165 	size_t ehoff, noteoff, notesz, phoff;
1166 
1167 	p = td->td_proc;
1168 
1169 	ehoff = *off;
1170 	*off += sizeof(Elf_Ehdr);
1171 
1172 	phoff = *off;
1173 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
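	/* One extra program header is reserved for the PT_NOTE segment. */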
1174 
1175 	noteoff = *off;
1176 	/*
1177 	 * Don't allocate space for the notes if we're just calculating
1178 	 * the size of the header. We also don't collect the data.
1179 	 */
1180 	if (dst != NULL) {
1181 		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1182 		status = &tempdata->status;
1183 		fpregset = &tempdata->fpregset;
1184 		psinfo = &tempdata->psinfo;
1185 	} else {
1186 		tempdata = NULL;
1187 		status = NULL;
1188 		fpregset = NULL;
1189 		psinfo = NULL;
1190 	}
1191 
1192 	if (dst != NULL) {
1193 		psinfo->pr_version = PRPSINFO_VERSION;
1194 		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1195 		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1196 		/*
1197 		 * XXX - We don't fill in the command line arguments properly
1198 		 * yet.
1199 		 */
1200 		strlcpy(psinfo->pr_psargs, p->p_comm,
1201 		    sizeof(psinfo->pr_psargs));
1202 	}
1203 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1204 	    sizeof *psinfo);
1205 
1206 	/*
1207 	 * To have the debugger select the right thread (LWP) as the initial
1208 	 * thread, we dump the state of the thread passed to us in td first.
1209 	 * This is the thread that causes the core dump and thus likely to
1210 	 * be the right thread one wants to have selected in the debugger.
1211 	 */
1212 	thr = td;
1213 	while (thr != NULL) {
1214 		if (dst != NULL) {
1215 			status->pr_version = PRSTATUS_VERSION;
1216 			status->pr_statussz = sizeof(elf_prstatus_t);
1217 			status->pr_gregsetsz = sizeof(elf_gregset_t);
1218 			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1219 			status->pr_osreldate = osreldate;
1220 			status->pr_cursig = p->p_sig;
1221 			status->pr_pid = thr->td_tid;
1222 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1223 			fill_regs32(thr, &status->pr_reg);
1224 			fill_fpregs32(thr, fpregset);
1225 #else
1226 			fill_regs(thr, &status->pr_reg);
1227 			fill_fpregs(thr, fpregset);
1228 #endif
1229 		}
1230 		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1231 		    sizeof *status);
1232 		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1233 		    sizeof *fpregset);
1234 		/*
1235 		 * Allow for MD specific notes, as well as any MD
1236 		 * specific preparations for writing MI notes.
1237 		 */
1238 		__elfN(dump_thread)(thr, dst, off);
1239 
1240 		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1241 		    TAILQ_NEXT(thr, td_plist);
1242 		if (thr == td)
1243 			thr = TAILQ_NEXT(thr, td_plist);
1244 	}
1245 
1246 	notesz = *off - noteoff;
1247 
1248 	if (dst != NULL)
1249 		free(tempdata, M_TEMP);
1250 
1251 	/* Align up to a page boundary for the program segments. */
1252 	*off = round_page(*off);
1253 
1254 	if (dst != NULL) {
1255 		Elf_Ehdr *ehdr;
1256 		Elf_Phdr *phdr;
1257 		struct phdr_closure phc;
1258 
1259 		/*
1260 		 * Fill in the ELF header.
1261 		 */
1262 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1263 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1264 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1265 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1266 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1267 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1268 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1269 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1270 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1271 		ehdr->e_ident[EI_ABIVERSION] = 0;
1272 		ehdr->e_ident[EI_PAD] = 0;
1273 		ehdr->e_type = ET_CORE;
1274 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1275 		ehdr->e_machine = EM_386;
1276 #else
1277 		ehdr->e_machine = ELF_ARCH;
1278 #endif
1279 		ehdr->e_version = EV_CURRENT;
1280 		ehdr->e_entry = 0;
1281 		ehdr->e_phoff = phoff;
1282 		ehdr->e_flags = 0;
1283 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1284 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1285 		ehdr->e_phnum = numsegs + 1;
1286 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1287 		ehdr->e_shnum = 0;
1288 		ehdr->e_shstrndx = SHN_UNDEF;
1289 
1290 		/*
1291 		 * Fill in the program header entries.
1292 		 */
1293 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1294 
1295 		/* The note segment. */
1296 		phdr->p_type = PT_NOTE;
1297 		phdr->p_offset = noteoff;
1298 		phdr->p_vaddr = 0;
1299 		phdr->p_paddr = 0;
1300 		phdr->p_filesz = notesz;
1301 		phdr->p_memsz = 0;
1302 		phdr->p_flags = 0;
1303 		phdr->p_align = 0;
1304 		phdr++;
1305 
1306 		/* All the writable segments from the program. */
1307 		phc.phdr = phdr;
1308 		phc.offset = *off;
1309 		each_writable_segment(td, cb_put_phdr, &phc);
1310 	}
1311 }
1312 
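/*
 * Emit one ELF note record at *off, or, when dst is NULL, merely advance
 * *off by the size the record would occupy.  The layout produced is the
 * Elf_Note header (n_namesz, n_descsz, n_type) followed by the name and
 * then the descriptor, each padded to an Elf_Size boundary.
 */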
1313 static void
1314 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1315     const void *desc, size_t descsz)
1316 {
1317 	Elf_Note note;
1318 
1319 	note.n_namesz = strlen(name) + 1;
1320 	note.n_descsz = descsz;
1321 	note.n_type = type;
1322 	if (dst != NULL)
1323 		bcopy(&note, (char *)dst + *off, sizeof note);
1324 	*off += sizeof note;
1325 	if (dst != NULL)
1326 		bcopy(name, (char *)dst + *off, note.n_namesz);
1327 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1328 	if (dst != NULL)
1329 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1330 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1331 }
1332 
1333 /*
1334  * Tell kern_execve.c about it, with a little help from the linker.
1335  */
1336 static struct execsw __elfN(execsw) = {
1337 	__CONCAT(exec_, __elfN(imgact)),
1338 	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1339 };
1340 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
1341