xref: /freebsd/sys/kern/imgact_elf.c (revision d2387d42b8da231a5b95cbc313825fb2aadf26f6)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 Søren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/exec.h>
36 #include <sys/fcntl.h>
37 #include <sys/imgact.h>
38 #include <sys/imgact_elf.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mutex.h>
43 #include <sys/mman.h>
44 #include <sys/namei.h>
45 #include <sys/pioctl.h>
46 #include <sys/proc.h>
47 #include <sys/procfs.h>
48 #include <sys/resourcevar.h>
49 #include <sys/systm.h>
50 #include <sys/signalvar.h>
51 #include <sys/stat.h>
52 #include <sys/sx.h>
53 #include <sys/syscall.h>
54 #include <sys/sysctl.h>
55 #include <sys/sysent.h>
56 #include <sys/vnode.h>
57 
58 #include <vm/vm.h>
59 #include <vm/vm_kern.h>
60 #include <vm/vm_param.h>
61 #include <vm/pmap.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_object.h>
64 #include <vm/vm_extern.h>
65 
66 #include <machine/elf.h>
67 #include <machine/md_var.h>
68 
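/*
 * Byte offset in e_ident[] where FreeBSD 3.x wrote its string brand
 * ("FreeBSD"); it overlaps what is now EI_ABIVERSION and the padding
 * bytes.  Used by the compat_3_brand check in __elfN(get_brandinfo)().
 */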
69 #define OLD_EI_BRAND	8
70 
71 static int __elfN(check_header)(const Elf_Ehdr *hdr);
72 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
73     const char *interp);
74 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
75     u_long *entry, size_t pagesize);
76 static int __elfN(load_section)(struct proc *p,
77     struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
78     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
79     vm_prot_t prot, size_t pagesize);
80 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
81 
82 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
83     "");
84 
85 int __elfN(fallback_brand) = -1;
86 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
87     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
88     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
89 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
90     &__elfN(fallback_brand));
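/*
 * The fallback brand can be set from loader.conf(5) or at runtime with
 * sysctl(8), e.g. "kern.elf32.fallback_brand=3" (ELFOSABI_LINUX) to run
 * unbranded Linux binaries; -1 leaves the fallback disabled.
 */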
91 
92 static int elf_trace = 0;
93 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
94 
95 static int elf_legacy_coredump = 0;
96 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
97     &elf_legacy_coredump, 0, "");
98 
99 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
100 
101 int
102 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
103 {
104 	int i;
105 
106 	for (i = 0; i < MAX_BRANDS; i++) {
107 		if (elf_brand_list[i] == NULL) {
108 			elf_brand_list[i] = entry;
109 			break;
110 		}
111 	}
112 	if (i == MAX_BRANDS)
113 		return (-1);
114 	return (0);
115 }
116 
117 int
118 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
119 {
120 	int i;
121 
122 	for (i = 0; i < MAX_BRANDS; i++) {
123 		if (elf_brand_list[i] == entry) {
124 			elf_brand_list[i] = NULL;
125 			break;
126 		}
127 	}
128 	if (i == MAX_BRANDS)
129 		return (-1);
130 	return (0);
131 }
132 
133 int
134 __elfN(brand_inuse)(Elf_Brandinfo *entry)
135 {
136 	struct proc *p;
137 	int rval = FALSE;
138 
139 	sx_slock(&allproc_lock);
140 	LIST_FOREACH(p, &allproc, p_list) {
141 		if (p->p_sysent == entry->sysvec) {
142 			rval = TRUE;
143 			break;
144 		}
145 	}
146 	sx_sunlock(&allproc_lock);
147 
148 	return (rval);
149 }
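
/*
 * Usage sketch (hypothetical module code, shown for the 32-bit flavor
 * of these functions): an ABI emulator registers its brand when it
 * loads and may only unregister once no process still uses its
 * sysentvec:
 *
 *	static Elf_Brandinfo example_brand = {
 *		.brand		= ELFOSABI_FREEBSD,
 *		.machine	= EM_386,
 *		.compat_3_brand	= "FreeBSD",
 *		.interp_path	= "/libexec/ld-elf.so.1",
 *		.sysvec		= &example_sysvec,	(assumed to exist)
 *	};
 *
 *	elf32_insert_brand_entry(&example_brand);
 *	...
 *	if (elf32_brand_inuse(&example_brand))
 *		return (EBUSY);
 *	elf32_remove_brand_entry(&example_brand);
 */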
150 
151 static Elf_Brandinfo *
152 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
153 {
154 	Elf_Brandinfo *bi;
155 	int i;
156 
157 	/*
158 	 * We support three types of branding -- (1) the ELF EI_OSABI field
159 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
160 	 * branding within the ELF header, and (3) the path in the `interp_path'
161 	 * field.  We should also look for a ".note.ABI-tag" ELF section, now
162 	 * present in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
163 	 */
164 
165 	/* If the executable has a brand, search for it in the brand list. */
166 	for (i = 0; i < MAX_BRANDS; i++) {
167 		bi = elf_brand_list[i];
168 		if (bi != NULL && hdr->e_machine == bi->machine &&
169 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
170 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
171 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
172 			return (bi);
173 	}
174 
175 	/* Lacking a known brand, search for a recognized interpreter. */
176 	if (interp != NULL) {
177 		for (i = 0; i < MAX_BRANDS; i++) {
178 			bi = elf_brand_list[i];
179 			if (bi != NULL && hdr->e_machine == bi->machine &&
180 			    strcmp(interp, bi->interp_path) == 0)
181 				return (bi);
182 		}
183 	}
184 
185 	/* Lacking a recognized interpreter, try the default brand. */
186 	for (i = 0; i < MAX_BRANDS; i++) {
187 		bi = elf_brand_list[i];
188 		if (bi != NULL && hdr->e_machine == bi->machine &&
189 		    __elfN(fallback_brand) == bi->brand)
190 			return (bi);
191 	}
192 	return (NULL);
193 }
194 
195 static int
196 __elfN(check_header)(const Elf_Ehdr *hdr)
197 {
198 	Elf_Brandinfo *bi;
199 	int i;
200 
201 	if (!IS_ELF(*hdr) ||
202 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
203 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
204 	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
205 	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
206 	    hdr->e_version != ELF_TARG_VER)
207 		return (ENOEXEC);
208 
209 	/*
210 	 * Make sure we have at least one brand for this machine.
211 	 */
212 
213 	for (i = 0; i < MAX_BRANDS; i++) {
214 		bi = elf_brand_list[i];
215 		if (bi != NULL && bi->machine == hdr->e_machine)
216 			break;
217 	}
218 	if (i == MAX_BRANDS)
219 		return (ENOEXEC);
220 
221 	return (0);
222 }
223 
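/*
 * Map the sub-page range [start, end) into `map' with the requested
 * protection.  Because the range does not cover whole pages the data
 * cannot be shared with the backing object: the destination page is
 * created in the target map and the bytes are copied into it through
 * a transient read-only mapping of the object in exec_map.
 */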
224 static int
225 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
226 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
227 	vm_prot_t max)
228 {
229 	int error, rv;
230 	vm_offset_t off;
231 	vm_offset_t data_buf = 0;
232 
233 	/*
234 	 * Create the page if it doesn't exist yet. Ignore errors.
235 	 */
236 	vm_map_lock(map);
237 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
238 	    max, 0);
239 	vm_map_unlock(map);
240 
241 	/*
242 	 * Find the page from the underlying object.
243 	 */
244 	if (object) {
245 		vm_object_reference(object);
246 		rv = vm_map_find(exec_map,
247 				 object,
248 				 trunc_page(offset),
249 				 &data_buf,
250 				 PAGE_SIZE,
251 				 TRUE,
252 				 VM_PROT_READ,
253 				 VM_PROT_ALL,
254 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
255 		if (rv != KERN_SUCCESS) {
256 			vm_object_deallocate(object);
257 			return (rv);
258 		}
259 
260 		off = offset - trunc_page(offset);
261 		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
262 		    end - start);
263 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
264 		if (error) {
265 			return (KERN_FAILURE);
266 		}
267 	}
268 
269 	return (KERN_SUCCESS);
270 }
271 
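/*
 * Insert a mapping of `object' at `offset' covering [start, end) into
 * `map'.  Unaligned edges are handled by __elfN(map_partial)().  If the
 * remaining file offset is not page aligned, the pages cannot be shared
 * with the vnode and the data is instead copied a page at a time through
 * a transient two-page window in exec_map; otherwise the object is
 * inserted directly and its pages are shared copy-on-write.
 */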
272 static int
273 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
274 	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
275 	vm_prot_t max, int cow)
276 {
277 	vm_offset_t data_buf, off;
278 	vm_size_t sz;
279 	int error, rv;
280 
281 	if (start != trunc_page(start)) {
282 		rv = __elfN(map_partial)(map, object, offset, start,
283 		    round_page(start), prot, max);
284 		if (rv)
285 			return (rv);
286 		offset += round_page(start) - start;
287 		start = round_page(start);
288 	}
289 	if (end != round_page(end)) {
290 		rv = __elfN(map_partial)(map, object, offset +
291 		    trunc_page(end) - start, trunc_page(end), end, prot, max);
292 		if (rv)
293 			return (rv);
294 		end = trunc_page(end);
295 	}
296 	if (end > start) {
297 		if (offset & PAGE_MASK) {
298 			/*
299 			 * The mapping is not page aligned. This means we have
300 			 * to copy the data. Sigh.
301 			 */
302 			rv = vm_map_find(map, 0, 0, &start, end - start,
303 			    FALSE, prot, max, 0);
304 			if (rv)
305 				return (rv);
306 			data_buf = 0;
307 			while (start < end) {
308 				vm_object_reference(object);
309 				rv = vm_map_find(exec_map,
310 						 object,
311 						 trunc_page(offset),
312 						 &data_buf,
313 						 2 * PAGE_SIZE,
314 						 TRUE,
315 						 VM_PROT_READ,
316 						 VM_PROT_ALL,
317 						 (MAP_COPY_ON_WRITE
318 						  | MAP_PREFAULT_PARTIAL));
319 				if (rv != KERN_SUCCESS) {
320 					vm_object_deallocate(object);
321 					return (rv);
322 				}
323 				off = offset - trunc_page(offset);
324 				sz = end - start;
325 				if (sz > PAGE_SIZE)
326 					sz = PAGE_SIZE;
327 				error = copyout((caddr_t)data_buf + off,
328 				    (caddr_t)start, sz);
329 				vm_map_remove(exec_map, data_buf,
330 				    data_buf + 2 * PAGE_SIZE);
331 				if (error) {
332 					return (KERN_FAILURE);
333 				}
334 				start += sz;
335 			}
336 			rv = KERN_SUCCESS;
337 		} else {
338 			vm_map_lock(map);
339 			rv = vm_map_insert(map, object, offset, start, end,
340 			    prot, max, cow);
341 			vm_map_unlock(map);
342 		}
343 		return (rv);
344 	} else {
345 		return (KERN_SUCCESS);
346 	}
347 }
348 
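/*
 * Map a PT_LOAD segment into the process: first the file-backed part
 * ([offset, offset + filsz) at `vmaddr'), then anonymous zero-fill
 * memory up to `memsz' (the bss), copying by hand the initialized
 * bytes that share a page with the start of the bss.
 */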
349 static int
350 __elfN(load_section)(struct proc *p, struct vmspace *vmspace,
351 	struct vnode *vp, vm_object_t object, vm_offset_t offset,
352 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
353 	size_t pagesize)
354 {
355 	size_t map_len;
356 	vm_offset_t map_addr;
357 	int error, rv, cow;
358 	size_t copy_len;
359 	vm_offset_t file_addr;
360 	vm_offset_t data_buf = 0;
361 
362 	GIANT_REQUIRED;
363 
364 	error = 0;
365 
366 	/*
367 	 * It's necessary to fail if the filsz + offset taken from the
368 	 * header is greater than the actual file pager object's size.
369 	 * If we were to allow this, then the vm_map_find() below would
370 	 * walk right off the end of the file object and into the ether.
371 	 *
372 	 * While I'm here, might as well check for something else that
373 	 * is invalid: filsz cannot be greater than memsz.
374 	 */
375 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
376 	    filsz > memsz) {
377 		uprintf("elf_load_section: truncated ELF file\n");
378 		return (ENOEXEC);
379 	}
380 
381 #define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
382 #define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
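/*
 * These generalize trunc_page()/round_page() to the sysentvec page
 * size, which need not equal PAGE_SIZE (e.g. for a 32-bit ABI hosted
 * on a 64-bit platform).  For instance, with ps = 0x2000 (8KB):
 *
 *	trunc_page_ps(0x3456, 0x2000) == 0x2000
 *	round_page_ps(0x3456, 0x2000) == 0x4000
 */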
383 
384 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
385 	file_addr = trunc_page_ps(offset, pagesize);
386 
387 	/*
388 	 * We have two choices.  We can either clear the data in the last page
389 	 * of an oversized mapping, or we can start the anon mapping a page
390 	 * early and copy the initialized data into that first page.  We
391 	 * choose the second.
392 	 */
393 	if (memsz > filsz)
394 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
395 	else
396 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
397 
398 	if (map_len != 0) {
399 		vm_object_reference(object);
400 
401 		/* cow flags: don't dump readonly sections in core */
402 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
403 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
404 
405 		rv = __elfN(map_insert)(&vmspace->vm_map,
406 				      object,
407 				      file_addr,	/* file offset */
408 				      map_addr,		/* virtual start */
409 				      map_addr + map_len,/* virtual end */
410 				      prot,
411 				      VM_PROT_ALL,
412 				      cow);
413 		if (rv != KERN_SUCCESS) {
414 			vm_object_deallocate(object);
415 			return (EINVAL);
416 		}
417 
418 		/* we can stop now if we've covered it all */
419 		if (memsz == filsz) {
420 			return (0);
421 		}
422 	}
423 
425 	/*
426 	 * We have to get the remaining bit of the file into the first part
427 	 * of the oversized map segment.  This is normally because the .data
428 	 * segment in the file is extended to provide bss.  It's a neat idea
429 	 * to try and save a page, but it's a pain in the behind to implement.
430 	 */
431 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
432 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
433 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
434 	    map_addr;
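	/*
	 * Example with made-up numbers and 4KB pages: offset = 0x1200 and
	 * filsz = 0x2a00 give offset + filsz = 0x3c00, so copy_len = 0xc00:
	 * the initialized bytes that share a page with the start of the bss.
	 */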
435 
436 	/* This had damn well better be true! */
437 	if (map_len != 0) {
438 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
439 		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
440 		if (rv != KERN_SUCCESS) {
441 			return (EINVAL);
442 		}
443 	}
444 
445 	if (copy_len != 0) {
446 		vm_offset_t off;
447 		vm_object_reference(object);
448 		rv = vm_map_find(exec_map,
449 				 object,
450 				 trunc_page(offset + filsz),
451 				 &data_buf,
452 				 PAGE_SIZE,
453 				 TRUE,
454 				 VM_PROT_READ,
455 				 VM_PROT_ALL,
456 				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
457 		if (rv != KERN_SUCCESS) {
458 			vm_object_deallocate(object);
459 			return (EINVAL);
460 		}
461 
462 		/* send the page fragment to user space */
463 		off = trunc_page_ps(offset + filsz, pagesize) -
464 		    trunc_page(offset + filsz);
465 		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
466 		    copy_len);
467 		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
468 		if (error) {
469 			return (error);
470 		}
471 	}
472 
473 	/*
474 	 * Set the mapping to the specified protection.
475 	 * XXX had better undo the damage from pasting over the cracks here!
476 	 */
477 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
478 	    round_page(map_addr + map_len),  prot, FALSE);
479 
480 	return (error);
481 }
482 
483 /*
484  * Load the file "file" into memory.  It may be either a shared object
485  * or an executable.
486  *
487  * The "addr" reference parameter is in/out.  On entry, it specifies
488  * the address where a shared object should be loaded.  If the file is
489  * an executable, this value is ignored.  On exit, "addr" specifies
490  * where the file was actually loaded.
491  *
492  * The "entry" reference parameter is out only.  On exit, it specifies
493  * the entry point for the loaded file.
494  */
495 static int
496 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
497 	u_long *entry, size_t pagesize)
498 {
499 	struct {
500 		struct nameidata nd;
501 		struct vattr attr;
502 		struct image_params image_params;
503 	} *tempdata;
504 	const Elf_Ehdr *hdr = NULL;
505 	const Elf_Phdr *phdr = NULL;
506 	struct nameidata *nd;
507 	struct vmspace *vmspace = p->p_vmspace;
508 	struct vattr *attr;
509 	struct image_params *imgp;
510 	vm_prot_t prot;
511 	u_long rbase;
512 	u_long base_addr = 0;
513 	int error, i, numsegs;
514 
515 	if (curthread->td_proc != p)
516 		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */
517 
518 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
519 	nd = &tempdata->nd;
520 	attr = &tempdata->attr;
521 	imgp = &tempdata->image_params;
522 
523 	/*
524 	 * Initialize part of the common data
525 	 */
526 	imgp->proc = p;
527 	imgp->userspace_argv = NULL;
528 	imgp->userspace_envv = NULL;
529 	imgp->attr = attr;
530 	imgp->firstpage = NULL;
531 	imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);
532 	imgp->object = NULL;
533 	imgp->execlabel = NULL;
534 
535 	if (imgp->image_header == NULL) {
536 		nd->ni_vp = NULL;
537 		error = ENOMEM;
538 		goto fail;
539 	}
540 
541 	/* XXXKSE */
542 	NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
543 
544 	if ((error = namei(nd)) != 0) {
545 		nd->ni_vp = NULL;
546 		goto fail;
547 	}
548 	NDFREE(nd, NDF_ONLY_PNBUF);
549 	imgp->vp = nd->ni_vp;
550 
551 	/*
552 	 * Check permissions, modes, uid, etc on the file, and "open" it.
553 	 */
554 	error = exec_check_permissions(imgp);
555 	if (error) {
556 		VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
557 		goto fail;
558 	}
559 
560 	error = exec_map_first_page(imgp);
561 	/*
562 	 * Also make certain that the interpreter stays the same, so set
563 	 * its VV_TEXT flag, too.
564 	 */
565 	if (error == 0)
566 		nd->ni_vp->v_vflag |= VV_TEXT;
567 
568 	VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
569 	vm_object_reference(imgp->object);
570 
571 	VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
572 	if (error)
573 		goto fail;
574 
575 	hdr = (const Elf_Ehdr *)imgp->image_header;
576 	if ((error = __elfN(check_header)(hdr)) != 0)
577 		goto fail;
578 	if (hdr->e_type == ET_DYN)
579 		rbase = *addr;
580 	else if (hdr->e_type == ET_EXEC)
581 		rbase = 0;
582 	else {
583 		error = ENOEXEC;
584 		goto fail;
585 	}
586 
587 	/* Only support headers that fit within the first page for now   */
588 	/*   (multiplication of two Elf_Half fields will not overflow).  */
589 	if ((hdr->e_phoff > PAGE_SIZE) ||
590 	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
591 		error = ENOEXEC;
592 		goto fail;
593 	}
594 
595 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
596 
597 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
598 		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
599 			prot = 0;
600 			if (phdr[i].p_flags & PF_X)
601   				prot |= VM_PROT_EXECUTE;
602 			if (phdr[i].p_flags & PF_W)
603   				prot |= VM_PROT_WRITE;
604 			if (phdr[i].p_flags & PF_R)
605   				prot |= VM_PROT_READ;
606 
607 			if ((error = __elfN(load_section)(p, vmspace,
608 			    nd->ni_vp, imgp->object, phdr[i].p_offset,
609 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
610 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
611 			    pagesize)) != 0)
612 				goto fail;
613 			/*
614 			 * Establish the base address if this is the
615 			 * first segment.
616 			 */
617 			if (numsegs == 0)
618   				base_addr = trunc_page(phdr[i].p_vaddr +
619 				    rbase);
620 			numsegs++;
621 		}
622 	}
623 	*addr = base_addr;
624 	*entry = (unsigned long)hdr->e_entry + rbase;
625 
626 fail:
627 	if (imgp->firstpage)
628 		exec_unmap_first_page(imgp);
629 	if (imgp->image_header)
630 		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
631 		    PAGE_SIZE);
632 	if (imgp->object)
633 		vm_object_deallocate(imgp->object);
634 
635 	if (nd->ni_vp)
636 		vrele(nd->ni_vp);
637 
638 	free(tempdata, M_TEMP);
639 
640 	return (error);
641 }
642 
643 static int
644 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
645 {
646 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
647 	const Elf_Phdr *phdr;
648 	Elf_Auxargs *elf_auxargs = NULL;
649 	struct vmspace *vmspace;
650 	vm_prot_t prot;
651 	u_long text_size = 0, data_size = 0, total_size = 0;
652 	u_long text_addr = 0, data_addr = 0;
653 	u_long seg_size, seg_addr;
654 	u_long addr, entry = 0, proghdr = 0;
655 	int error, i;
656 	const char *interp = NULL;
657 	Elf_Brandinfo *brand_info;
658 	char *path;
659 	struct thread *td = curthread;
660 	struct sysentvec *sv;
661 
662 	GIANT_REQUIRED;
663 
664 	/*
665 	 * Do we have a valid ELF header ?
666 	 */
667 	if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
668 		return (-1);
669 
670 	/*
671 	 * From here on down, we return an errno, not -1, as we've
672 	 * detected an ELF file.
673 	 */
674 
675 	if ((hdr->e_phoff > PAGE_SIZE) ||
676 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
677 		/* Only support headers in the first page for now */
678 		return (ENOEXEC);
679 	}
680 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
681 
682 	/*
683 	 * From this point on, we may have resources that need to be freed.
684 	 */
685 
686 	VOP_UNLOCK(imgp->vp, 0, td);
687 
688 	for (i = 0; i < hdr->e_phnum; i++) {
689 		switch (phdr[i].p_type) {
690 		case PT_INTERP:	/* Path to interpreter */
691 			if (phdr[i].p_filesz > MAXPATHLEN ||
692 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
693 				error = ENOEXEC;
694 				goto fail;
695 			}
696 			interp = imgp->image_header + phdr[i].p_offset;
697 			break;
698 		default:
699 			break;
700 		}
701 	}
702 
703 	brand_info = __elfN(get_brandinfo)(hdr, interp);
704 	if (brand_info == NULL) {
705 		uprintf("ELF binary type \"%u\" not known.\n",
706 		    hdr->e_ident[EI_OSABI]);
707 		error = ENOEXEC;
708 		goto fail;
709 	}
710 	sv = brand_info->sysvec;
711 	if (interp != NULL && brand_info->interp_newpath != NULL)
712 		interp = brand_info->interp_newpath;
713 
714 	if ((error = exec_extract_strings(imgp)) != 0)
715 		goto fail;
716 
717 	exec_new_vmspace(imgp, sv);
718 
719 	vmspace = imgp->proc->p_vmspace;
720 
721 	for (i = 0; i < hdr->e_phnum; i++) {
722 		switch (phdr[i].p_type) {
723 		case PT_LOAD:	/* Loadable segment */
724 			prot = 0;
725 			if (phdr[i].p_flags & PF_X)
726   				prot |= VM_PROT_EXECUTE;
727 			if (phdr[i].p_flags & PF_W)
728   				prot |= VM_PROT_WRITE;
729 			if (phdr[i].p_flags & PF_R)
730   				prot |= VM_PROT_READ;
731 
732 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
733 			/*
734 			 * Some x86 binaries assume read == executable,
735 			 * notably the M3 runtime and therefore cvsup.
736 			 */
737 			if (prot & VM_PROT_READ)
738 				prot |= VM_PROT_EXECUTE;
739 #endif
740 
741 			if ((error = __elfN(load_section)(imgp->proc, vmspace,
742 			    imgp->vp, imgp->object, phdr[i].p_offset,
743 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
744 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
745 			    sv->sv_pagesize)) != 0)
746   				goto fail;
747 
748 			seg_addr = trunc_page(phdr[i].p_vaddr);
749 			seg_size = round_page(phdr[i].p_memsz +
750 			    phdr[i].p_vaddr - seg_addr);
751 
752 			/*
753 			 * Is this .text or .data?  We can't use
754 			 * VM_PROT_WRITE or VM_PROT_EXEC; it breaks the
755 			 * alpha terribly and possibly does other bad
756 			 * things so we stick to the old way of figuring
757 			 * it out:  If the segment contains the program
758 			 * entry point, it's a text segment, otherwise it
759 			 * is a data segment.
760 			 *
761 			 * Note that obreak() assumes that data_addr +
762 			 * data_size == end of data load area, and the ELF
763 			 * file format expects segments to be sorted by
764 			 * address.  If multiple data segments exist, the
765 			 * last one will be used.
766 			 */
767 			if (hdr->e_entry >= phdr[i].p_vaddr &&
768 			    hdr->e_entry < (phdr[i].p_vaddr +
769 			    phdr[i].p_memsz)) {
770 				text_size = seg_size;
771 				text_addr = seg_addr;
772 				entry = (u_long)hdr->e_entry;
773 			} else {
774 				data_size = seg_size;
775 				data_addr = seg_addr;
776 			}
777 			total_size += seg_size;
778 			break;
779 		case PT_PHDR: 	/* Program header table info */
780 			proghdr = phdr[i].p_vaddr;
781 			break;
782 		default:
783 			break;
784 		}
785 	}
786 
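	/*
	 * A binary whose only loadable segment contains the entry point
	 * ends up with no data segment at all; fall back to the text
	 * segment so that the limit checks below and obreak() still see
	 * a valid data region.
	 */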
787 	if (data_addr == 0 && data_size == 0) {
788 		data_addr = text_addr;
789 		data_size = text_size;
790 	}
791 
792 	/*
793 	 * Check limits.  It should be safe to check the
794 	 * limits after loading the segments since we do
795 	 * not actually fault in all the segments pages.
796 	 */
797 	PROC_LOCK(imgp->proc);
798 	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
799 	    text_size > maxtsiz ||
800 	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
801 		PROC_UNLOCK(imgp->proc);
802 		error = ENOMEM;
803 		goto fail;
804 	}
805 
806 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
807 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
808 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
809 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
810 
811 	/*
812 	 * We load the dynamic linker where a userland call
813 	 * to mmap(0, ...) would put it.  The rationale behind this
814 	 * calculation is that it leaves room for the heap to grow to
815 	 * its maximum allowed size.
816 	 */
817 	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
818 	    lim_max(imgp->proc, RLIMIT_DATA));
819 	PROC_UNLOCK(imgp->proc);
820 
821 	imgp->entry_addr = entry;
822 
823 	imgp->proc->p_sysent = sv;
824 	if (interp != NULL && brand_info->emul_path != NULL &&
825 	    brand_info->emul_path[0] != '\0') {
826 		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
827 		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
828 		    interp);
829 		error = __elfN(load_file)(imgp->proc, path, &addr,
830 		    &imgp->entry_addr, sv->sv_pagesize);
831 		free(path, M_TEMP);
832 		if (error == 0)
833 			interp = NULL;
834 	}
835 	if (interp != NULL) {
836 		error = __elfN(load_file)(imgp->proc, interp, &addr,
837 		    &imgp->entry_addr, sv->sv_pagesize);
838 		if (error != 0) {
839 			uprintf("ELF interpreter %s not found\n", interp);
840 			goto fail;
841 		}
842 	}
843 
844 	/*
845 	 * Construct auxargs table (used by the fixup routine)
846 	 */
847 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
848 	elf_auxargs->execfd = -1;
849 	elf_auxargs->phdr = proghdr;
850 	elf_auxargs->phent = hdr->e_phentsize;
851 	elf_auxargs->phnum = hdr->e_phnum;
852 	elf_auxargs->pagesz = PAGE_SIZE;
853 	elf_auxargs->base = addr;
854 	elf_auxargs->flags = 0;
855 	elf_auxargs->entry = entry;
856 	elf_auxargs->trace = elf_trace;
857 
858 	imgp->auxargs = elf_auxargs;
859 	imgp->interpreted = 0;
860 
861 fail:
862 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
863 	return (error);
864 }
865 
866 #define	suword __CONCAT(suword, __ELF_WORD_SIZE)
867 
868 int
869 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
870 {
871 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
872 	Elf_Addr *base;
873 	Elf_Addr *pos;
874 
875 	base = (Elf_Addr *)*stack_base;
876 	pos = base + (imgp->argc + imgp->envc + 2);
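	/*
	 * base points at the argv array built by execve(2); skipping the
	 * argc + envc pointers plus their two NULL terminators lands pos
	 * on the free space where the auxiliary vector is written.  The
	 * final layout (argc is stored at base - 1 below) is:
	 *
	 *	[argc][argv0..argvN][NULL][env0..envM][NULL][auxv...]
	 */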
877 
878 	if (args->trace) {
879 		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
880 	}
881 	if (args->execfd != -1) {
882 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
883 	}
884 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
885 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
886 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
887 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
888 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
889 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
890 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
891 	AUXARGS_ENTRY(pos, AT_NULL, 0);
892 
893 	free(imgp->auxargs, M_TEMP);
894 	imgp->auxargs = NULL;
895 
896 	base--;
897 	suword(base, (long)imgp->argc);
898 	*stack_base = (register_t *)base;
899 	return (0);
900 }
901 
902 /*
903  * Code for generating ELF core dumps.
904  */
905 
906 typedef void (*segment_callback)(vm_map_entry_t, void *);
907 
908 /* Closure for cb_put_phdr(). */
909 struct phdr_closure {
910 	Elf_Phdr *phdr;		/* Program header to fill in */
911 	Elf_Off offset;		/* Offset of segment in core file */
912 };
913 
914 /* Closure for cb_size_segment(). */
915 struct sseg_closure {
916 	int count;		/* Count of writable segments. */
917 	size_t size;		/* Total size of all writable segments. */
918 };
919 
920 static void cb_put_phdr(vm_map_entry_t, void *);
921 static void cb_size_segment(vm_map_entry_t, void *);
922 static void each_writable_segment(struct proc *, segment_callback, void *);
923 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
924     int, void *, size_t);
925 static void __elfN(puthdr)(struct proc *, void *, size_t *, int);
926 static void __elfN(putnote)(void *, size_t *, const char *, int,
927     const void *, size_t);
928 
929 extern int osreldate;
930 
931 int
932 __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit)
936 {
937 	struct proc *p = td->td_proc;
938 	struct ucred *cred = td->td_ucred;
939 	int error = 0;
940 	struct sseg_closure seginfo;
941 	void *hdr;
942 	size_t hdrsize;
943 
944 	/* Size the program segments. */
945 	seginfo.count = 0;
946 	seginfo.size = 0;
947 	each_writable_segment(p, cb_size_segment, &seginfo);
948 
949 	/*
950 	 * Calculate the size of the core file header area by making
951 	 * a dry run of generating it.  Nothing is written, but the
952 	 * size is calculated.
953 	 */
954 	hdrsize = 0;
955 	__elfN(puthdr)(p, (void *)NULL, &hdrsize, seginfo.count);
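	/*
	 * The header area just sized consists of the ELF header, one
	 * program header per writable segment plus one for the notes,
	 * and the note data itself, rounded up to a page boundary; the
	 * segment contents follow it in the core file.
	 */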
956 
957 	if (hdrsize + seginfo.size >= limit)
958 		return (EFAULT);
959 
960 	/*
961 	 * Allocate memory for building the header, fill it up,
962 	 * and write it out.
963 	 */
964 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
965 	if (hdr == NULL) {
966 		return (EINVAL);
967 	}
968 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
969 
970 	/* Write the contents of all of the writable segments. */
971 	if (error == 0) {
972 		Elf_Phdr *php;
973 		off_t offset;
974 		int i;
975 
976 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
977 		offset = hdrsize;
978 		for (i = 0; i < seginfo.count; i++) {
979 			error = vn_rdwr_inchunks(UIO_WRITE, vp,
980 			    (caddr_t)(uintptr_t)php->p_vaddr,
981 			    php->p_filesz, offset, UIO_USERSPACE,
982 			    IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
983 			    curthread); /* XXXKSE */
984 			if (error != 0)
985 				break;
986 			offset += php->p_filesz;
987 			php++;
988 		}
989 	}
990 	free(hdr, M_TEMP);
991 
992 	return (error);
993 }
994 
995 /*
996  * A callback for each_writable_segment() to write out the segment's
997  * program header entry.
998  */
999 static void
1000 cb_put_phdr(vm_map_entry_t entry, void *closure)
1003 {
1004 	struct phdr_closure *phc = (struct phdr_closure *)closure;
1005 	Elf_Phdr *phdr = phc->phdr;
1006 
1007 	phc->offset = round_page(phc->offset);
1008 
1009 	phdr->p_type = PT_LOAD;
1010 	phdr->p_offset = phc->offset;
1011 	phdr->p_vaddr = entry->start;
1012 	phdr->p_paddr = 0;
1013 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1014 	phdr->p_align = PAGE_SIZE;
1015 	phdr->p_flags = 0;
1016 	if (entry->protection & VM_PROT_READ)
1017 		phdr->p_flags |= PF_R;
1018 	if (entry->protection & VM_PROT_WRITE)
1019 		phdr->p_flags |= PF_W;
1020 	if (entry->protection & VM_PROT_EXECUTE)
1021 		phdr->p_flags |= PF_X;
1022 
1023 	phc->offset += phdr->p_filesz;
1024 	phc->phdr++;
1025 }
1026 
1027 /*
1028  * A callback for each_writable_segment() to gather information about
1029  * the number of segments and their total size.
1030  */
1031 static void
1032 cb_size_segment(vm_map_entry_t entry, void *closure)
1035 {
1036 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1037 
1038 	ssc->count++;
1039 	ssc->size += entry->end - entry->start;
1040 }
1041 
1042 /*
1043  * For each writable segment in the process's memory map, call the given
1044  * function with a pointer to the map entry and some arbitrary
1045  * caller-supplied data.
1046  */
1047 static void
1048 each_writable_segment(struct proc *p, segment_callback func, void *closure)
1052 {
1053 	vm_map_t map = &p->p_vmspace->vm_map;
1054 	vm_map_entry_t entry;
1055 
1056 	for (entry = map->header.next; entry != &map->header;
1057 	    entry = entry->next) {
1058 		vm_object_t obj;
1059 
1060 		/*
1061 		 * Don't dump inaccessible mappings; deal with legacy
1062 		 * coredump mode.
1063 		 *
1064 		 * Note that read-only segments related to the elf binary
1065 		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1066 		 * need to arbitrarily ignore such segments.
1067 		 */
1068 		if (elf_legacy_coredump) {
1069 			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1070 				continue;
1071 		} else {
1072 			if ((entry->protection & VM_PROT_ALL) == 0)
1073 				continue;
1074 		}
1075 
1076 		/*
1077 		 * Don't include a memory segment in the core dump if
1078 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1079 		 * madvise(2).  Do not dump submaps (i.e. parts of the
1080 		 * kernel map).
1081 		 */
1082 		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1083 			continue;
1084 
1085 		if ((obj = entry->object.vm_object) == NULL)
1086 			continue;
1087 
1088 		/* Find the deepest backing object. */
1089 		while (obj->backing_object != NULL)
1090 			obj = obj->backing_object;
1091 
1092 		/* Ignore memory-mapped devices and such things. */
1093 		if (obj->type != OBJT_DEFAULT &&
1094 		    obj->type != OBJT_SWAP &&
1095 		    obj->type != OBJT_VNODE)
1096 			continue;
1097 
1098 		(*func)(entry, closure);
1099 	}
1100 }
1101 
1102 /*
1103  * Write the core file header to the file, including padding up to
1104  * the page boundary.
1105  */
1106 static int
1107 __elfN(corehdr)(struct thread *td, struct vnode *vp, struct ucred *cred,
1108     int numsegs, void *hdr, size_t hdrsize)
1114 {
1115 	struct proc *p = td->td_proc;
1116 	size_t off;
1117 
1118 	/* Fill in the header. */
1119 	bzero(hdr, hdrsize);
1120 	off = 0;
1121 	__elfN(puthdr)(p, hdr, &off, numsegs);
1122 
1123 	/* Write it to the core file. */
1124 	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1125 	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1126 	    td)); /* XXXKSE */
1127 }
1128 
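/*
 * Build the core file header area at offset *off into `dst', advancing
 * *off past it.  Called twice: with dst == NULL from __elfN(coredump)()
 * to compute the size only, then with a real buffer from
 * __elfN(corehdr)() to fill it in.
 */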
1129 static void
1130 __elfN(puthdr)(struct proc *p, void *dst, size_t *off, int numsegs)
1131 {
1132 	struct {
1133 		prstatus_t status;
1134 		prfpregset_t fpregset;
1135 		prpsinfo_t psinfo;
1136 	} *tempdata;
1137 	prstatus_t *status;
1138 	prfpregset_t *fpregset;
1139 	prpsinfo_t *psinfo;
1140 	struct thread *first, *thr;
1141 	size_t ehoff, noteoff, notesz, phoff;
1142 
1143 	ehoff = *off;
1144 	*off += sizeof(Elf_Ehdr);
1145 
1146 	phoff = *off;
1147 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
1148 
1149 	noteoff = *off;
1150 	/*
1151 	 * Don't allocate space for the notes if we're just calculating
1152 	 * the size of the header. We also don't collect the data.
1153 	 */
1154 	if (dst != NULL) {
1155 		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1156 		status = &tempdata->status;
1157 		fpregset = &tempdata->fpregset;
1158 		psinfo = &tempdata->psinfo;
1159 	} else {
1160 		tempdata = NULL;
1161 		status = NULL;
1162 		fpregset = NULL;
1163 		psinfo = NULL;
1164 	}
1165 
1166 	if (dst != NULL) {
1167 		psinfo->pr_version = PRPSINFO_VERSION;
1168 		psinfo->pr_psinfosz = sizeof(prpsinfo_t);
1169 		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1170 		/*
1171 		 * XXX - We don't fill in the command line arguments properly
1172 		 * yet.
1173 		 */
1174 		strlcpy(psinfo->pr_psargs, p->p_comm,
1175 		    sizeof(psinfo->pr_psargs));
1176 	}
1177 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1178 	    sizeof *psinfo);
1179 
1180 	/*
1181 	 * We want to start with the registers of the first thread in the
1182 	 * process so that the .reg and .reg2 pseudo-sections created by bfd
1183 	 * will be identical to the .reg/$PID and .reg2/$PID pseudo-sections.
1184 	 * This makes sure that any tool that only looks for .reg and .reg2
1185 	 * and not for .reg/$PID and .reg2/$PID will behave the same as
1186 	 * before. The first thread is the thread with an ID equal to the
1187 	 * process' ID.
1188 	 */
1189 	first = TAILQ_FIRST(&p->p_threads);
1190 	while (first->td_tid > PID_MAX)
1191 		first = TAILQ_NEXT(first, td_plist);
1192 	thr = first;
1193 	do {
1194 		if (dst != NULL) {
1195 			status->pr_version = PRSTATUS_VERSION;
1196 			status->pr_statussz = sizeof(prstatus_t);
1197 			status->pr_gregsetsz = sizeof(gregset_t);
1198 			status->pr_fpregsetsz = sizeof(fpregset_t);
1199 			status->pr_osreldate = osreldate;
1200 			status->pr_cursig = p->p_sig;
1201 			status->pr_pid = thr->td_tid;
1202 			fill_regs(thr, &status->pr_reg);
1203 			fill_fpregs(thr, fpregset);
1204 		}
1205 		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1206 		    sizeof *status);
1207 		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1208 		    sizeof *fpregset);
1209 		/* XXX allow for MD specific notes. */
1210 		thr = (thr == first) ? TAILQ_FIRST(&p->p_threads) :
1211 		    TAILQ_NEXT(thr, td_plist);
1212 		if (thr == first)
1213 			thr = TAILQ_NEXT(thr, td_plist);
1214 	} while (thr != NULL);
1215 
1216 	notesz = *off - noteoff;
1217 
1218 	if (dst != NULL)
1219 		free(tempdata, M_TEMP);
1220 
1221 	/* Align up to a page boundary for the program segments. */
1222 	*off = round_page(*off);
1223 
1224 	if (dst != NULL) {
1225 		Elf_Ehdr *ehdr;
1226 		Elf_Phdr *phdr;
1227 		struct phdr_closure phc;
1228 
1229 		/*
1230 		 * Fill in the ELF header.
1231 		 */
1232 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1233 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1234 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1235 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1236 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1237 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1238 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1239 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1240 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1241 		ehdr->e_ident[EI_ABIVERSION] = 0;
1242 		ehdr->e_ident[EI_PAD] = 0;
1243 		ehdr->e_type = ET_CORE;
1244 		ehdr->e_machine = ELF_ARCH;
1245 		ehdr->e_version = EV_CURRENT;
1246 		ehdr->e_entry = 0;
1247 		ehdr->e_phoff = phoff;
1248 		ehdr->e_flags = 0;
1249 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1250 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1251 		ehdr->e_phnum = numsegs + 1;
1252 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1253 		ehdr->e_shnum = 0;
1254 		ehdr->e_shstrndx = SHN_UNDEF;
1255 
1256 		/*
1257 		 * Fill in the program header entries.
1258 		 */
1259 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1260 
1261 		/* The note segment. */
1262 		phdr->p_type = PT_NOTE;
1263 		phdr->p_offset = noteoff;
1264 		phdr->p_vaddr = 0;
1265 		phdr->p_paddr = 0;
1266 		phdr->p_filesz = notesz;
1267 		phdr->p_memsz = 0;
1268 		phdr->p_flags = 0;
1269 		phdr->p_align = 0;
1270 		phdr++;
1271 
1272 		/* All the writable segments from the program. */
1273 		phc.phdr = phdr;
1274 		phc.offset = *off;
1275 		each_writable_segment(p, cb_put_phdr, &phc);
1276 	}
1277 }
1278 
1279 static void
1280 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1281     const void *desc, size_t descsz)
1282 {
1283 	Elf_Note note;
1284 
1285 	note.n_namesz = strlen(name) + 1;
1286 	note.n_descsz = descsz;
1287 	note.n_type = type;
1288 	if (dst != NULL)
1289 		bcopy(&note, (char *)dst + *off, sizeof note);
1290 	*off += sizeof note;
1291 	if (dst != NULL)
1292 		bcopy(name, (char *)dst + *off, note.n_namesz);
1293 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1294 	if (dst != NULL)
1295 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1296 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1297 }
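
/*
 * The record emitted above is the standard ELF note layout: an Elf_Note
 * header followed by the name and the descriptor, each padded to an
 * Elf_Size boundary.  For example, the "FreeBSD" name gives n_namesz = 8
 * (seven characters plus NUL), which for ELF32 is already a multiple of
 * the 4-byte pad unit, so no name padding is added.
 */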
1298 
1299 /*
1300  * Tell kern_execve.c about it, with a little help from the linker.
1301  */
1302 static struct execsw __elfN(execsw) = {
1303 	__CONCAT(exec_, __elfN(imgact)),
1304 	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1305 };
1306 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
1307