xref: /freebsd/sys/kern/imgact_elf.c (revision 63f9a4cb2684a303e3eb2ffed39c03a2e2b28ae0)
/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

#ifdef __arm__
int __elfN(fallback_brand) = 9;
#else
int __elfN(fallback_brand) = -1;
#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));

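/*
 * Illustrative usage (not part of this file): on a 64-bit kernel the
 * knob above appears as "kern.elf64.fallback_brand", so the brand of
 * last resort can be chosen at runtime or at boot, e.g.:
 *
 *	sysctl kern.elf64.fallback_brand=9
 *	kern.elf64.fallback_brand="9"		(in /boot/loader.conf)
 *
 * assuming 9 (ELFOSABI_FREEBSD) matches a brand registered below.
 */
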
static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

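/*
 * Minimal sketch of a caller (hypothetical; the values are illustrative
 * only).  An ABI module fills in an Elf_Brandinfo -- the fields named
 * here are the ones this file reads: brand, machine, compat_3_brand,
 * emul_path, interp_path, sysvec -- and registers it at MOD_LOAD,
 * checking __elfN(brand_inuse)() before unregistering at MOD_UNLOAD:
 *
 *	static Elf32_Brandinfo example_brand = {
 *		.brand		= ELFOSABI_FREEBSD,
 *		.machine	= EM_386,
 *		.compat_3_brand	= "FreeBSD",
 *		.interp_path	= "/libexec/ld-elf.so.1",
 *		.sysvec		= &elf32_freebsd_sysvec,
 *	};
 *
 *	if (elf32_insert_brand_entry(&example_brand) < 0)
 *		printf("cannot insert brand entry\n");
 */
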
int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
	Elf_Brandinfo *bi;
	int i;

	/*
	 * We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, and (3) the path in the
	 * `interp_path' field.  We should also look for an ".note.ABI-tag"
	 * ELF section, now present in all Linux ELF binaries, FreeBSD 4.1+,
	 * and some NetBSD ones.
	 */

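	/*
	 * Byte layout of e_ident for the two header-based brands above
	 * (illustrative): a FreeBSD 3.x binary carries the literal brand
	 * string starting at byte OLD_EI_BRAND (8),
	 *
	 *	7f 45 4c 46 01 01 01 00  46 72 65 65 42 53 44 00
	 *	\177 E  L  F             'F''r''e''e''B''S''D'
	 *
	 * while post-3.x binaries instead set e_ident[EI_OSABI] (byte 7)
	 * to a brand number such as ELFOSABI_FREEBSD.
	 */
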
	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi != NULL && hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */

	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
	vm_prot_t max)
{
	int error, rv;
	vm_offset_t off;
	vm_offset_t data_buf = 0;

	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_lock(map);
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
	    max, 0);
	vm_map_unlock(map);

	/*
	 * Find the page from the underlying object.
	 */
	if (object) {
		vm_object_reference(object);
		rv = vm_map_find(exec_map,
				 object,
				 trunc_page(offset),
				 &data_buf,
				 PAGE_SIZE,
				 TRUE,
				 VM_PROT_READ,
				 VM_PROT_ALL,
				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (rv);
		}

		off = offset - trunc_page(offset);
		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
		    end - start);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (KERN_FAILURE);
		}
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
	vm_prot_t max, int cow)
{
	vm_offset_t data_buf, off;
	vm_size_t sz;
	int error, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot, max);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot, max);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}
	if (end > start) {
		if (offset & PAGE_MASK) {
			/*
			 * The mapping is not page aligned. This means we have
			 * to copy the data. Sigh.
			 */
			rv = vm_map_find(map, 0, 0, &start, end - start,
			    FALSE, prot, max, 0);
			if (rv)
				return (rv);
			data_buf = 0;
			while (start < end) {
				vm_object_reference(object);
				rv = vm_map_find(exec_map,
						 object,
						 trunc_page(offset),
						 &data_buf,
						 2 * PAGE_SIZE,
						 TRUE,
						 VM_PROT_READ,
						 VM_PROT_ALL,
						 (MAP_COPY_ON_WRITE
						  | MAP_PREFAULT_PARTIAL));
				if (rv != KERN_SUCCESS) {
					vm_object_deallocate(object);
					return (rv);
				}
				off = offset - trunc_page(offset);
				sz = end - start;
				if (sz > PAGE_SIZE)
					sz = PAGE_SIZE;
				error = copyout((caddr_t)data_buf + off,
				    (caddr_t)start, sz);
				vm_map_remove(exec_map, data_buf,
				    data_buf + 2 * PAGE_SIZE);
				if (error) {
					return (KERN_FAILURE);
				}
				start += sz;
			}
			rv = KERN_SUCCESS;
		} else {
			vm_map_lock(map);
			rv = vm_map_insert(map, object, offset, start, end,
			    prot, max, cow);
			vm_map_unlock(map);
		}
		return (rv);
	} else {
		return (KERN_SUCCESS);
	}
}

static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
	struct vnode *vp, vm_object_t object, vm_offset_t offset,
	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
	size_t pagesize)
{
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_offset_t file_addr;
	vm_offset_t data_buf = 0;

	GIANT_REQUIRED;

	error = 0;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

#define trunc_page_ps(va, ps)	((va) & ~((ps) - 1))
#define round_page_ps(va, ps)	(((va) + ((ps) - 1)) & ~((ps) - 1))

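	/*
	 * Worked example of the macros above (illustrative): with a
	 * superpage size of ps = 0x2000,
	 *
	 *	trunc_page_ps(0x2345, 0x2000) == 0x2000
	 *	round_page_ps(0x2345, 0x2000) == 0x4000
	 *
	 * i.e. they are trunc_page()/round_page() generalized to the
	 * arbitrary power-of-two page size handed down via "pagesize".
	 */
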
	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

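	/*
	 * Worked example of the two branches above (illustrative numbers,
	 * pagesize 0x1000): with offset = 0x0400 and filsz = 0x1800,
	 * file_addr is 0 and offset + filsz is 0x1c00, so
	 *
	 *	memsz == filsz:	map_len = round_page_ps(0x1c00) = 0x2000;
	 *			the file mapping covers everything.
	 *	memsz >  filsz:	map_len = trunc_page_ps(0x1c00) = 0x1000;
	 *			the final 0xc00 bytes of file data are
	 *			copied into the anonymous mapping below.
	 */
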
	if (map_len != 0) {
		vm_object_reference(object);

		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(&vmspace->vm_map,
				      object,
				      file_addr,	/* file offset */
				      map_addr,		/* virtual start */
				      map_addr + map_len,/* virtual end */
				      prot,
				      VM_PROT_ALL,
				      cow);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;

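	/*
	 * Continuing the illustrative numbers above (offset = 0x0400,
	 * filsz = 0x1800, pagesize 0x1000):
	 *
	 *	copy_len = 0x1c00 - trunc_page_ps(0x1c00) = 0xc00
	 *
	 * i.e. 0xc00 bytes of initialized data still sit in the file's
	 * last partial page and must be copied to the front of the
	 * anonymous (bss) region mapped below.
	 */
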
	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;
		vm_object_reference(object);
		rv = vm_map_find(exec_map,
				 object,
				 trunc_page(offset + filsz),
				 &data_buf,
				 PAGE_SIZE,
				 TRUE,
				 VM_PROT_READ,
				 VM_PROT_ALL,
				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
		    copy_len);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (error);
		}
	}

	/*
	 * set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len),  prot, FALSE);

	return (error);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
	u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int error, i, numsegs;

	if (curthread->td_proc != p)
		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->userspace_argv = NULL;
	imgp->userspace_envv = NULL;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	/* XXXKSE */
	NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);

	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	if (error == 0)
		nd->ni_vp->v_vflag |= VV_TEXT;

	VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
	vm_object_reference(imgp->object);

	VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
	if (error)
		goto fail;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now      */
	/*    (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
  				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
  				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
  				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(p, vmspace,
			    nd->ni_vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
  				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->object)
		vm_object_deallocate(imgp->object);

	if (nd->ni_vp)
		vrele(nd->ni_vp);

	free(tempdata, M_TEMP);

	return (error);
}

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct thread *td = curthread;
	struct sysentvec *sv;

	GIANT_REQUIRED;

	/*
	 * Do we have a valid ELF header ?
	 */
	if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/*
	 * From this point on, we may have resources that need to be freed.
	 */

	VOP_UNLOCK(imgp->vp, 0, td);

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
	  	case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
				error = ENOEXEC;
				goto fail;
			}
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		default:
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto fail;
	}
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		interp = brand_info->interp_newpath;

	if ((error = exec_extract_strings(imgp)) != 0)
		goto fail;

	exec_new_vmspace(imgp, sv);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
  				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
  				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
  				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(imgp->proc, vmspace,
			    imgp->vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
  				goto fail;

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry. Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				<= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR: 	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments' pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
		error = ENOMEM;
		goto fail;
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);

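	/*
	 * Illustrative example of the calculation above: on i386 with
	 * vm_daddr = 0x08050000 and a RLIMIT_DATA hard limit of 512MB,
	 *
	 *	addr = round_page(0x08050000 + 0x20000000) = 0x28050000,
	 *
	 * which is just past the highest address the heap may reach.
	 */
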
	imgp->entry_addr = entry;

	imgp->proc->p_sysent = sv;
	if (interp != NULL && brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
		    interp);
		error = __elfN(load_file)(imgp->proc, path, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		free(path, M_TEMP);
		if (error == 0)
			interp = NULL;
	}
	if (interp != NULL) {
		error = __elfN(load_file)(imgp->proc, interp, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			goto fail;
		}
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

fail:
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

#define	suword __CONCAT(suword, __ELF_WORD_SIZE)
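/*
 * For example, with __ELF_WORD_SIZE == 64 the define above makes
 * "suword" expand to suword64(), so the argc value stored below has
 * the width the target ABI expects; the 32-bit image activator in an
 * LP64 kernel gets suword32() instead.
 */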

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	pos = base + (imgp->argc + imgp->envc + 2);
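	/*
	 * Illustrative layout of the words at *stack_base explaining
	 * the "+ 2" above (one NULL terminator each for argv and envv):
	 *
	 *	base[-1]		argc (stored at the end, below)
	 *	base[0 .. argc-1]	argv pointers
	 *	base[argc]		NULL
	 *	base[argc+1 .. +envc]	envv pointers
	 *	base[argc+envc+1]	NULL
	 *	pos			AT_* entries, ending in AT_NULL
	 */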

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->argc);
	*stack_base = (register_t *)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit)
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return (EINVAL);
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
			    curthread); /* XXXKSE */
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(vm_map_entry_t entry, void *closure)
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(struct thread *td, segment_callback func, void *closure)
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE was set via mmap(2) or MADV_NOCORE via
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(struct thread *td, struct vnode *vp, struct ucred *cred,
    int numsegs, void *hdr, size_t hdrsize)
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td)); /* XXXKSE */
}

static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		prstatus_t status;
		prfpregset_t fpregset;
		prpsinfo_t psinfo;
	} *tempdata;
	prstatus_t *status;
	prfpregset_t *fpregset;
	prpsinfo_t *psinfo;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header. We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(prstatus_t);
			status->pr_gregsetsz = sizeof(gregset_t);
			status->pr_fpregsetsz = sizeof(fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}
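
/*
 * Resulting note record (illustrative, assuming a 4-byte Elf_Size) for
 * name "FreeBSD" (n_namesz = 8) and an 8-byte descriptor:
 *
 *	+0	n_namesz = 8
 *	+4	n_descsz = 8
 *	+8	n_type
 *	+12	"FreeBSD\0"	(8 bytes, already Elf_Size aligned)
 *	+20	descriptor	(8 bytes)
 *
 * The roundup2() calls pad both the name and the descriptor so the
 * next note starts on an Elf_Size boundary.
 */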

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
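
/*
 * A sketch of the effect of EXEC_SET() (not its literal expansion): it
 * arranges for exec_register() to be called on this execsw entry (e.g.
 * elf64_execsw on a 64-bit kernel) when the kernel or module
 * initializes, and exec_unregister() on unload, hooking this image
 * activator into the list of interpreters kern_execve() tries against
 * each new image.
 */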