xref: /freebsd/sys/kern/imgact_elf.c (revision 195ebc7e9e4b129de810833791a19dfb4349d6a9)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 Søren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_compat.h"
35 
36 #include <sys/param.h>
37 #include <sys/exec.h>
38 #include <sys/fcntl.h>
39 #include <sys/imgact.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/mutex.h>
46 #include <sys/mman.h>
47 #include <sys/namei.h>
48 #include <sys/pioctl.h>
49 #include <sys/proc.h>
50 #include <sys/procfs.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sf_buf.h>
53 #include <sys/systm.h>
54 #include <sys/signalvar.h>
55 #include <sys/stat.h>
56 #include <sys/sx.h>
57 #include <sys/syscall.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysent.h>
60 #include <sys/vnode.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_kern.h>
64 #include <vm/vm_param.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69 
70 #include <machine/elf.h>
71 #include <machine/md_var.h>
72 
73 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
74 #include <machine/fpu.h>
75 #include <compat/ia32/ia32_reg.h>
76 #endif
77 
78 #define OLD_EI_BRAND	8	/* Offset in e_ident[] of the FreeBSD 3.x brand string. */
79 
80 static int __elfN(check_header)(const Elf_Ehdr *hdr);
81 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
82     const char *interp, int32_t *osrel);
83 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
84     u_long *entry, size_t pagesize);
85 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
86     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
87     vm_prot_t prot, size_t pagesize);
88 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
89 static boolean_t __elfN(check_note)(struct image_params *imgp,
90     Elf_Brandnote *checknote, int32_t *osrel);
91 
92 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
93     "");
94 
95 int __elfN(fallback_brand) = -1;
96 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
97     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
98     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
99 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
100     &__elfN(fallback_brand));
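/*
 * For example, setting the tunable kern.elf32.fallback_brand=3
 * (ELFOSABI_LINUX) makes unbranded 32-bit binaries fall back to the
 * Linux brand, provided that brand has been registered via
 * __elfN(insert_brand_entry)().
 */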
101 
102 static int elf_legacy_coredump = 0;
103 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
104     &elf_legacy_coredump, 0, "");
105 
106 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
107 
108 #define	trunc_page_ps(va, ps)	((va) & ~((ps) - 1))
109 #define	round_page_ps(va, ps)	(((va) + (ps) - 1) & ~((ps) - 1))
110 #define	aligned(a, t)	(trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
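/*
 * For example, with ps = 4096: trunc_page_ps(0x12345, 4096) == 0x12000
 * and round_page_ps(0x12345, 4096) == 0x13000; aligned(a, t) is true
 * iff a is a multiple of sizeof(t).  Both macros assume ps is a power
 * of two.
 */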
111 
112 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
113 
114 Elf_Brandnote __elfN(freebsd_brandnote) = {
115 	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
116 	.hdr.n_descsz	= sizeof(int32_t),
117 	.hdr.n_type	= 1,
118 	.vendor		= FREEBSD_ABI_VENDOR,
119 	.flags		= BN_CAN_FETCH_OSREL
120 };
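/*
 * Sketch of the on-disk ".note.ABI-tag" record this descriptor matches
 * (the desc value shown is only an example):
 *
 *	n_namesz = 8		"FreeBSD\0"
 *	n_descsz = 4		sizeof(int32_t)
 *	n_type   = 1		ABI tag
 *	name     = "FreeBSD\0"	padded to a 4-byte boundary
 *	desc     = 800000	the __FreeBSD_version-style osrel value
 */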
121 
122 int
123 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
124 {
125 	int i;
126 
127 	for (i = 0; i < MAX_BRANDS; i++) {
128 		if (elf_brand_list[i] == NULL) {
129 			elf_brand_list[i] = entry;
130 			break;
131 		}
132 	}
133 	if (i == MAX_BRANDS)
134 		return (-1);
135 	return (0);
136 }
137 
138 int
139 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
140 {
141 	int i;
142 
143 	for (i = 0; i < MAX_BRANDS; i++) {
144 		if (elf_brand_list[i] == entry) {
145 			elf_brand_list[i] = NULL;
146 			break;
147 		}
148 	}
149 	if (i == MAX_BRANDS)
150 		return (-1);
151 	return (0);
152 }
153 
154 int
155 __elfN(brand_inuse)(Elf_Brandinfo *entry)
156 {
157 	struct proc *p;
158 	int rval = FALSE;
159 
160 	sx_slock(&allproc_lock);
161 	FOREACH_PROC_IN_SYSTEM(p) {
162 		if (p->p_sysent == entry->sysvec) {
163 			rval = TRUE;
164 			break;
165 		}
166 	}
167 	sx_sunlock(&allproc_lock);
168 
169 	return (rval);
170 }
171 
172 static Elf_Brandinfo *
173 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
174     int32_t *osrel)
175 {
176 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
177 	Elf_Brandinfo *bi;
178 	boolean_t ret;
179 	int i;
180 
181 	/*
182 	 * We support four types of branding -- (1) the ELF EI_OSABI field
183 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
184 	 * branding within the ELF header, (3) the path of the ELF interpreter
185 	 * (the `interp_path' field), and (4) the ".note.ABI-tag" ELF section.
186 	 */
187 
188 	/* Look for a ".note.ABI-tag" ELF section. */
189 	for (i = 0; i < MAX_BRANDS; i++) {
190 		bi = elf_brand_list[i];
191 		if (bi != NULL && hdr->e_machine == bi->machine &&
192 		    (bi->flags & BI_BRAND_NOTE) != 0) {
193 			ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
194 			if (ret)
195 				return (bi);
196 		}
197 	}
198 
199 	/* If the executable has a brand, search for it in the brand list. */
200 	for (i = 0; i < MAX_BRANDS; i++) {
201 		bi = elf_brand_list[i];
202 		if (bi != NULL && hdr->e_machine == bi->machine &&
203 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
204 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
205 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
206 			return (bi);
207 	}
208 
209 	/* Lacking a known brand, search for a recognized interpreter. */
210 	if (interp != NULL) {
211 		for (i = 0; i < MAX_BRANDS; i++) {
212 			bi = elf_brand_list[i];
213 			if (bi != NULL && hdr->e_machine == bi->machine &&
214 			    strcmp(interp, bi->interp_path) == 0)
215 				return (bi);
216 		}
217 	}
218 
219 	/* Lacking a recognized interpreter, try the default brand. */
220 	for (i = 0; i < MAX_BRANDS; i++) {
221 		bi = elf_brand_list[i];
222 		if (bi != NULL && hdr->e_machine == bi->machine &&
223 		    __elfN(fallback_brand) == bi->brand)
224 			return (bi);
225 	}
226 	return (NULL);
227 }
228 
229 static int
230 __elfN(check_header)(const Elf_Ehdr *hdr)
231 {
232 	Elf_Brandinfo *bi;
233 	int i;
234 
235 	if (!IS_ELF(*hdr) ||
236 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
237 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
238 	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
239 	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
240 	    hdr->e_version != ELF_TARG_VER)
241 		return (ENOEXEC);
242 
243 	/*
244 	 * Make sure we have at least one brand for this machine.
245 	 */
246 
247 	for (i = 0; i < MAX_BRANDS; i++) {
248 		bi = elf_brand_list[i];
249 		if (bi != NULL && bi->machine == hdr->e_machine)
250 			break;
251 	}
252 	if (i == MAX_BRANDS)
253 		return (ENOEXEC);
254 
255 	return (0);
256 }
257 
258 static int
259 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
260     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
261 {
262 	struct sf_buf *sf;
263 	int error;
264 	vm_offset_t off;
265 
266 	/*
267 	 * Create the page if it doesn't exist yet. Ignore errors.
268 	 */
269 	vm_map_lock(map);
270 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
271 	    VM_PROT_ALL, VM_PROT_ALL, 0);
272 	vm_map_unlock(map);
273 
274 	/*
275 	 * Find the page from the underlying object.
276 	 */
277 	if (object) {
278 		sf = vm_imgact_map_page(object, offset);
279 		if (sf == NULL)
280 			return (KERN_FAILURE);
281 		off = offset - trunc_page(offset);
282 		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
283 		    end - start);
284 		vm_imgact_unmap_page(sf);
285 		if (error) {
286 			return (KERN_FAILURE);
287 		}
288 	}
289 
290 	return (KERN_SUCCESS);
291 }
292 
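/*
 * Map the range [start, end) from object at the given file offset.
 * Unaligned head and tail fragments go through __elfN(map_partial)(),
 * which copies the data; the page-aligned middle is mapped directly
 * from the object when the file offset is page aligned, and copied
 * page by page when it is not.
 */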
293 static int
294 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
295     vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
296 {
297 	struct sf_buf *sf;
298 	vm_offset_t off;
299 	vm_size_t sz;
300 	int error, rv;
301 
302 	if (start != trunc_page(start)) {
303 		rv = __elfN(map_partial)(map, object, offset, start,
304 		    round_page(start), prot);
305 		if (rv)
306 			return (rv);
307 		offset += round_page(start) - start;
308 		start = round_page(start);
309 	}
310 	if (end != round_page(end)) {
311 		rv = __elfN(map_partial)(map, object, offset +
312 		    trunc_page(end) - start, trunc_page(end), end, prot);
313 		if (rv)
314 			return (rv);
315 		end = trunc_page(end);
316 	}
317 	if (end > start) {
318 		if (offset & PAGE_MASK) {
319 			/*
320 			 * The mapping is not page aligned. This means we have
321 			 * to copy the data. Sigh.
322 			 */
323 			rv = vm_map_find(map, NULL, 0, &start, end - start,
324 			    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
325 			if (rv)
326 				return (rv);
327 			if (object == NULL)
328 				return (KERN_SUCCESS);
329 			for (; start < end; start += sz) {
330 				sf = vm_imgact_map_page(object, offset);
331 				if (sf == NULL)
332 					return (KERN_FAILURE);
333 				off = offset - trunc_page(offset);
334 				sz = end - start;
335 				if (sz > PAGE_SIZE - off)
336 					sz = PAGE_SIZE - off;
337 				error = copyout((caddr_t)sf_buf_kva(sf) + off,
338 				    (caddr_t)start, sz);
339 				vm_imgact_unmap_page(sf);
340 				if (error) {
341 					return (KERN_FAILURE);
342 				}
343 				offset += sz;
344 			}
345 			rv = KERN_SUCCESS;
346 		} else {
347 			vm_object_reference(object);
348 			vm_map_lock(map);
349 			rv = vm_map_insert(map, object, offset, start, end,
350 			    prot, VM_PROT_ALL, cow);
351 			vm_map_unlock(map);
352 			if (rv != KERN_SUCCESS)
353 				vm_object_deallocate(object);
354 		}
355 		return (rv);
356 	} else {
357 		return (KERN_SUCCESS);
358 	}
359 }
360 
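/*
 * Load one PT_LOAD segment: the first filsz bytes come from the file
 * object, and the remaining (memsz - filsz) bytes, if any, become
 * zero-filled anonymous memory (bss), with the page fragment that
 * straddles the boundary copied in by hand below.
 */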
361 static int
362 __elfN(load_section)(struct vmspace *vmspace,
363 	vm_object_t object, vm_offset_t offset,
364 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
365 	size_t pagesize)
366 {
367 	struct sf_buf *sf;
368 	size_t map_len;
369 	vm_offset_t map_addr;
370 	int error, rv, cow;
371 	size_t copy_len;
372 	vm_offset_t file_addr;
373 
374 	/*
375 	 * It's necessary to fail if the filsz + offset taken from the
376 	 * header is greater than the actual file pager object's size.
377 	 * If we were to allow this, then the vm_map_find() below would
378 	 * walk right off the end of the file object and into the ether.
379 	 *
380 	 * While I'm here, might as well check for something else that
381 	 * is invalid: filsz cannot be greater than memsz.
382 	 */
383 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
384 	    filsz > memsz) {
385 		uprintf("elf_load_section: truncated ELF file\n");
386 		return (ENOEXEC);
387 	}
388 
389 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
390 	file_addr = trunc_page_ps(offset, pagesize);
391 
392 	/*
393 	 * We have two choices.  We can either clear the data in the last page
394 	 * of an oversized mapping, or we can start the anon mapping a page
395 	 * early and copy the initialized data into that first page.  We
396 	 * choose the second.
397 	 */
398 	if (memsz > filsz)
399 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
400 	else
401 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
402 
403 	if (map_len != 0) {
404 		/* cow flags: don't dump readonly sections in core */
405 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
406 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
407 
408 		rv = __elfN(map_insert)(&vmspace->vm_map,
409 				      object,
410 				      file_addr,	/* file offset */
411 				      map_addr,		/* virtual start */
412 				      map_addr + map_len,/* virtual end */
413 				      prot,
414 				      cow);
415 		if (rv != KERN_SUCCESS)
416 			return (EINVAL);
417 
418 		/* we can stop now if we've covered it all */
419 		if (memsz == filsz) {
420 			return (0);
421 		}
422 	}
423 
424 
425 	/*
426 	 * We have to get the remaining bit of the file into the first part
427 	 * of the oversized map segment.  This is normally because the .data
428 	 * segment in the file is extended to provide bss.  It's a neat idea
429 	 * to try and save a page, but it's a pain in the behind to implement.
430 	 */
431 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
432 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
433 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
434 	    map_addr;
435 
436 	/* This had damn well better be true! */
437 	if (map_len != 0) {
438 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
439 		    map_addr + map_len, VM_PROT_ALL, 0);
440 		if (rv != KERN_SUCCESS) {
441 			return (EINVAL);
442 		}
443 	}
444 
445 	if (copy_len != 0) {
446 		vm_offset_t off;
447 
448 		sf = vm_imgact_map_page(object, offset + filsz);
449 		if (sf == NULL)
450 			return (EIO);
451 
452 		/* send the page fragment to user space */
453 		off = trunc_page_ps(offset + filsz, pagesize) -
454 		    trunc_page(offset + filsz);
455 		error = copyout((caddr_t)sf_buf_kva(sf) + off,
456 		    (caddr_t)map_addr, copy_len);
457 		vm_imgact_unmap_page(sf);
458 		if (error) {
459 			return (error);
460 		}
461 	}
462 
463 	/*
464 	 * set it to the specified protection.
465 	 * XXX had better undo the damage from pasting over the cracks here!
466 	 */
467 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
468 	    round_page(map_addr + map_len),  prot, FALSE);
469 
470 	return (0);
471 }
472 
473 /*
474  * Load the file "file" into memory.  It may be either a shared object
475  * or an executable.
476  *
477  * The "addr" reference parameter is in/out.  On entry, it specifies
478  * the address where a shared object should be loaded.  If the file is
479  * an executable, this value is ignored.  On exit, "addr" specifies
480  * where the file was actually loaded.
481  *
482  * The "entry" reference parameter is out only.  On exit, it specifies
483  * the entry point for the loaded file.
484  */
485 static int
486 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
487 	u_long *entry, size_t pagesize)
488 {
489 	struct {
490 		struct nameidata nd;
491 		struct vattr attr;
492 		struct image_params image_params;
493 	} *tempdata;
494 	const Elf_Ehdr *hdr = NULL;
495 	const Elf_Phdr *phdr = NULL;
496 	struct nameidata *nd;
497 	struct vmspace *vmspace = p->p_vmspace;
498 	struct vattr *attr;
499 	struct image_params *imgp;
500 	vm_prot_t prot;
501 	u_long rbase;
502 	u_long base_addr = 0;
503 	int vfslocked, error, i, numsegs;
504 
505 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
506 	nd = &tempdata->nd;
507 	attr = &tempdata->attr;
508 	imgp = &tempdata->image_params;
509 
510 	/*
511 	 * Initialize part of the common data
512 	 */
513 	imgp->proc = p;
514 	imgp->attr = attr;
515 	imgp->firstpage = NULL;
516 	imgp->image_header = NULL;
517 	imgp->object = NULL;
518 	imgp->execlabel = NULL;
519 
520 	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
521 	    curthread);
522 	vfslocked = 0;
523 	if ((error = namei(nd)) != 0) {
524 		nd->ni_vp = NULL;
525 		goto fail;
526 	}
527 	vfslocked = NDHASGIANT(nd);
528 	NDFREE(nd, NDF_ONLY_PNBUF);
529 	imgp->vp = nd->ni_vp;
530 
531 	/*
532 	 * Check permissions, modes, uid, etc on the file, and "open" it.
533 	 */
534 	error = exec_check_permissions(imgp);
535 	if (error)
536 		goto fail;
537 
538 	error = exec_map_first_page(imgp);
539 	if (error)
540 		goto fail;
541 
542 	/*
543 	 * Also make certain that the interpreter stays the same, so set
544 	 * its VV_TEXT flag, too.
545 	 */
546 	nd->ni_vp->v_vflag |= VV_TEXT;
547 
548 	imgp->object = nd->ni_vp->v_object;
549 
550 	hdr = (const Elf_Ehdr *)imgp->image_header;
551 	if ((error = __elfN(check_header)(hdr)) != 0)
552 		goto fail;
553 	if (hdr->e_type == ET_DYN)
554 		rbase = *addr;
555 	else if (hdr->e_type == ET_EXEC)
556 		rbase = 0;
557 	else {
558 		error = ENOEXEC;
559 		goto fail;
560 	}
561 
562 	/* Only support headers that fit within first page for now      */
563 	/*    (multiplication of two Elf_Half fields will not overflow) */
564 	if ((hdr->e_phoff > PAGE_SIZE) ||
565 	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
566 		error = ENOEXEC;
567 		goto fail;
568 	}
569 
570 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
571 	if (!aligned(phdr, Elf_Addr)) {
572 		error = ENOEXEC;
573 		goto fail;
574 	}
575 
576 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
577 		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
578 			prot = 0;
579 			if (phdr[i].p_flags & PF_X)
580   				prot |= VM_PROT_EXECUTE;
581 			if (phdr[i].p_flags & PF_W)
582   				prot |= VM_PROT_WRITE;
583 			if (phdr[i].p_flags & PF_R)
584   				prot |= VM_PROT_READ;
585 
586 			if ((error = __elfN(load_section)(vmspace,
587 			    imgp->object, phdr[i].p_offset,
588 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
589 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
590 			    pagesize)) != 0)
591 				goto fail;
592 			/*
593 			 * Establish the base address if this is the
594 			 * first segment.
595 			 */
596 			if (numsegs == 0)
597   				base_addr = trunc_page(phdr[i].p_vaddr +
598 				    rbase);
599 			numsegs++;
600 		}
601 	}
602 	*addr = base_addr;
603 	*entry = (unsigned long)hdr->e_entry + rbase;
604 
605 fail:
606 	if (imgp->firstpage)
607 		exec_unmap_first_page(imgp);
608 
609 	if (nd->ni_vp)
610 		vput(nd->ni_vp);
611 
612 	VFS_UNLOCK_GIANT(vfslocked);
613 	free(tempdata, M_TEMP);
614 
615 	return (error);
616 }
617 
618 static int
619 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
620 {
621 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
622 	const Elf_Phdr *phdr;
623 	Elf_Auxargs *elf_auxargs;
624 	struct vmspace *vmspace;
625 	vm_prot_t prot;
626 	u_long text_size = 0, data_size = 0, total_size = 0;
627 	u_long text_addr = 0, data_addr = 0;
628 	u_long seg_size, seg_addr;
629 	u_long addr, entry = 0, proghdr = 0;
630 	int32_t osrel = 0;
631 	int error = 0, i;
632 	const char *interp = NULL, *newinterp = NULL;
633 	Elf_Brandinfo *brand_info;
634 	char *path;
635 	struct sysentvec *sv;
636 
637 	/*
638 	 * Do we have a valid ELF header ?
639 	 *
640 	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
641 	 * if the particular brand doesn't support it.
642 	 */
643 	if (__elfN(check_header)(hdr) != 0 ||
644 	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
645 		return (-1);
646 
647 	/*
648 	 * From here on down, we return an errno, not -1, as we've
649 	 * detected an ELF file.
650 	 */
651 
652 	if ((hdr->e_phoff > PAGE_SIZE) ||
653 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
654 		/* Only support headers in first page for now */
655 		return (ENOEXEC);
656 	}
657 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
658 	if (!aligned(phdr, Elf_Addr))
659 		return (ENOEXEC);
660 	for (i = 0; i < hdr->e_phnum; i++) {
661 		if (phdr[i].p_type == PT_INTERP) {
662 			/* Path to interpreter */
663 			if (phdr[i].p_filesz > MAXPATHLEN ||
664 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
665 				return (ENOEXEC);
666 			interp = imgp->image_header + phdr[i].p_offset;
667 			break;
668 		}
669 	}
670 
671 	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
672 	if (brand_info == NULL) {
673 		uprintf("ELF binary type \"%u\" not known.\n",
674 		    hdr->e_ident[EI_OSABI]);
675 		return (ENOEXEC);
676 	}
677 	if (hdr->e_type == ET_DYN &&
678 	    (brand_info->flags & BI_CAN_EXEC_DYN) == 0)
679 		return (ENOEXEC);
680 	sv = brand_info->sysvec;
681 	if (interp != NULL && brand_info->interp_newpath != NULL)
682 		newinterp = brand_info->interp_newpath;
683 
684 	/*
685 	 * Avoid a possible deadlock if the current address space is destroyed
686 	 * and that address space maps the locked vnode.  In the common case,
687 	 * the locked vnode's v_usecount is decremented but remains greater
688 	 * than zero.  Consequently, the vnode lock is not needed by vrele().
689 	 * However, in cases where the vnode lock is external, such as nullfs,
690 	 * v_usecount may become zero.
691 	 */
692 	VOP_UNLOCK(imgp->vp, 0);
693 
694 	error = exec_new_vmspace(imgp, sv);
695 	imgp->proc->p_sysent = sv;
696 
697 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
698 	if (error)
699 		return (error);
700 
701 	vmspace = imgp->proc->p_vmspace;
702 
703 	for (i = 0; i < hdr->e_phnum; i++) {
704 		switch (phdr[i].p_type) {
705 		case PT_LOAD:	/* Loadable segment */
706 			prot = 0;
707 			if (phdr[i].p_flags & PF_X)
708   				prot |= VM_PROT_EXECUTE;
709 			if (phdr[i].p_flags & PF_W)
710   				prot |= VM_PROT_WRITE;
711 			if (phdr[i].p_flags & PF_R)
712   				prot |= VM_PROT_READ;
713 
714 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
715 			/*
716 			 * Some x86 binaries assume read == executable,
717 			 * notably the M3 runtime (and therefore cvsup).
718 			 */
719 			if (prot & VM_PROT_READ)
720 				prot |= VM_PROT_EXECUTE;
721 #endif
722 
723 			if ((error = __elfN(load_section)(vmspace,
724 			    imgp->object, phdr[i].p_offset,
725 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
726 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
727 			    sv->sv_pagesize)) != 0)
728 				return (error);
729 
730 			/*
731 			 * If this segment contains the program headers,
732 			 * remember their virtual address for the AT_PHDR
733 			 * aux entry. Static binaries don't usually include
734 			 * a PT_PHDR entry.
735 			 */
736 			if (phdr[i].p_offset == 0 &&
737 			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
738 				<= phdr[i].p_filesz)
739 				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
740 
741 			seg_addr = trunc_page(phdr[i].p_vaddr);
742 			seg_size = round_page(phdr[i].p_memsz +
743 			    phdr[i].p_vaddr - seg_addr);
744 
745 			/*
746 			 * Is this .text or .data?  We can't use
747 			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
748 			 * alpha terribly and possibly does other bad
749 			 * things so we stick to the old way of figuring
750 			 * it out:  If the segment contains the program
751 			 * entry point, it's a text segment, otherwise it
752 			 * is a data segment.
753 			 *
754 			 * Note that obreak() assumes that data_addr +
755 			 * data_size == end of data load area, and the ELF
756 			 * file format expects segments to be sorted by
757 			 * address.  If multiple data segments exist, the
758 			 * last one will be used.
759 			 */
760 			if (hdr->e_entry >= phdr[i].p_vaddr &&
761 			    hdr->e_entry < (phdr[i].p_vaddr +
762 			    phdr[i].p_memsz)) {
763 				text_size = seg_size;
764 				text_addr = seg_addr;
765 				entry = (u_long)hdr->e_entry;
766 			} else {
767 				data_size = seg_size;
768 				data_addr = seg_addr;
769 			}
770 			total_size += seg_size;
771 			break;
772 		case PT_PHDR: 	/* Program header table info */
773 			proghdr = phdr[i].p_vaddr;
774 			break;
775 		default:
776 			break;
777 		}
778 	}
779 
780 	if (data_addr == 0 && data_size == 0) {
781 		data_addr = text_addr;
782 		data_size = text_size;
783 	}
784 
785 	/*
786 	 * Check limits.  It should be safe to check the
787 	 * limits after loading the segments since we do
788 	 * not actually fault in all the segments' pages.
789 	 */
790 	PROC_LOCK(imgp->proc);
791 	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
792 	    text_size > maxtsiz ||
793 	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
794 		PROC_UNLOCK(imgp->proc);
795 		return (ENOMEM);
796 	}
797 
798 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
799 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
800 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
801 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
802 
803 	/*
804 	 * We load the dynamic linker where a userland call
805 	 * to mmap(0, ...) would put it.  The rationale behind this
806 	 * calculation is that it leaves room for the heap to grow to
807 	 * its maximum allowed size.
808 	 */
809 	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
810 	    lim_max(imgp->proc, RLIMIT_DATA));
811 	PROC_UNLOCK(imgp->proc);
812 
813 	imgp->entry_addr = entry;
814 
815 	if (interp != NULL) {
816 		int have_interp = FALSE;
817 		VOP_UNLOCK(imgp->vp, 0);
818 		if (brand_info->emul_path != NULL &&
819 		    brand_info->emul_path[0] != '\0') {
820 			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
821 			snprintf(path, MAXPATHLEN, "%s%s",
822 			    brand_info->emul_path, interp);
823 			error = __elfN(load_file)(imgp->proc, path, &addr,
824 			    &imgp->entry_addr, sv->sv_pagesize);
825 			free(path, M_TEMP);
826 			if (error == 0)
827 				have_interp = TRUE;
828 		}
829 		if (!have_interp && newinterp != NULL) {
830 			error = __elfN(load_file)(imgp->proc, newinterp, &addr,
831 			    &imgp->entry_addr, sv->sv_pagesize);
832 			if (error == 0)
833 				have_interp = TRUE;
834 		}
835 		if (!have_interp) {
836 			error = __elfN(load_file)(imgp->proc, interp, &addr,
837 			    &imgp->entry_addr, sv->sv_pagesize);
838 		}
839 		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
840 		if (error != 0) {
841 			uprintf("ELF interpreter %s not found\n", interp);
842 			return (error);
843 		}
844 	} else
845 		addr = 0;
846 
847 	/*
848 	 * Construct auxargs table (used by the fixup routine)
849 	 */
850 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
851 	elf_auxargs->execfd = -1;
852 	elf_auxargs->phdr = proghdr;
853 	elf_auxargs->phent = hdr->e_phentsize;
854 	elf_auxargs->phnum = hdr->e_phnum;
855 	elf_auxargs->pagesz = PAGE_SIZE;
856 	elf_auxargs->base = addr;
857 	elf_auxargs->flags = 0;
858 	elf_auxargs->entry = entry;
859 
860 	imgp->auxargs = elf_auxargs;
861 	imgp->interpreted = 0;
862 	imgp->proc->p_osrel = osrel;
863 
864 	return (error);
865 }
866 
867 #define	suword __CONCAT(suword, __ELF_WORD_SIZE)
868 
869 int
870 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
871 {
872 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
873 	Elf_Addr *base;
874 	Elf_Addr *pos;
875 
876 	base = (Elf_Addr *)*stack_base;
877 	pos = base + (imgp->args->argc + imgp->args->envc + 2);
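	/*
	 * base points at argv[0], so stepping over argc + envc + 2 slots
	 * skips the argv vector and its NULL terminator plus the envp
	 * vector and its NULL terminator, leaving pos at the start of the
	 * auxiliary vector area.
	 */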
878 
879 	if (args->execfd != -1)
880 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
881 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
882 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
883 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
884 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
885 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
886 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
887 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
888 	if (imgp->execpathp != 0)
889 		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
890 	AUXARGS_ENTRY(pos, AT_NULL, 0);
891 
892 	free(imgp->auxargs, M_TEMP);
893 	imgp->auxargs = NULL;
894 
895 	base--;
896 	suword(base, (long)imgp->args->argc);
897 	*stack_base = (register_t *)base;
898 	return (0);
899 }
900 
901 /*
902  * Code for generating ELF core dumps.
903  */
904 
905 typedef void (*segment_callback)(vm_map_entry_t, void *);
906 
907 /* Closure for cb_put_phdr(). */
908 struct phdr_closure {
909 	Elf_Phdr *phdr;		/* Program header to fill in */
910 	Elf_Off offset;		/* Offset of segment in core file */
911 };
912 
913 /* Closure for cb_size_segment(). */
914 struct sseg_closure {
915 	int count;		/* Count of writable segments. */
916 	size_t size;		/* Total size of all writable segments. */
917 };
918 
919 static void cb_put_phdr(vm_map_entry_t, void *);
920 static void cb_size_segment(vm_map_entry_t, void *);
921 static void each_writable_segment(struct thread *, segment_callback, void *);
922 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
923     int, void *, size_t);
924 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
925 static void __elfN(putnote)(void *, size_t *, const char *, int,
926     const void *, size_t);
927 
928 int
929 __elfN(coredump)(td, vp, limit)
930 	struct thread *td;
931 	struct vnode *vp;
932 	off_t limit;
933 {
934 	struct ucred *cred = td->td_ucred;
935 	int error = 0;
936 	struct sseg_closure seginfo;
937 	void *hdr;
938 	size_t hdrsize;
939 
940 	/* Size the program segments. */
941 	seginfo.count = 0;
942 	seginfo.size = 0;
943 	each_writable_segment(td, cb_size_segment, &seginfo);
944 
945 	/*
946 	 * Calculate the size of the core file header area by making
947 	 * a dry run of generating it.  Nothing is written, but the
948 	 * size is calculated.
949 	 */
950 	hdrsize = 0;
951 	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
952 
953 	if (hdrsize + seginfo.size >= limit)
954 		return (EFAULT);
955 
956 	/*
957 	 * Allocate memory for building the header, fill it up,
958 	 * and write it out.
959 	 */
960 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
961 	if (hdr == NULL) {	/* cannot happen: M_WAITOK never fails */
962 		return (EINVAL);
963 	}
964 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
965 
966 	/* Write the contents of all of the writable segments. */
967 	if (error == 0) {
968 		Elf_Phdr *php;
969 		off_t offset;
970 		int i;
971 
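		/*
		 * The first program header describes the PT_NOTE segment
		 * written by __elfN(puthdr)(), so start at the second one,
		 * the first loadable segment.
		 */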
972 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
973 		offset = hdrsize;
974 		for (i = 0; i < seginfo.count; i++) {
975 			error = vn_rdwr_inchunks(UIO_WRITE, vp,
976 			    (caddr_t)(uintptr_t)php->p_vaddr,
977 			    php->p_filesz, offset, UIO_USERSPACE,
978 			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
979 			    curthread);
980 			if (error != 0)
981 				break;
982 			offset += php->p_filesz;
983 			php++;
984 		}
985 	}
986 	free(hdr, M_TEMP);
987 
988 	return (error);
989 }
990 
991 /*
992  * A callback for each_writable_segment() to write out the segment's
993  * program header entry.
994  */
995 static void
996 cb_put_phdr(entry, closure)
997 	vm_map_entry_t entry;
998 	void *closure;
999 {
1000 	struct phdr_closure *phc = (struct phdr_closure *)closure;
1001 	Elf_Phdr *phdr = phc->phdr;
1002 
1003 	phc->offset = round_page(phc->offset);
1004 
1005 	phdr->p_type = PT_LOAD;
1006 	phdr->p_offset = phc->offset;
1007 	phdr->p_vaddr = entry->start;
1008 	phdr->p_paddr = 0;
1009 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1010 	phdr->p_align = PAGE_SIZE;
1011 	phdr->p_flags = 0;
1012 	if (entry->protection & VM_PROT_READ)
1013 		phdr->p_flags |= PF_R;
1014 	if (entry->protection & VM_PROT_WRITE)
1015 		phdr->p_flags |= PF_W;
1016 	if (entry->protection & VM_PROT_EXECUTE)
1017 		phdr->p_flags |= PF_X;
1018 
1019 	phc->offset += phdr->p_filesz;
1020 	phc->phdr++;
1021 }
1022 
1023 /*
1024  * A callback for each_writable_segment() to gather information about
1025  * the number of segments and their total size.
1026  */
1027 static void
1028 cb_size_segment(entry, closure)
1029 	vm_map_entry_t entry;
1030 	void *closure;
1031 {
1032 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1033 
1034 	ssc->count++;
1035 	ssc->size += entry->end - entry->start;
1036 }
1037 
1038 /*
1039  * For each writable segment in the process's memory map, call the given
1040  * function with a pointer to the map entry and some arbitrary
1041  * caller-supplied data.
1042  */
1043 static void
1044 each_writable_segment(td, func, closure)
1045 	struct thread *td;
1046 	segment_callback func;
1047 	void *closure;
1048 {
1049 	struct proc *p = td->td_proc;
1050 	vm_map_t map = &p->p_vmspace->vm_map;
1051 	vm_map_entry_t entry;
1052 	vm_object_t backing_object, object;
1053 	boolean_t ignore_entry;
1054 
1055 	vm_map_lock_read(map);
1056 	for (entry = map->header.next; entry != &map->header;
1057 	    entry = entry->next) {
1058 		/*
1059 		 * Don't dump inaccessible mappings; honor the legacy
1060 		 * coredump mode.
1061 		 *
1062 		 * Note that read-only segments related to the ELF binary
1063 		 * are now marked MAP_ENTRY_NOCOREDUMP, so we no longer
1064 		 * need to arbitrarily ignore such segments.
1065 		 */
1066 		if (elf_legacy_coredump) {
1067 			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1068 				continue;
1069 		} else {
1070 			if ((entry->protection & VM_PROT_ALL) == 0)
1071 				continue;
1072 		}
1073 
1074 		/*
1075 		 * Don't include a memory segment in the coredump if
1076 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1077 		 * madvise(2).  Do not dump submaps (i.e. parts of the
1078 		 * kernel map).
1079 		 */
1080 		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1081 			continue;
1082 
1083 		if ((object = entry->object.vm_object) == NULL)
1084 			continue;
1085 
1086 		/* Ignore memory-mapped devices and such things. */
1087 		VM_OBJECT_LOCK(object);
1088 		while ((backing_object = object->backing_object) != NULL) {
1089 			VM_OBJECT_LOCK(backing_object);
1090 			VM_OBJECT_UNLOCK(object);
1091 			object = backing_object;
1092 		}
1093 		ignore_entry = object->type != OBJT_DEFAULT &&
1094 		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1095 		VM_OBJECT_UNLOCK(object);
1096 		if (ignore_entry)
1097 			continue;
1098 
1099 		(*func)(entry, closure);
1100 	}
1101 	vm_map_unlock_read(map);
1102 }
1103 
1104 /*
1105  * Write the core file header to the file, including padding up to
1106  * the page boundary.
1107  */
1108 static int
1109 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1110 	struct thread *td;
1111 	struct vnode *vp;
1112 	struct ucred *cred;
1113 	int numsegs;
1114 	size_t hdrsize;
1115 	void *hdr;
1116 {
1117 	size_t off;
1118 
1119 	/* Fill in the header. */
1120 	bzero(hdr, hdrsize);
1121 	off = 0;
1122 	__elfN(puthdr)(td, hdr, &off, numsegs);
1123 
1124 	/* Write it to the core file. */
1125 	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1126 	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1127 	    td));
1128 }
1129 
1130 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1131 typedef struct prstatus32 elf_prstatus_t;
1132 typedef struct prpsinfo32 elf_prpsinfo_t;
1133 typedef struct fpreg32 elf_prfpregset_t;
1134 typedef struct fpreg32 elf_fpregset_t;
1135 typedef struct reg32 elf_gregset_t;
1136 #else
1137 typedef prstatus_t elf_prstatus_t;
1138 typedef prpsinfo_t elf_prpsinfo_t;
1139 typedef prfpregset_t elf_prfpregset_t;
1140 typedef prfpregset_t elf_fpregset_t;
1141 typedef gregset_t elf_gregset_t;
1142 #endif
1143 
1144 static void
1145 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1146 {
1147 	struct {
1148 		elf_prstatus_t status;
1149 		elf_prfpregset_t fpregset;
1150 		elf_prpsinfo_t psinfo;
1151 	} *tempdata;
1152 	elf_prstatus_t *status;
1153 	elf_prfpregset_t *fpregset;
1154 	elf_prpsinfo_t *psinfo;
1155 	struct proc *p;
1156 	struct thread *thr;
1157 	size_t ehoff, noteoff, notesz, phoff;
1158 
1159 	p = td->td_proc;
1160 
1161 	ehoff = *off;
1162 	*off += sizeof(Elf_Ehdr);
1163 
1164 	phoff = *off;
1165 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
1166 
1167 	noteoff = *off;
1168 	/*
1169 	 * Don't allocate space for the notes if we're just calculating
1170 	 * the size of the header. We also don't collect the data.
1171 	 */
1172 	if (dst != NULL) {
1173 		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1174 		status = &tempdata->status;
1175 		fpregset = &tempdata->fpregset;
1176 		psinfo = &tempdata->psinfo;
1177 	} else {
1178 		tempdata = NULL;
1179 		status = NULL;
1180 		fpregset = NULL;
1181 		psinfo = NULL;
1182 	}
1183 
1184 	if (dst != NULL) {
1185 		psinfo->pr_version = PRPSINFO_VERSION;
1186 		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1187 		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1188 		/*
1189 		 * XXX - We don't fill in the command line arguments properly
1190 		 * yet.
1191 		 */
1192 		strlcpy(psinfo->pr_psargs, p->p_comm,
1193 		    sizeof(psinfo->pr_psargs));
1194 	}
1195 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1196 	    sizeof *psinfo);
1197 
1198 	/*
1199 	 * To have the debugger select the right thread (LWP) as the initial
1200 	 * thread, we dump the state of the thread passed to us in td first.
1201 	 * This is the thread that caused the core dump and is thus the
1202 	 * thread one most likely wants selected in the debugger.
1203 	 */
1204 	thr = td;
1205 	while (thr != NULL) {
1206 		if (dst != NULL) {
1207 			status->pr_version = PRSTATUS_VERSION;
1208 			status->pr_statussz = sizeof(elf_prstatus_t);
1209 			status->pr_gregsetsz = sizeof(elf_gregset_t);
1210 			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1211 			status->pr_osreldate = osreldate;
1212 			status->pr_cursig = p->p_sig;
1213 			status->pr_pid = thr->td_tid;
1214 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1215 			fill_regs32(thr, &status->pr_reg);
1216 			fill_fpregs32(thr, fpregset);
1217 #else
1218 			fill_regs(thr, &status->pr_reg);
1219 			fill_fpregs(thr, fpregset);
1220 #endif
1221 		}
1222 		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1223 		    sizeof *status);
1224 		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1225 		    sizeof *fpregset);
1226 		/*
1227 		 * Allow for MD specific notes, as well as any MD
1228 		 * specific preparations for writing MI notes.
1229 		 */
1230 		__elfN(dump_thread)(thr, dst, off);
1231 
1232 		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1233 		    TAILQ_NEXT(thr, td_plist);
1234 		if (thr == td)
1235 			thr = TAILQ_NEXT(thr, td_plist);
1236 	}
1237 
1238 	notesz = *off - noteoff;
1239 
1240 	if (dst != NULL)
1241 		free(tempdata, M_TEMP);
1242 
1243 	/* Align up to a page boundary for the program segments. */
1244 	*off = round_page(*off);
1245 
1246 	if (dst != NULL) {
1247 		Elf_Ehdr *ehdr;
1248 		Elf_Phdr *phdr;
1249 		struct phdr_closure phc;
1250 
1251 		/*
1252 		 * Fill in the ELF header.
1253 		 */
1254 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1255 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1256 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1257 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1258 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1259 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1260 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1261 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1262 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1263 		ehdr->e_ident[EI_ABIVERSION] = 0;
1264 		ehdr->e_ident[EI_PAD] = 0;
1265 		ehdr->e_type = ET_CORE;
1266 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1267 		ehdr->e_machine = EM_386;
1268 #else
1269 		ehdr->e_machine = ELF_ARCH;
1270 #endif
1271 		ehdr->e_version = EV_CURRENT;
1272 		ehdr->e_entry = 0;
1273 		ehdr->e_phoff = phoff;
1274 		ehdr->e_flags = 0;
1275 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1276 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1277 		ehdr->e_phnum = numsegs + 1;
1278 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1279 		ehdr->e_shnum = 0;
1280 		ehdr->e_shstrndx = SHN_UNDEF;
1281 
1282 		/*
1283 		 * Fill in the program header entries.
1284 		 */
1285 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1286 
1287 		/* The note segment. */
1288 		phdr->p_type = PT_NOTE;
1289 		phdr->p_offset = noteoff;
1290 		phdr->p_vaddr = 0;
1291 		phdr->p_paddr = 0;
1292 		phdr->p_filesz = notesz;
1293 		phdr->p_memsz = 0;
1294 		phdr->p_flags = 0;
1295 		phdr->p_align = 0;
1296 		phdr++;
1297 
1298 		/* All the writable segments from the program. */
1299 		phc.phdr = phdr;
1300 		phc.offset = *off;
1301 		each_writable_segment(td, cb_put_phdr, &phc);
1302 	}
1303 }
1304 
1305 static void
1306 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1307     const void *desc, size_t descsz)
1308 {
1309 	Elf_Note note;
1310 
1311 	note.n_namesz = strlen(name) + 1;
1312 	note.n_descsz = descsz;
1313 	note.n_type = type;
1314 	if (dst != NULL)
1315 		bcopy(&note, (char *)dst + *off, sizeof note);
1316 	*off += sizeof note;
1317 	if (dst != NULL)
1318 		bcopy(name, (char *)dst + *off, note.n_namesz);
1319 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1320 	if (dst != NULL)
1321 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1322 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1323 }
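/*
 * The record __elfN(putnote)() emits is an Elf_Note header followed by
 * the name and the descriptor, each padded to a multiple of
 * sizeof(Elf_Size) (4 bytes on 32-bit targets, 8 bytes on 64-bit ones):
 *
 *	| n_namesz | n_descsz | n_type | name ... pad | desc ... pad |
 */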
1324 
1325 /*
1326  * Try to find the appropriate ABI-note section for checknote, and
1327  * fetch the binary's osreldate from the ELF OSABI note.  Only the
1328  * first page of the image is searched, the same as for headers.
1329  */
1330 static boolean_t
1331 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
1332     int32_t *osrel)
1333 {
1334 	const Elf_Note *note, *note0, *note_end;
1335 	const Elf_Phdr *phdr, *pnote;
1336 	const Elf_Ehdr *hdr;
1337 	const char *note_name;
1338 	int i;
1339 
1340 	pnote = NULL;
1341 	hdr = (const Elf_Ehdr *)imgp->image_header;
1342 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
1343 
1344 	for (i = 0; i < hdr->e_phnum; i++) {
1345 		if (phdr[i].p_type == PT_NOTE) {
1346 			pnote = &phdr[i];
1347 			break;
1348 		}
1349 	}
1350 
1351 	if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
1352 	    pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
1353 		return (FALSE);
1354 
1355 	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
1356 	note_end = (const Elf_Note *)(imgp->image_header +
1357 	    pnote->p_offset + pnote->p_filesz);
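	/*
	 * The 100-iteration cap below bounds the scan so that a corrupt
	 * note chain cannot keep the kernel walking this loop.
	 */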
1358 	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
1359 		if (!aligned(note, Elf32_Addr))
1360 			return (FALSE);
1361 		if (note->n_namesz != checknote->hdr.n_namesz ||
1362 		    note->n_descsz != checknote->hdr.n_descsz ||
1363 		    note->n_type != checknote->hdr.n_type)
1364 			goto nextnote;
1365 		note_name = (const char *)(note + 1);
1366 		if (strncmp(checknote->vendor, note_name,
1367 		    checknote->hdr.n_namesz) != 0)
1368 			goto nextnote;
1369 
1370 		/*
1371 		 * Fetch the binary's osreldate from
1372 		 * the ELF OSABI note if necessary.
1373 		 */
1374 		if ((checknote->flags & BN_CAN_FETCH_OSREL) != 0 &&
1375 		    osrel != NULL)
1376 			*osrel = *(const int32_t *) (note_name +
1377 			    roundup2(checknote->hdr.n_namesz,
1378 			    sizeof(Elf32_Addr)));
1379 		return (TRUE);
1380 
1381 nextnote:
1382 		note = (const Elf_Note *)((const char *)(note + 1) +
1383 		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1384 		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
1385 	}
1386 
1387 	return (FALSE);
1388 }
1389 
1390 /*
1391  * Tell kern_execve.c about it, with a little help from the linker.
1392  */
1393 static struct execsw __elfN(execsw) = {
1394 	__CONCAT(exec_, __elfN(imgact)),
1395 	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1396 };
1397 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
1398