xref: /freebsd/sys/kern/imgact_elf.c (revision 4c8945a06b01a5c8122cdeb402af36bb46a06acc)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 Søren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_compat.h"
35 #include "opt_core.h"
36 
37 #include <sys/param.h>
38 #include <sys/exec.h>
39 #include <sys/fcntl.h>
40 #include <sys/imgact.h>
41 #include <sys/imgact_elf.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mount.h>
46 #include <sys/mutex.h>
47 #include <sys/mman.h>
48 #include <sys/namei.h>
49 #include <sys/pioctl.h>
50 #include <sys/proc.h>
51 #include <sys/procfs.h>
52 #include <sys/resourcevar.h>
53 #include <sys/sf_buf.h>
54 #include <sys/smp.h>
55 #include <sys/systm.h>
56 #include <sys/signalvar.h>
57 #include <sys/stat.h>
58 #include <sys/sx.h>
59 #include <sys/syscall.h>
60 #include <sys/sysctl.h>
61 #include <sys/sysent.h>
62 #include <sys/vnode.h>
63 #include <sys/syslog.h>
64 #include <sys/eventhandler.h>
65 
66 #include <net/zlib.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_param.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_extern.h>
75 
76 #include <machine/elf.h>
77 #include <machine/md_var.h>
78 
79 #define OLD_EI_BRAND	8
80 
81 static int __elfN(check_header)(const Elf_Ehdr *hdr);
82 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
83     const char *interp, int32_t *osrel);
84 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
85     u_long *entry, size_t pagesize);
86 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
87     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
88     vm_prot_t prot, size_t pagesize);
89 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
90 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
91     int32_t *osrel);
92 static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
93 static boolean_t __elfN(check_note)(struct image_params *imgp,
94     Elf_Brandnote *checknote, int32_t *osrel);
95 static vm_prot_t __elfN(trans_prot)(Elf_Word);
96 static Elf_Word __elfN(untrans_prot)(vm_prot_t);
97 
98 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
99     "");
100 
101 #ifdef COMPRESS_USER_CORES
102 static int compress_core(gzFile, char *, char *, unsigned int,
103     struct thread * td);
104 #define CORE_BUF_SIZE	(16 * 1024)
105 #endif
106 
107 int __elfN(fallback_brand) = -1;
108 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
109     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
110     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
111 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
112     &__elfN(fallback_brand));
113 
114 static int elf_legacy_coredump = 0;
115 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
116     &elf_legacy_coredump, 0, "");
117 
118 static int __elfN(nxstack) = 0;
119 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
120     nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
121     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
122 
123 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
124 
125 #define	trunc_page_ps(va, ps)	((va) & ~(ps - 1))
126 #define	round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
127 #define	aligned(a, t)	(trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
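/*
 * For illustration (with a power-of-two page size, e.g. ps == 4096):
 * trunc_page_ps(0x1234, 4096) == 0x1000, round_page_ps(0x1234, 4096) ==
 * 0x2000, and aligned(p, Elf_Addr) is true when p is a multiple of
 * sizeof(Elf_Addr).
 */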
128 
129 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
130 
131 Elf_Brandnote __elfN(freebsd_brandnote) = {
132 	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
133 	.hdr.n_descsz	= sizeof(int32_t),
134 	.hdr.n_type	= 1,
135 	.vendor		= FREEBSD_ABI_VENDOR,
136 	.flags		= BN_TRANSLATE_OSREL,
137 	.trans_osrel	= __elfN(freebsd_trans_osrel)
138 };
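/*
 * The .note.ABI-tag record this template matches typically looks like:
 * namesz = 8, descsz = 4, type = 1, the name "FreeBSD\0", and a 32-bit
 * __FreeBSD_version value as the descriptor.
 */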
139 
140 static boolean_t
141 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
142 {
143 	uintptr_t p;
144 
145 	p = (uintptr_t)(note + 1);
146 	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
147 	*osrel = *(const int32_t *)(p);
148 
149 	return (TRUE);
150 }
151 
152 static const char GNU_ABI_VENDOR[] = "GNU";
153 static int GNU_KFREEBSD_ABI_DESC = 3;
154 
155 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
156 	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
157 	.hdr.n_descsz	= 16,	/* XXX at least 16 */
158 	.hdr.n_type	= 1,
159 	.vendor		= GNU_ABI_VENDOR,
160 	.flags		= BN_TRANSLATE_OSREL,
161 	.trans_osrel	= kfreebsd_trans_osrel
162 };
163 
164 static boolean_t
165 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
166 {
167 	const Elf32_Word *desc;
168 	uintptr_t p;
169 
170 	p = (uintptr_t)(note + 1);
171 	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
172 
173 	desc = (const Elf32_Word *)p;
174 	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
175 		return (FALSE);
176 
177 	/*
178 	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
179 	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
180 	 */
181 	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
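	/*
	 * For illustration only: a descriptor of { 3, 8, 2, 0 } would
	 * yield *osrel = 8 * 100000 + 2 * 1000 + 0 = 802000.
	 */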
182 
183 	return (TRUE);
184 }
185 
186 int
187 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
188 {
189 	int i;
190 
191 	for (i = 0; i < MAX_BRANDS; i++) {
192 		if (elf_brand_list[i] == NULL) {
193 			elf_brand_list[i] = entry;
194 			break;
195 		}
196 	}
197 	if (i == MAX_BRANDS) {
198 		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
199 			__func__, entry);
200 		return (-1);
201 	}
202 	return (0);
203 }
204 
205 int
206 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
207 {
208 	int i;
209 
210 	for (i = 0; i < MAX_BRANDS; i++) {
211 		if (elf_brand_list[i] == entry) {
212 			elf_brand_list[i] = NULL;
213 			break;
214 		}
215 	}
216 	if (i == MAX_BRANDS)
217 		return (-1);
218 	return (0);
219 }
220 
221 int
222 __elfN(brand_inuse)(Elf_Brandinfo *entry)
223 {
224 	struct proc *p;
225 	int rval = FALSE;
226 
227 	sx_slock(&allproc_lock);
228 	FOREACH_PROC_IN_SYSTEM(p) {
229 		if (p->p_sysent == entry->sysvec) {
230 			rval = TRUE;
231 			break;
232 		}
233 	}
234 	sx_sunlock(&allproc_lock);
235 
236 	return (rval);
237 }
238 
239 static Elf_Brandinfo *
240 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
241     int32_t *osrel)
242 {
243 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
244 	Elf_Brandinfo *bi;
245 	boolean_t ret;
246 	int i;
247 
248 	/*
249 	 * We support four types of branding -- (1) the ELF EI_OSABI field
250 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
251 	 * branding w/in the ELF header, (3) path of the `interp_path'
252 	 * field, and (4) the ".note.ABI-tag" ELF section.
253 	 */
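	/*
	 * Below they are tried in a different order: the ABI note first,
	 * then the EI_OSABI byte or the old e_ident string brand, then the
	 * PT_INTERP path, and finally the kern.elf<N>.fallback_brand
	 * sysctl as a last resort.
	 */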
254 
255 	/* Look for a ".note.ABI-tag" ELF section */
256 	for (i = 0; i < MAX_BRANDS; i++) {
257 		bi = elf_brand_list[i];
258 		if (bi == NULL)
259 			continue;
260 		if (hdr->e_machine == bi->machine && (bi->flags &
261 		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
262 			ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
263 			if (ret)
264 				return (bi);
265 		}
266 	}
267 
268 	/* If the executable has a brand, search for it in the brand list. */
269 	for (i = 0; i < MAX_BRANDS; i++) {
270 		bi = elf_brand_list[i];
271 		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
272 			continue;
273 		if (hdr->e_machine == bi->machine &&
274 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
275 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
276 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
277 			return (bi);
278 	}
279 
280 	/* Lacking a known brand, search for a recognized interpreter. */
281 	if (interp != NULL) {
282 		for (i = 0; i < MAX_BRANDS; i++) {
283 			bi = elf_brand_list[i];
284 			if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
285 				continue;
286 			if (hdr->e_machine == bi->machine &&
287 			    strcmp(interp, bi->interp_path) == 0)
288 				return (bi);
289 		}
290 	}
291 
292 	/* Lacking a recognized interpreter, try the default brand */
293 	for (i = 0; i < MAX_BRANDS; i++) {
294 		bi = elf_brand_list[i];
295 		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
296 			continue;
297 		if (hdr->e_machine == bi->machine &&
298 		    __elfN(fallback_brand) == bi->brand)
299 			return (bi);
300 	}
301 	return (NULL);
302 }
303 
304 static int
305 __elfN(check_header)(const Elf_Ehdr *hdr)
306 {
307 	Elf_Brandinfo *bi;
308 	int i;
309 
310 	if (!IS_ELF(*hdr) ||
311 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
312 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
313 	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
314 	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
315 	    hdr->e_version != ELF_TARG_VER)
316 		return (ENOEXEC);
317 
318 	/*
319 	 * Make sure we have at least one brand for this machine.
320 	 */
321 
322 	for (i = 0; i < MAX_BRANDS; i++) {
323 		bi = elf_brand_list[i];
324 		if (bi != NULL && bi->machine == hdr->e_machine)
325 			break;
326 	}
327 	if (i == MAX_BRANDS)
328 		return (ENOEXEC);
329 
330 	return (0);
331 }
332 
333 static int
334 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
335     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
336 {
337 	struct sf_buf *sf;
338 	int error;
339 	vm_offset_t off;
340 
341 	/*
342 	 * Create the page if it doesn't exist yet. Ignore errors.
343 	 */
344 	vm_map_lock(map);
345 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
346 	    VM_PROT_ALL, VM_PROT_ALL, 0);
347 	vm_map_unlock(map);
348 
349 	/*
350 	 * Find the page from the underlying object.
351 	 */
352 	if (object) {
353 		sf = vm_imgact_map_page(object, offset);
354 		if (sf == NULL)
355 			return (KERN_FAILURE);
356 		off = offset - trunc_page(offset);
357 		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
358 		    end - start);
359 		vm_imgact_unmap_page(sf);
360 		if (error) {
361 			return (KERN_FAILURE);
362 		}
363 	}
364 
365 	return (KERN_SUCCESS);
366 }
367 
368 static int
369 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
370     vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
371 {
372 	struct sf_buf *sf;
373 	vm_offset_t off;
374 	vm_size_t sz;
375 	int error, rv;
376 
377 	if (start != trunc_page(start)) {
378 		rv = __elfN(map_partial)(map, object, offset, start,
379 		    round_page(start), prot);
380 		if (rv)
381 			return (rv);
382 		offset += round_page(start) - start;
383 		start = round_page(start);
384 	}
385 	if (end != round_page(end)) {
386 		rv = __elfN(map_partial)(map, object, offset +
387 		    trunc_page(end) - start, trunc_page(end), end, prot);
388 		if (rv)
389 			return (rv);
390 		end = trunc_page(end);
391 	}
392 	if (end > start) {
393 		if (offset & PAGE_MASK) {
394 			/*
395 			 * The mapping is not page aligned. This means we have
396 			 * to copy the data. Sigh.
397 			 */
398 			rv = vm_map_find(map, NULL, 0, &start, end - start,
399 			    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
400 			if (rv)
401 				return (rv);
402 			if (object == NULL)
403 				return (KERN_SUCCESS);
404 			for (; start < end; start += sz) {
405 				sf = vm_imgact_map_page(object, offset);
406 				if (sf == NULL)
407 					return (KERN_FAILURE);
408 				off = offset - trunc_page(offset);
409 				sz = end - start;
410 				if (sz > PAGE_SIZE - off)
411 					sz = PAGE_SIZE - off;
412 				error = copyout((caddr_t)sf_buf_kva(sf) + off,
413 				    (caddr_t)start, sz);
414 				vm_imgact_unmap_page(sf);
415 				if (error) {
416 					return (KERN_FAILURE);
417 				}
418 				offset += sz;
419 			}
420 			rv = KERN_SUCCESS;
421 		} else {
422 			vm_object_reference(object);
423 			vm_map_lock(map);
424 			rv = vm_map_insert(map, object, offset, start, end,
425 			    prot, VM_PROT_ALL, cow);
426 			vm_map_unlock(map);
427 			if (rv != KERN_SUCCESS)
428 				vm_object_deallocate(object);
429 		}
430 		return (rv);
431 	} else {
432 		return (KERN_SUCCESS);
433 	}
434 }
435 
436 static int
437 __elfN(load_section)(struct vmspace *vmspace,
438 	vm_object_t object, vm_offset_t offset,
439 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
440 	size_t pagesize)
441 {
442 	struct sf_buf *sf;
443 	size_t map_len;
444 	vm_offset_t map_addr;
445 	int error, rv, cow;
446 	size_t copy_len;
447 	vm_offset_t file_addr;
448 
449 	/*
450 	 * It's necessary to fail if the filsz + offset taken from the
451 	 * header is greater than the actual file pager object's size.
452 	 * If we were to allow this, then the vm_map_find() below would
453 	 * walk right off the end of the file object and into the ether.
454 	 *
455 	 * While I'm here, might as well check for something else that
456 	 * is invalid: filsz cannot be greater than memsz.
457 	 */
458 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
459 	    filsz > memsz) {
460 		uprintf("elf_load_section: truncated ELF file\n");
461 		return (ENOEXEC);
462 	}
463 
464 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
465 	file_addr = trunc_page_ps(offset, pagesize);
466 
467 	/*
468 	 * We have two choices.  We can either clear the data in the last page
469 	 * of an oversized mapping, or we can start the anon mapping a page
470 	 * early and copy the initialized data into that first page.  We
471 	 * choose the second.
472 	 */
473 	if (memsz > filsz)
474 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
475 	else
476 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
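	/*
	 * Worked example, for illustration only (4K pages, offset 0,
	 * filsz 0x1800, memsz 0x3000): memsz > filsz, so map_len is
	 * 0x1000 and only the first whole page is mapped straight from
	 * the file; the remaining 0x800 bytes of file data are copied
	 * into the anonymous bss mapping set up further below.
	 */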
477 
478 	if (map_len != 0) {
479 		/* cow flags: don't dump readonly sections in core */
480 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
481 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
482 
483 		rv = __elfN(map_insert)(&vmspace->vm_map,
484 				      object,
485 				      file_addr,	/* file offset */
486 				      map_addr,		/* virtual start */
487 				      map_addr + map_len,/* virtual end */
488 				      prot,
489 				      cow);
490 		if (rv != KERN_SUCCESS)
491 			return (EINVAL);
492 
493 		/* we can stop now if we've covered it all */
494 		if (memsz == filsz) {
495 			return (0);
496 		}
497 	}
498 
499 
500 	/*
501 	 * We have to get the remaining bit of the file into the first part
502 	 * of the oversized map segment.  This is normally because the .data
503 	 * segment in the file is extended to provide bss.  It's a neat idea
504 	 * to try and save a page, but it's a pain in the behind to implement.
505 	 */
506 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
507 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
508 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
509 	    map_addr;
510 
511 	/* This had damn well better be true! */
512 	if (map_len != 0) {
513 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
514 		    map_addr + map_len, VM_PROT_ALL, 0);
515 		if (rv != KERN_SUCCESS) {
516 			return (EINVAL);
517 		}
518 	}
519 
520 	if (copy_len != 0) {
521 		vm_offset_t off;
522 
523 		sf = vm_imgact_map_page(object, offset + filsz);
524 		if (sf == NULL)
525 			return (EIO);
526 
527 		/* send the page fragment to user space */
528 		off = trunc_page_ps(offset + filsz, pagesize) -
529 		    trunc_page(offset + filsz);
530 		error = copyout((caddr_t)sf_buf_kva(sf) + off,
531 		    (caddr_t)map_addr, copy_len);
532 		vm_imgact_unmap_page(sf);
533 		if (error) {
534 			return (error);
535 		}
536 	}
537 
538 	/*
539 	 * set it to the specified protection.
540 	 * XXX had better undo the damage from pasting over the cracks here!
541 	 */
542 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
543 	    round_page(map_addr + map_len),  prot, FALSE);
544 
545 	return (0);
546 }
547 
548 /*
549  * Load the file "file" into memory.  It may be either a shared object
550  * or an executable.
551  *
552  * The "addr" reference parameter is in/out.  On entry, it specifies
553  * the address where a shared object should be loaded.  If the file is
554  * an executable, this value is ignored.  On exit, "addr" specifies
555  * where the file was actually loaded.
556  *
557  * The "entry" reference parameter is out only.  On exit, it specifies
558  * the entry point for the loaded file.
559  */
560 static int
561 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
562 	u_long *entry, size_t pagesize)
563 {
564 	struct {
565 		struct nameidata nd;
566 		struct vattr attr;
567 		struct image_params image_params;
568 	} *tempdata;
569 	const Elf_Ehdr *hdr = NULL;
570 	const Elf_Phdr *phdr = NULL;
571 	struct nameidata *nd;
572 	struct vmspace *vmspace = p->p_vmspace;
573 	struct vattr *attr;
574 	struct image_params *imgp;
575 	vm_prot_t prot;
576 	u_long rbase;
577 	u_long base_addr = 0;
578 	int vfslocked, error, i, numsegs;
579 
580 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
581 	nd = &tempdata->nd;
582 	attr = &tempdata->attr;
583 	imgp = &tempdata->image_params;
584 
585 	/*
586 	 * Initialize part of the common data
587 	 */
588 	imgp->proc = p;
589 	imgp->attr = attr;
590 	imgp->firstpage = NULL;
591 	imgp->image_header = NULL;
592 	imgp->object = NULL;
593 	imgp->execlabel = NULL;
594 
595 	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
596 	    curthread);
597 	vfslocked = 0;
598 	if ((error = namei(nd)) != 0) {
599 		nd->ni_vp = NULL;
600 		goto fail;
601 	}
602 	vfslocked = NDHASGIANT(nd);
603 	NDFREE(nd, NDF_ONLY_PNBUF);
604 	imgp->vp = nd->ni_vp;
605 
606 	/*
607 	 * Check permissions, modes, uid, etc on the file, and "open" it.
608 	 */
609 	error = exec_check_permissions(imgp);
610 	if (error)
611 		goto fail;
612 
613 	error = exec_map_first_page(imgp);
614 	if (error)
615 		goto fail;
616 
617 	/*
618 	 * Also make certain that the interpreter stays the same, so set
619 	 * its VV_TEXT flag, too.
620 	 */
621 	nd->ni_vp->v_vflag |= VV_TEXT;
622 
623 	imgp->object = nd->ni_vp->v_object;
624 
625 	hdr = (const Elf_Ehdr *)imgp->image_header;
626 	if ((error = __elfN(check_header)(hdr)) != 0)
627 		goto fail;
628 	if (hdr->e_type == ET_DYN)
629 		rbase = *addr;
630 	else if (hdr->e_type == ET_EXEC)
631 		rbase = 0;
632 	else {
633 		error = ENOEXEC;
634 		goto fail;
635 	}
636 
637 	/* Only support headers that fit within first page for now      */
638 	/*    (multiplication of two Elf_Half fields will not overflow) */
639 	if ((hdr->e_phoff > PAGE_SIZE) ||
640 	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
641 		error = ENOEXEC;
642 		goto fail;
643 	}
644 
645 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
646 	if (!aligned(phdr, Elf_Addr)) {
647 		error = ENOEXEC;
648 		goto fail;
649 	}
650 
651 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
652 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
653 			/* Loadable segment */
654 			prot = __elfN(trans_prot)(phdr[i].p_flags);
655 			if ((error = __elfN(load_section)(vmspace,
656 			    imgp->object, phdr[i].p_offset,
657 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
658 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
659 			    pagesize)) != 0)
660 				goto fail;
661 			/*
662 			 * Establish the base address if this is the
663 			 * first segment.
664 			 */
665 			if (numsegs == 0)
666   				base_addr = trunc_page(phdr[i].p_vaddr +
667 				    rbase);
668 			numsegs++;
669 		}
670 	}
671 	*addr = base_addr;
672 	*entry = (unsigned long)hdr->e_entry + rbase;
673 
674 fail:
675 	if (imgp->firstpage)
676 		exec_unmap_first_page(imgp);
677 
678 	if (nd->ni_vp)
679 		vput(nd->ni_vp);
680 
681 	VFS_UNLOCK_GIANT(vfslocked);
682 	free(tempdata, M_TEMP);
683 
684 	return (error);
685 }
686 
687 static int
688 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
689 {
690 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
691 	const Elf_Phdr *phdr;
692 	Elf_Auxargs *elf_auxargs;
693 	struct vmspace *vmspace;
694 	vm_prot_t prot;
695 	u_long text_size = 0, data_size = 0, total_size = 0;
696 	u_long text_addr = 0, data_addr = 0;
697 	u_long seg_size, seg_addr;
698 	u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
699 	int32_t osrel = 0;
700 	int error = 0, i, n;
701 	const char *interp = NULL, *newinterp = NULL;
702 	Elf_Brandinfo *brand_info;
703 	char *path;
704 	struct sysentvec *sv;
705 
706 	/*
707 	 * Do we have a valid ELF header ?
708 	 *
709 	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
710 	 * if a particular brand doesn't support it.
711 	 */
712 	if (__elfN(check_header)(hdr) != 0 ||
713 	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
714 		return (-1);
715 
716 	/*
717 	 * From here on down, we return an errno, not -1, as we've
718 	 * detected an ELF file.
719 	 */
720 
721 	if ((hdr->e_phoff > PAGE_SIZE) ||
722 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
723 		/* Only support headers in first page for now */
724 		return (ENOEXEC);
725 	}
726 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
727 	if (!aligned(phdr, Elf_Addr))
728 		return (ENOEXEC);
729 	n = 0;
730 	baddr = 0;
731 	for (i = 0; i < hdr->e_phnum; i++) {
732 		switch (phdr[i].p_type) {
733 		case PT_LOAD:
734 			if (n == 0)
735 				baddr = phdr[i].p_vaddr;
736 			n++;
737 			break;
738 		case PT_INTERP:
739 			/* Path to interpreter */
740 			if (phdr[i].p_filesz > MAXPATHLEN ||
741 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
742 				return (ENOEXEC);
743 			interp = imgp->image_header + phdr[i].p_offset;
744 			break;
745 		case PT_GNU_STACK:
746 			if (__elfN(nxstack))
747 				imgp->stack_prot =
748 				    __elfN(trans_prot)(phdr[i].p_flags);
749 			break;
750 		}
751 	}
752 
753 	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
754 	if (brand_info == NULL) {
755 		uprintf("ELF binary type \"%u\" not known.\n",
756 		    hdr->e_ident[EI_OSABI]);
757 		return (ENOEXEC);
758 	}
759 	if (hdr->e_type == ET_DYN) {
760 		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
761 			return (ENOEXEC);
762 		/*
763 		 * Honour the base load address from the dso if it is
764 		 * non-zero for some reason.
765 		 */
766 		if (baddr == 0)
767 			et_dyn_addr = ET_DYN_LOAD_ADDR;
768 		else
769 			et_dyn_addr = 0;
770 	} else
771 		et_dyn_addr = 0;
772 	sv = brand_info->sysvec;
773 	if (interp != NULL && brand_info->interp_newpath != NULL)
774 		newinterp = brand_info->interp_newpath;
775 
776 	/*
777 	 * Avoid a possible deadlock if the current address space is destroyed
778 	 * and that address space maps the locked vnode.  In the common case,
779 	 * the locked vnode's v_usecount is decremented but remains greater
780 	 * than zero.  Consequently, the vnode lock is not needed by vrele().
781 	 * However, in cases where the vnode lock is external, such as nullfs,
782 	 * v_usecount may become zero.
783 	 */
784 	VOP_UNLOCK(imgp->vp, 0);
785 
786 	error = exec_new_vmspace(imgp, sv);
787 	imgp->proc->p_sysent = sv;
788 
789 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
790 	if (error)
791 		return (error);
792 
793 	vmspace = imgp->proc->p_vmspace;
794 
795 	for (i = 0; i < hdr->e_phnum; i++) {
796 		switch (phdr[i].p_type) {
797 		case PT_LOAD:	/* Loadable segment */
798 			if (phdr[i].p_memsz == 0)
799 				break;
800 			prot = __elfN(trans_prot)(phdr[i].p_flags);
801 
802 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
803 			/*
804 			 * Some x86 binaries assume read == executable,
805 			 * notably the M3 runtime and therefore cvsup
806 			 */
807 			if (prot & VM_PROT_READ)
808 				prot |= VM_PROT_EXECUTE;
809 #endif
810 
811 			if ((error = __elfN(load_section)(vmspace,
812 			    imgp->object, phdr[i].p_offset,
813 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
814 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
815 			    sv->sv_pagesize)) != 0)
816 				return (error);
817 
818 			/*
819 			 * If this segment contains the program headers,
820 			 * remember their virtual address for the AT_PHDR
821 			 * aux entry. Static binaries don't usually include
822 			 * a PT_PHDR entry.
823 			 */
824 			if (phdr[i].p_offset == 0 &&
825 			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
826 				<= phdr[i].p_filesz)
827 				proghdr = phdr[i].p_vaddr + hdr->e_phoff +
828 				    et_dyn_addr;
829 
830 			seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
831 			seg_size = round_page(phdr[i].p_memsz +
832 			    phdr[i].p_vaddr + et_dyn_addr - seg_addr);
833 
834 			/*
835 			 * Make the largest executable segment the official
836 			 * text segment and all others data.
837 			 *
838 			 * Note that obreak() assumes that data_addr +
839 			 * data_size == end of data load area, and the ELF
840 			 * file format expects segments to be sorted by
841 			 * address.  If multiple data segments exist, the
842 			 * last one will be used.
843 			 */
844 
845 			if (phdr[i].p_flags & PF_X && text_size < seg_size) {
846 				text_size = seg_size;
847 				text_addr = seg_addr;
848 			} else {
849 				data_size = seg_size;
850 				data_addr = seg_addr;
851 			}
852 			total_size += seg_size;
853 			break;
854 		case PT_PHDR: 	/* Program header table info */
855 			proghdr = phdr[i].p_vaddr + et_dyn_addr;
856 			break;
857 		default:
858 			break;
859 		}
860 	}
861 
862 	if (data_addr == 0 && data_size == 0) {
863 		data_addr = text_addr;
864 		data_size = text_size;
865 	}
866 
867 	entry = (u_long)hdr->e_entry + et_dyn_addr;
868 
869 	/*
870 	 * Check limits.  It should be safe to check the
871 	 * limits after loading the segments since we do
872 	 * not actually fault in all the segments' pages.
873 	 */
874 	PROC_LOCK(imgp->proc);
875 	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
876 	    text_size > maxtsiz ||
877 	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
878 		PROC_UNLOCK(imgp->proc);
879 		return (ENOMEM);
880 	}
881 
882 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
883 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
884 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
885 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
886 
887 	/*
888 	 * We load the dynamic linker where a userland call
889 	 * to mmap(0, ...) would put it.  The rationale behind this
890 	 * calculation is that it leaves room for the heap to grow to
891 	 * its maximum allowed size.
892 	 */
893 	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
894 	    lim_max(imgp->proc, RLIMIT_DATA));
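	/*
	 * For illustration only: with a hypothetical 512MB hard
	 * RLIMIT_DATA, the interpreter base lands 512MB past the start of
	 * the data segment, so the brk()-grown heap can reach its full
	 * limit without colliding with the dynamic linker.
	 */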
895 	PROC_UNLOCK(imgp->proc);
896 
897 	imgp->entry_addr = entry;
898 
899 	if (interp != NULL) {
900 		int have_interp = FALSE;
901 		VOP_UNLOCK(imgp->vp, 0);
902 		if (brand_info->emul_path != NULL &&
903 		    brand_info->emul_path[0] != '\0') {
904 			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
905 			snprintf(path, MAXPATHLEN, "%s%s",
906 			    brand_info->emul_path, interp);
907 			error = __elfN(load_file)(imgp->proc, path, &addr,
908 			    &imgp->entry_addr, sv->sv_pagesize);
909 			free(path, M_TEMP);
910 			if (error == 0)
911 				have_interp = TRUE;
912 		}
913 		if (!have_interp && newinterp != NULL) {
914 			error = __elfN(load_file)(imgp->proc, newinterp, &addr,
915 			    &imgp->entry_addr, sv->sv_pagesize);
916 			if (error == 0)
917 				have_interp = TRUE;
918 		}
919 		if (!have_interp) {
920 			error = __elfN(load_file)(imgp->proc, interp, &addr,
921 			    &imgp->entry_addr, sv->sv_pagesize);
922 		}
923 		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
924 		if (error != 0) {
925 			uprintf("ELF interpreter %s not found\n", interp);
926 			return (error);
927 		}
928 	} else
929 		addr = et_dyn_addr;
930 
931 	/*
932 	 * Construct auxargs table (used by the fixup routine)
933 	 */
934 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
935 	elf_auxargs->execfd = -1;
936 	elf_auxargs->phdr = proghdr;
937 	elf_auxargs->phent = hdr->e_phentsize;
938 	elf_auxargs->phnum = hdr->e_phnum;
939 	elf_auxargs->pagesz = PAGE_SIZE;
940 	elf_auxargs->base = addr;
941 	elf_auxargs->flags = 0;
942 	elf_auxargs->entry = entry;
943 
944 	imgp->auxargs = elf_auxargs;
945 	imgp->interpreted = 0;
946 	imgp->reloc_base = addr;
947 	imgp->proc->p_osrel = osrel;
948 
949 	return (error);
950 }
951 
952 #define	suword __CONCAT(suword, __ELF_WORD_SIZE)
953 
954 int
955 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
956 {
957 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
958 	Elf_Addr *base;
959 	Elf_Addr *pos;
960 
961 	base = (Elf_Addr *)*stack_base;
962 	pos = base + (imgp->args->argc + imgp->args->envc + 2);
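	/*
	 * The "+ 2" skips the NULL pointers terminating the argv and envp
	 * arrays, so the AT_* entries below are placed immediately after
	 * the environment vector.
	 */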
963 
964 	if (args->execfd != -1)
965 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
966 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
967 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
968 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
969 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
970 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
971 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
972 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
973 	if (imgp->execpathp != 0)
974 		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
975 	AUXARGS_ENTRY(pos, AT_OSRELDATE, osreldate);
976 	if (imgp->canary != 0) {
977 		AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary);
978 		AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
979 	}
980 	AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
981 	if (imgp->pagesizes != 0) {
982 		AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes);
983 		AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
984 	}
985 	AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
986 	    != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
987 	    imgp->sysent->sv_stackprot);
988 	AUXARGS_ENTRY(pos, AT_NULL, 0);
989 
990 	free(imgp->auxargs, M_TEMP);
991 	imgp->auxargs = NULL;
992 
993 	base--;
994 	suword(base, (long)imgp->args->argc);
995 	*stack_base = (register_t *)base;
996 	return (0);
997 }
998 
999 /*
1000  * Code for generating ELF core dumps.
1001  */
1002 
1003 typedef void (*segment_callback)(vm_map_entry_t, void *);
1004 
1005 /* Closure for cb_put_phdr(). */
1006 struct phdr_closure {
1007 	Elf_Phdr *phdr;		/* Program header to fill in */
1008 	Elf_Off offset;		/* Offset of segment in core file */
1009 };
1010 
1011 /* Closure for cb_size_segment(). */
1012 struct sseg_closure {
1013 	int count;		/* Count of writable segments. */
1014 	size_t size;		/* Total size of all writable segments. */
1015 };
1016 
1017 static void cb_put_phdr(vm_map_entry_t, void *);
1018 static void cb_size_segment(vm_map_entry_t, void *);
1019 static void each_writable_segment(struct thread *, segment_callback, void *);
1020 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
1021     int, void *, size_t, gzFile);
1022 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
1023 static void __elfN(putnote)(void *, size_t *, const char *, int,
1024     const void *, size_t);
1025 
1026 #ifdef COMPRESS_USER_CORES
1027 extern int compress_user_cores;
1028 extern int compress_user_cores_gzlevel;
1029 #endif
1030 
1031 static int
1032 core_output(struct vnode *vp, void *base, size_t len, off_t offset,
1033     struct ucred *active_cred, struct ucred *file_cred,
1034     struct thread *td, char *core_buf, gzFile gzfile) {
1035 
1036 	int error;
1037 	if (gzfile) {
1038 #ifdef COMPRESS_USER_CORES
1039 		error = compress_core(gzfile, base, core_buf, len, td);
1040 #else
1041 		panic("shouldn't be here");
1042 #endif
1043 	} else {
1044 		error = vn_rdwr_inchunks(UIO_WRITE, vp, base, len, offset,
1045 		    UIO_USERSPACE, IO_UNIT | IO_DIRECT, active_cred, file_cred,
1046 		    NULL, td);
1047 	}
1048 	return (error);
1049 }
1050 
1051 int
1052 __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
1053 {
1054 	struct ucred *cred = td->td_ucred;
1055 	int error = 0;
1056 	struct sseg_closure seginfo;
1057 	void *hdr;
1058 	size_t hdrsize;
1059 
1060 	gzFile gzfile = Z_NULL;
1061 	char *core_buf = NULL;
1062 #ifdef COMPRESS_USER_CORES
1063 	char gzopen_flags[8];
1064 	char *p;
1065 	int doing_compress = flags & IMGACT_CORE_COMPRESS;
1066 #endif
1067 
1068 	hdr = NULL;
1069 
1070 #ifdef COMPRESS_USER_CORES
1071         if (doing_compress) {
1072                 p = gzopen_flags;
1073                 *p++ = 'w';
1074                 if (compress_user_cores_gzlevel >= 0 &&
1075                     compress_user_cores_gzlevel <= 9)
1076                         *p++ = '0' + compress_user_cores_gzlevel;
1077                 *p = 0;
1078                 gzfile = gz_open("", gzopen_flags, vp);
1079                 if (gzfile == Z_NULL) {
1080                         error = EFAULT;
1081                         goto done;
1082                 }
1083                 core_buf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
1084                 if (!core_buf) {
1085                         error = ENOMEM;
1086                         goto done;
1087                 }
1088         }
1089 #endif
1090 
1091 	/* Size the program segments. */
1092 	seginfo.count = 0;
1093 	seginfo.size = 0;
1094 	each_writable_segment(td, cb_size_segment, &seginfo);
1095 
1096 	/*
1097 	 * Calculate the size of the core file header area by making
1098 	 * a dry run of generating it.  Nothing is written, but the
1099 	 * size is calculated.
1100 	 */
1101 	hdrsize = 0;
1102 	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
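	/*
	 * __elfN(puthdr)() runs twice: here with a NULL destination, so it
	 * only advances hdrsize, and again from __elfN(corehdr)() with a
	 * real buffer to actually emit the ELF header, program headers and
	 * notes.
	 */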
1103 
1104 	if (hdrsize + seginfo.size >= limit) {
1105 		error = EFAULT;
1106 		goto done;
1107 	}
1108 
1109 	/*
1110 	 * Allocate memory for building the header, fill it up,
1111 	 * and write it out.
1112 	 */
1113 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1114 	if (hdr == NULL) {
1115 		error = EINVAL;
1116 		goto done;
1117 	}
1118 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize,
1119 	    gzfile);
1120 
1121 	/* Write the contents of all of the writable segments. */
1122 	if (error == 0) {
1123 		Elf_Phdr *php;
1124 		off_t offset;
1125 		int i;
1126 
1127 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1128 		offset = hdrsize;
1129 		for (i = 0; i < seginfo.count; i++) {
1130 			error = core_output(vp, (caddr_t)(uintptr_t)php->p_vaddr,
1131 			    php->p_filesz, offset, cred, NOCRED, curthread, core_buf, gzfile);
1132 			if (error != 0)
1133 				break;
1134 			offset += php->p_filesz;
1135 			php++;
1136 		}
1137 	}
1138 	if (error) {
1139 		log(LOG_WARNING,
1140 		    "Failed to write core file for process %s (error %d)\n",
1141 		    curproc->p_comm, error);
1142 	}
1143 
1144 done:
1145 #ifdef COMPRESS_USER_CORES
1146 	if (core_buf)
1147 		free(core_buf, M_TEMP);
1148 	if (gzfile)
1149 		gzclose(gzfile);
1150 #endif
1151 
1152 	free(hdr, M_TEMP);
1153 
1154 	return (error);
1155 }
1156 
1157 /*
1158  * A callback for each_writable_segment() to write out the segment's
1159  * program header entry.
1160  */
1161 static void
1162 cb_put_phdr(entry, closure)
1163 	vm_map_entry_t entry;
1164 	void *closure;
1165 {
1166 	struct phdr_closure *phc = (struct phdr_closure *)closure;
1167 	Elf_Phdr *phdr = phc->phdr;
1168 
1169 	phc->offset = round_page(phc->offset);
1170 
1171 	phdr->p_type = PT_LOAD;
1172 	phdr->p_offset = phc->offset;
1173 	phdr->p_vaddr = entry->start;
1174 	phdr->p_paddr = 0;
1175 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1176 	phdr->p_align = PAGE_SIZE;
1177 	phdr->p_flags = __elfN(untrans_prot)(entry->protection);
1178 
1179 	phc->offset += phdr->p_filesz;
1180 	phc->phdr++;
1181 }
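/*
 * each_writable_segment() walks the map in the same order here and in
 * __elfN(coredump)(), so the i-th PT_LOAD header filled in above describes
 * the i-th chunk of segment data later written out via core_output().
 */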
1182 
1183 /*
1184  * A callback for each_writable_segment() to gather information about
1185  * the number of segments and their total size.
1186  */
1187 static void
1188 cb_size_segment(entry, closure)
1189 	vm_map_entry_t entry;
1190 	void *closure;
1191 {
1192 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1193 
1194 	ssc->count++;
1195 	ssc->size += entry->end - entry->start;
1196 }
1197 
1198 /*
1199  * For each writable segment in the process's memory map, call the given
1200  * function with a pointer to the map entry and some arbitrary
1201  * caller-supplied data.
1202  */
1203 static void
1204 each_writable_segment(td, func, closure)
1205 	struct thread *td;
1206 	segment_callback func;
1207 	void *closure;
1208 {
1209 	struct proc *p = td->td_proc;
1210 	vm_map_t map = &p->p_vmspace->vm_map;
1211 	vm_map_entry_t entry;
1212 	vm_object_t backing_object, object;
1213 	boolean_t ignore_entry;
1214 
1215 	vm_map_lock_read(map);
1216 	for (entry = map->header.next; entry != &map->header;
1217 	    entry = entry->next) {
1218 		/*
1219 		 * Don't dump inaccessible mappings, deal with legacy
1220 		 * coredump mode.
1221 		 *
1222 		 * Note that read-only segments related to the elf binary
1223 		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1224 		 * need to arbitrarily ignore such segments.
1225 		 */
1226 		if (elf_legacy_coredump) {
1227 			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1228 				continue;
1229 		} else {
1230 			if ((entry->protection & VM_PROT_ALL) == 0)
1231 				continue;
1232 		}
1233 
1234 		/*
1235 		 * Don't include a memory segment in the coredump if
1236 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1237 		 * madvise(2).  Do not dump submaps (i.e. parts of the
1238 		 * kernel map).
1239 		 */
1240 		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1241 			continue;
1242 
1243 		if ((object = entry->object.vm_object) == NULL)
1244 			continue;
1245 
1246 		/* Ignore memory-mapped devices and such things. */
1247 		VM_OBJECT_LOCK(object);
1248 		while ((backing_object = object->backing_object) != NULL) {
1249 			VM_OBJECT_LOCK(backing_object);
1250 			VM_OBJECT_UNLOCK(object);
1251 			object = backing_object;
1252 		}
1253 		ignore_entry = object->type != OBJT_DEFAULT &&
1254 		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1255 		VM_OBJECT_UNLOCK(object);
1256 		if (ignore_entry)
1257 			continue;
1258 
1259 		(*func)(entry, closure);
1260 	}
1261 	vm_map_unlock_read(map);
1262 }
1263 
1264 /*
1265  * Write the core file header to the file, including padding up to
1266  * the page boundary.
1267  */
1268 static int
1269 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize, gzfile)
1270 	struct thread *td;
1271 	struct vnode *vp;
1272 	struct ucred *cred;
1273 	int numsegs;
1274 	size_t hdrsize;
1275 	void *hdr;
1276 	gzFile gzfile;
1277 {
1278 	size_t off;
1279 
1280 	/* Fill in the header. */
1281 	bzero(hdr, hdrsize);
1282 	off = 0;
1283 	__elfN(puthdr)(td, hdr, &off, numsegs);
1284 
1285 	if (!gzfile) {
1286 		/* Write it to the core file. */
1287 		return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1288 			UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1289 			td));
1290 	} else {
1291 #ifdef COMPRESS_USER_CORES
1292 		if (gzwrite(gzfile, hdr, hdrsize) != hdrsize) {
1293 			log(LOG_WARNING,
1294 			    "Failed to compress core file header for process"
1295 			    " %s.\n", curproc->p_comm);
1296 			return (EFAULT);
1297 		}
1298 		else {
1299 			return (0);
1300 		}
1301 #else
1302 		panic("shouldn't be here");
1303 #endif
1304 	}
1305 }
1306 
1307 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1308 #include <compat/freebsd32/freebsd32.h>
1309 
1310 typedef struct prstatus32 elf_prstatus_t;
1311 typedef struct prpsinfo32 elf_prpsinfo_t;
1312 typedef struct fpreg32 elf_prfpregset_t;
1313 typedef struct fpreg32 elf_fpregset_t;
1314 typedef struct reg32 elf_gregset_t;
1315 typedef struct thrmisc32 elf_thrmisc_t;
1316 #else
1317 typedef prstatus_t elf_prstatus_t;
1318 typedef prpsinfo_t elf_prpsinfo_t;
1319 typedef prfpregset_t elf_prfpregset_t;
1320 typedef prfpregset_t elf_fpregset_t;
1321 typedef gregset_t elf_gregset_t;
1322 typedef thrmisc_t elf_thrmisc_t;
1323 #endif
1324 
1325 static void
1326 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1327 {
1328 	struct {
1329 		elf_prstatus_t status;
1330 		elf_prfpregset_t fpregset;
1331 		elf_prpsinfo_t psinfo;
1332 		elf_thrmisc_t thrmisc;
1333 	} *tempdata;
1334 	elf_prstatus_t *status;
1335 	elf_prfpregset_t *fpregset;
1336 	elf_prpsinfo_t *psinfo;
1337 	elf_thrmisc_t *thrmisc;
1338 	struct proc *p;
1339 	struct thread *thr;
1340 	size_t ehoff, noteoff, notesz, phoff;
1341 
1342 	p = td->td_proc;
1343 
1344 	ehoff = *off;
1345 	*off += sizeof(Elf_Ehdr);
1346 
1347 	phoff = *off;
1348 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
1349 
1350 	noteoff = *off;
1351 	/*
1352 	 * Don't allocate space for the notes if we're just calculating
1353 	 * the size of the header. We also don't collect the data.
1354 	 */
1355 	if (dst != NULL) {
1356 		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1357 		status = &tempdata->status;
1358 		fpregset = &tempdata->fpregset;
1359 		psinfo = &tempdata->psinfo;
1360 		thrmisc = &tempdata->thrmisc;
1361 	} else {
1362 		tempdata = NULL;
1363 		status = NULL;
1364 		fpregset = NULL;
1365 		psinfo = NULL;
1366 		thrmisc = NULL;
1367 	}
1368 
1369 	if (dst != NULL) {
1370 		psinfo->pr_version = PRPSINFO_VERSION;
1371 		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1372 		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1373 		/*
1374 		 * XXX - We don't fill in the command line arguments properly
1375 		 * yet.
1376 		 */
1377 		strlcpy(psinfo->pr_psargs, p->p_comm,
1378 		    sizeof(psinfo->pr_psargs));
1379 	}
1380 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1381 	    sizeof *psinfo);
1382 
1383 	/*
1384 	 * To have the debugger select the right thread (LWP) as the initial
1385 	 * thread, we dump the state of the thread passed to us in td first.
1386 	 * This is the thread that causes the core dump and thus likely to
1387 	 * be the right thread one wants to have selected in the debugger.
1388 	 */
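	/*
	 * The loop below therefore starts at td and then walks
	 * p->p_threads, skipping td when it comes around again, so each
	 * thread is described exactly once and td's notes come first.
	 */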
1389 	thr = td;
1390 	while (thr != NULL) {
1391 		if (dst != NULL) {
1392 			status->pr_version = PRSTATUS_VERSION;
1393 			status->pr_statussz = sizeof(elf_prstatus_t);
1394 			status->pr_gregsetsz = sizeof(elf_gregset_t);
1395 			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1396 			status->pr_osreldate = osreldate;
1397 			status->pr_cursig = p->p_sig;
1398 			status->pr_pid = thr->td_tid;
1399 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1400 			fill_regs32(thr, &status->pr_reg);
1401 			fill_fpregs32(thr, fpregset);
1402 #else
1403 			fill_regs(thr, &status->pr_reg);
1404 			fill_fpregs(thr, fpregset);
1405 #endif
1406 			memset(&thrmisc->_pad, 0, sizeof (thrmisc->_pad));
1407 			strcpy(thrmisc->pr_tname, thr->td_name);
1408 		}
1409 		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1410 		    sizeof *status);
1411 		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1412 		    sizeof *fpregset);
1413 		__elfN(putnote)(dst, off, "FreeBSD", NT_THRMISC, thrmisc,
1414 		    sizeof *thrmisc);
1415 		/*
1416 		 * Allow for MD specific notes, as well as any MD
1417 		 * specific preparations for writing MI notes.
1418 		 */
1419 		__elfN(dump_thread)(thr, dst, off);
1420 
1421 		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1422 		    TAILQ_NEXT(thr, td_plist);
1423 		if (thr == td)
1424 			thr = TAILQ_NEXT(thr, td_plist);
1425 	}
1426 
1427 	notesz = *off - noteoff;
1428 
1429 	if (dst != NULL)
1430 		free(tempdata, M_TEMP);
1431 
1432 	/* Align up to a page boundary for the program segments. */
1433 	*off = round_page(*off);
1434 
1435 	if (dst != NULL) {
1436 		Elf_Ehdr *ehdr;
1437 		Elf_Phdr *phdr;
1438 		struct phdr_closure phc;
1439 
1440 		/*
1441 		 * Fill in the ELF header.
1442 		 */
1443 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1444 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1445 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1446 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1447 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1448 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1449 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1450 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1451 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1452 		ehdr->e_ident[EI_ABIVERSION] = 0;
1453 		ehdr->e_ident[EI_PAD] = 0;
1454 		ehdr->e_type = ET_CORE;
1455 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1456 		ehdr->e_machine = ELF_ARCH32;
1457 #else
1458 		ehdr->e_machine = ELF_ARCH;
1459 #endif
1460 		ehdr->e_version = EV_CURRENT;
1461 		ehdr->e_entry = 0;
1462 		ehdr->e_phoff = phoff;
1463 		ehdr->e_flags = 0;
1464 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1465 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1466 		ehdr->e_phnum = numsegs + 1;
1467 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1468 		ehdr->e_shnum = 0;
1469 		ehdr->e_shstrndx = SHN_UNDEF;
1470 
1471 		/*
1472 		 * Fill in the program header entries.
1473 		 */
1474 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1475 
1476 		/* The note segment. */
1477 		phdr->p_type = PT_NOTE;
1478 		phdr->p_offset = noteoff;
1479 		phdr->p_vaddr = 0;
1480 		phdr->p_paddr = 0;
1481 		phdr->p_filesz = notesz;
1482 		phdr->p_memsz = 0;
1483 		phdr->p_flags = 0;
1484 		phdr->p_align = 0;
1485 		phdr++;
1486 
1487 		/* All the writable segments from the program. */
1488 		phc.phdr = phdr;
1489 		phc.offset = *off;
1490 		each_writable_segment(td, cb_put_phdr, &phc);
1491 	}
1492 }
1493 
1494 static void
1495 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1496     const void *desc, size_t descsz)
1497 {
1498 	Elf_Note note;
1499 
1500 	note.n_namesz = strlen(name) + 1;
1501 	note.n_descsz = descsz;
1502 	note.n_type = type;
1503 	if (dst != NULL)
1504 		bcopy(&note, (char *)dst + *off, sizeof note);
1505 	*off += sizeof note;
1506 	if (dst != NULL)
1507 		bcopy(name, (char *)dst + *off, note.n_namesz);
1508 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1509 	if (dst != NULL)
1510 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1511 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1512 }
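/*
 * Each record emitted above uses the standard ELF note layout: the
 * Elf_Note header, then the vendor name, then the descriptor, with name
 * and descriptor each padded to an Elf_Size boundary.  For the "FreeBSD"
 * notes used here the name occupies 8 bytes after padding.
 */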
1513 
1514 /*
1515  * Try to find the appropriate ABI-note section for checknote,
1516  * fetch the osreldate for the binary from the ELF OSABI-note. Only the
1517  * first page of the image is searched, the same as for headers.
1518  */
1519 static boolean_t
1520 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
1521     int32_t *osrel)
1522 {
1523 	const Elf_Note *note, *note0, *note_end;
1524 	const Elf_Phdr *phdr, *pnote;
1525 	const Elf_Ehdr *hdr;
1526 	const char *note_name;
1527 	int i;
1528 
1529 	pnote = NULL;
1530 	hdr = (const Elf_Ehdr *)imgp->image_header;
1531 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
1532 
1533 	for (i = 0; i < hdr->e_phnum; i++) {
1534 		if (phdr[i].p_type == PT_NOTE) {
1535 			pnote = &phdr[i];
1536 			break;
1537 		}
1538 	}
1539 
1540 	if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
1541 	    pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
1542 		return (FALSE);
1543 
1544 	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
1545 	note_end = (const Elf_Note *)(imgp->image_header +
1546 	    pnote->p_offset + pnote->p_filesz);
1547 	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
1548 		if (!aligned(note, Elf32_Addr))
1549 			return (FALSE);
1550 		if (note->n_namesz != checknote->hdr.n_namesz ||
1551 		    note->n_descsz != checknote->hdr.n_descsz ||
1552 		    note->n_type != checknote->hdr.n_type)
1553 			goto nextnote;
1554 		note_name = (const char *)(note + 1);
1555 		if (strncmp(checknote->vendor, note_name,
1556 		    checknote->hdr.n_namesz) != 0)
1557 			goto nextnote;
1558 
1559 		/*
1560 		 * Fetch the osreldate for the binary
1561 		 * from the ELF OSABI-note if necessary.
1562 		 */
1563 		if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
1564 		    checknote->trans_osrel != NULL)
1565 			return (checknote->trans_osrel(note, osrel));
1566 		return (TRUE);
1567 
1568 nextnote:
1569 		note = (const Elf_Note *)((const char *)(note + 1) +
1570 		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1571 		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
1572 	}
1573 
1574 	return (FALSE);
1575 }
1576 
1577 /*
1578  * Tell kern_execve.c about it, with a little help from the linker.
1579  */
1580 static struct execsw __elfN(execsw) = {
1581 	__CONCAT(exec_, __elfN(imgact)),
1582 	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1583 };
1584 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
1585 
1586 #ifdef COMPRESS_USER_CORES
1587 /*
1588  * Compress and write out a core segment for a user process.
1589  *
1590  * 'inbuf' is the starting address of a VM segment in the process' address
1591  * space that is to be compressed and written out to the core file.  'dest_buf'
1592  * is a buffer in the kernel's address space.  The segment is copied from
1593  * 'inbuf' to 'dest_buf' first before being processed by the compression
1594  * routine gzwrite().  This copying is necessary because the content of the VM
1595  * segment may change between the compression pass and the crc-computation pass
1596  * in gzwrite().  This is because realtime threads may preempt the UNIX kernel.
1597  */
1598 static int
1599 compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len,
1600     struct thread *td)
1601 {
1602 	int len_compressed;
1603 	int error = 0;
1604 	unsigned int chunk_len;
1605 
1606 	while (len) {
1607 		chunk_len = (len > CORE_BUF_SIZE) ? CORE_BUF_SIZE : len;
1608 		copyin(inbuf, dest_buf, chunk_len);
1609 		len_compressed = gzwrite(file, dest_buf, chunk_len);
1610 
1611 		EVENTHANDLER_INVOKE(app_coredump_progress, td, len_compressed);
1612 
1613 		if ((unsigned int)len_compressed != chunk_len) {
1614 			log(LOG_WARNING,
1615 			    "compress_core: length mismatch (0x%x returned, "
1616 			    "0x%x expected)\n", len_compressed, chunk_len);
1617 			EVENTHANDLER_INVOKE(app_coredump_error, td,
1618 			    "compress_core: length mismatch %x -> %x",
1619 			    chunk_len, len_compressed);
1620 			error = EFAULT;
1621 			break;
1622 		}
1623 		inbuf += chunk_len;
1624 		len -= chunk_len;
1625 		if (ticks - PCPU_GET(switchticks) >= hogticks)
1626 			uio_yield();
1627 	}
1628 
1629 	return (error);
1630 }
1631 #endif /* COMPRESS_USER_CORES */
1632 
1633 static vm_prot_t
1634 __elfN(trans_prot)(Elf_Word flags)
1635 {
1636 	vm_prot_t prot;
1637 
1638 	prot = 0;
1639 	if (flags & PF_X)
1640 		prot |= VM_PROT_EXECUTE;
1641 	if (flags & PF_W)
1642 		prot |= VM_PROT_WRITE;
1643 	if (flags & PF_R)
1644 		prot |= VM_PROT_READ;
1645 	return (prot);
1646 }
1647 
1648 static Elf_Word
1649 __elfN(untrans_prot)(vm_prot_t prot)
1650 {
1651 	Elf_Word flags;
1652 
1653 	flags = 0;
1654 	if (prot & VM_PROT_EXECUTE)
1655 		flags |= PF_X;
1656 	if (prot & VM_PROT_READ)
1657 		flags |= PF_R;
1658 	if (prot & VM_PROT_WRITE)
1659 		flags |= PF_W;
1660 	return (flags);
1661 }
1662