xref: /freebsd/sys/kern/imgact_elf.c (revision d940309d8031453f693814d105395736aadd2f15)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 Søren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_compat.h"
35 #include "opt_core.h"
36 
37 #include <sys/param.h>
38 #include <sys/exec.h>
39 #include <sys/fcntl.h>
40 #include <sys/imgact.h>
41 #include <sys/imgact_elf.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mount.h>
46 #include <sys/mutex.h>
47 #include <sys/mman.h>
48 #include <sys/namei.h>
49 #include <sys/pioctl.h>
50 #include <sys/proc.h>
51 #include <sys/procfs.h>
52 #include <sys/resourcevar.h>
53 #include <sys/sf_buf.h>
54 #include <sys/systm.h>
55 #include <sys/signalvar.h>
56 #include <sys/stat.h>
57 #include <sys/sx.h>
58 #include <sys/syscall.h>
59 #include <sys/sysctl.h>
60 #include <sys/sysent.h>
61 #include <sys/vnode.h>
62 #include <sys/syslog.h>
63 #include <sys/eventhandler.h>
64 
65 #include <net/zlib.h>
66 
67 #include <vm/vm.h>
68 #include <vm/vm_kern.h>
69 #include <vm/vm_param.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_object.h>
73 #include <vm/vm_extern.h>
74 
75 #include <machine/elf.h>
76 #include <machine/md_var.h>
77 
78 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
79 #include <machine/fpu.h>
80 #include <compat/ia32/ia32_reg.h>
81 #endif
82 
83 #define OLD_EI_BRAND	8
84 
85 static int __elfN(check_header)(const Elf_Ehdr *hdr);
86 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
87     const char *interp, int32_t *osrel);
88 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
89     u_long *entry, size_t pagesize);
90 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
91     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
92     vm_prot_t prot, size_t pagesize);
93 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
94 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
95     int32_t *osrel);
96 static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
97 static boolean_t __elfN(check_note)(struct image_params *imgp,
98     Elf_Brandnote *checknote, int32_t *osrel);
99 
100 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
101     "");
102 
103 #ifdef COMPRESS_USER_CORES
104 static int compress_core(gzFile, char *, char *, unsigned int,
105     struct thread * td);
106 #define CORE_BUF_SIZE	(16 * 1024)
107 #endif
108 
109 int __elfN(fallback_brand) = -1;
110 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
111     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
112     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
113 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
114     &__elfN(fallback_brand));
115 
116 static int elf_legacy_coredump = 0;
117 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
118     &elf_legacy_coredump, 0, "");
119 
120 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
121 
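/*
 * Truncate or round a virtual address to an arbitrary power-of-two
 * page size "ps"; e.g. trunc_page_ps(0x2345, 0x2000) == 0x2000 and
 * round_page_ps(0x2345, 0x2000) == 0x4000.  aligned() tests whether
 * "a" is a multiple of sizeof(t).
 */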
122 #define	trunc_page_ps(va, ps)	((va) & ~((ps) - 1))
123 #define	round_page_ps(va, ps)	(((va) + ((ps) - 1)) & ~((ps) - 1))
124 #define	aligned(a, t)	(trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
125 
126 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
127 
128 Elf_Brandnote __elfN(freebsd_brandnote) = {
129 	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
130 	.hdr.n_descsz	= sizeof(int32_t),
131 	.hdr.n_type	= 1,
132 	.vendor		= FREEBSD_ABI_VENDOR,
133 	.flags		= BN_TRANSLATE_OSREL,
134 	.trans_osrel	= __elfN(freebsd_trans_osrel)
135 };
136 
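/*
 * Pull the 32-bit osrel (a __FreeBSD_version-style value) out of a
 * FreeBSD ABI note; the value is the note's desc, which follows the
 * padded vendor name.
 */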
137 static boolean_t
138 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
139 {
140 	uintptr_t p;
141 
142 	p = (uintptr_t)(note + 1);
143 	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
144 	*osrel = *(const int32_t *)(p);
145 
146 	return (TRUE);
147 }
148 
149 static const char GNU_ABI_VENDOR[] = "GNU";
150 static int GNU_KFREEBSD_ABI_DESC = 3;
151 
152 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
153 	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
154 	.hdr.n_descsz	= 16,	/* XXX at least 16 */
155 	.hdr.n_type	= 1,
156 	.vendor		= GNU_ABI_VENDOR,
157 	.flags		= BN_TRANSLATE_OSREL,
158 	.trans_osrel	= kfreebsd_trans_osrel
159 };
160 
161 static boolean_t
162 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
163 {
164 	const Elf32_Word *desc;
165 	uintptr_t p;
166 
167 	p = (uintptr_t)(note + 1);
168 	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
169 
170 	desc = (const Elf32_Word *)p;
171 	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
172 		return (FALSE);
173 
174 	/*
175 	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
176 	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
177 	 */
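	/* e.g. major 8, minor 0, xx 1 encodes as 8 * 100000 + 0 * 1000 + 1 = 800001. */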
178 	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
179 
180 	return (TRUE);
181 }
182 
183 int
184 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
185 {
186 	int i;
187 
188 	for (i = 0; i < MAX_BRANDS; i++) {
189 		if (elf_brand_list[i] == NULL) {
190 			elf_brand_list[i] = entry;
191 			break;
192 		}
193 	}
194 	if (i == MAX_BRANDS) {
195 		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
196 			__func__, entry);
197 		return (-1);
198 	}
199 	return (0);
200 }
201 
202 int
203 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
204 {
205 	int i;
206 
207 	for (i = 0; i < MAX_BRANDS; i++) {
208 		if (elf_brand_list[i] == entry) {
209 			elf_brand_list[i] = NULL;
210 			break;
211 		}
212 	}
213 	if (i == MAX_BRANDS)
214 		return (-1);
215 	return (0);
216 }
217 
218 int
219 __elfN(brand_inuse)(Elf_Brandinfo *entry)
220 {
221 	struct proc *p;
222 	int rval = FALSE;
223 
224 	sx_slock(&allproc_lock);
225 	FOREACH_PROC_IN_SYSTEM(p) {
226 		if (p->p_sysent == entry->sysvec) {
227 			rval = TRUE;
228 			break;
229 		}
230 	}
231 	sx_sunlock(&allproc_lock);
232 
233 	return (rval);
234 }
235 
236 static Elf_Brandinfo *
237 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
238     int32_t *osrel)
239 {
240 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
241 	Elf_Brandinfo *bi;
242 	boolean_t ret;
243 	int i;
244 
245 	/*
246 	 * We support four types of branding -- (1) the ELF EI_OSABI field
247 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
248 	 * branding w/in the ELF header, (3) a match against the `interp_path'
249 	 * interpreter field, and (4) the ".note.ABI-tag" ELF section.
250 	 */
251 
252 	/* Look for a ".note.ABI-tag" ELF section */
253 	for (i = 0; i < MAX_BRANDS; i++) {
254 		bi = elf_brand_list[i];
255 		if (bi == NULL)
256 			continue;
257 		if (hdr->e_machine == bi->machine && (bi->flags &
258 		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
259 			ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
260 			if (ret)
261 				return (bi);
262 		}
263 	}
264 
265 	/* If the executable has a brand, search for it in the brand list. */
266 	for (i = 0; i < MAX_BRANDS; i++) {
267 		bi = elf_brand_list[i];
268 		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
269 			continue;
270 		if (hdr->e_machine == bi->machine &&
271 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
272 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
273 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
274 			return (bi);
275 	}
276 
277 	/* Lacking a known brand, search for a recognized interpreter. */
278 	if (interp != NULL) {
279 		for (i = 0; i < MAX_BRANDS; i++) {
280 			bi = elf_brand_list[i];
281 			if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
282 				continue;
283 			if (hdr->e_machine == bi->machine &&
284 			    strcmp(interp, bi->interp_path) == 0)
285 				return (bi);
286 		}
287 	}
288 
289 	/* Lacking a recognized interpreter, try the default brand */
290 	for (i = 0; i < MAX_BRANDS; i++) {
291 		bi = elf_brand_list[i];
292 		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
293 			continue;
294 		if (hdr->e_machine == bi->machine &&
295 		    __elfN(fallback_brand) == bi->brand)
296 			return (bi);
297 	}
298 	return (NULL);
299 }
300 
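/*
 * Sanity-check an ELF header: the file must match this kernel's ELF
 * class, byte order and version, carry program header entries of the
 * expected size, and have at least one registered brand for its
 * machine type.
 */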
301 static int
302 __elfN(check_header)(const Elf_Ehdr *hdr)
303 {
304 	Elf_Brandinfo *bi;
305 	int i;
306 
307 	if (!IS_ELF(*hdr) ||
308 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
309 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
310 	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
311 	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
312 	    hdr->e_version != ELF_TARG_VER)
313 		return (ENOEXEC);
314 
315 	/*
316 	 * Make sure we have at least one brand for this machine.
317 	 */
318 
319 	for (i = 0; i < MAX_BRANDS; i++) {
320 		bi = elf_brand_list[i];
321 		if (bi != NULL && bi->machine == hdr->e_machine)
322 			break;
323 	}
324 	if (i == MAX_BRANDS)
325 		return (ENOEXEC);
326 
327 	return (0);
328 }
329 
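/*
 * Map a partial page: back the range with zero-filled anonymous
 * memory and copy the file data into it, since a fragment cannot be
 * mapped directly from the file object.
 */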
330 static int
331 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
332     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
333 {
334 	struct sf_buf *sf;
335 	int error;
336 	vm_offset_t off;
337 
338 	/*
339 	 * Create the page if it doesn't exist yet. Ignore errors.
340 	 */
341 	vm_map_lock(map);
342 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
343 	    VM_PROT_ALL, VM_PROT_ALL, 0);
344 	vm_map_unlock(map);
345 
346 	/*
347 	 * Find the page from the underlying object.
348 	 */
349 	if (object) {
350 		sf = vm_imgact_map_page(object, offset);
351 		if (sf == NULL)
352 			return (KERN_FAILURE);
353 		off = offset - trunc_page(offset);
354 		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
355 		    end - start);
356 		vm_imgact_unmap_page(sf);
357 		if (error) {
358 			return (KERN_FAILURE);
359 		}
360 	}
361 
362 	return (KERN_SUCCESS);
363 }
364 
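/*
 * Insert [start, end) into the map.  Unaligned head and tail pieces
 * go through map_partial(); the page-aligned middle is mapped
 * straight from the object, unless the file offset itself is not
 * page aligned, in which case the data has to be copied.
 */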
365 static int
366 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
367     vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
368 {
369 	struct sf_buf *sf;
370 	vm_offset_t off;
371 	vm_size_t sz;
372 	int error, rv;
373 
374 	if (start != trunc_page(start)) {
375 		rv = __elfN(map_partial)(map, object, offset, start,
376 		    round_page(start), prot);
377 		if (rv)
378 			return (rv);
379 		offset += round_page(start) - start;
380 		start = round_page(start);
381 	}
382 	if (end != round_page(end)) {
383 		rv = __elfN(map_partial)(map, object, offset +
384 		    trunc_page(end) - start, trunc_page(end), end, prot);
385 		if (rv)
386 			return (rv);
387 		end = trunc_page(end);
388 	}
389 	if (end > start) {
390 		if (offset & PAGE_MASK) {
391 			/*
392 			 * The mapping is not page aligned. This means we have
393 			 * to copy the data. Sigh.
394 			 */
395 			rv = vm_map_find(map, NULL, 0, &start, end - start,
396 			    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
397 			if (rv)
398 				return (rv);
399 			if (object == NULL)
400 				return (KERN_SUCCESS);
401 			for (; start < end; start += sz) {
402 				sf = vm_imgact_map_page(object, offset);
403 				if (sf == NULL)
404 					return (KERN_FAILURE);
405 				off = offset - trunc_page(offset);
406 				sz = end - start;
407 				if (sz > PAGE_SIZE - off)
408 					sz = PAGE_SIZE - off;
409 				error = copyout((caddr_t)sf_buf_kva(sf) + off,
410 				    (caddr_t)start, sz);
411 				vm_imgact_unmap_page(sf);
412 				if (error) {
413 					return (KERN_FAILURE);
414 				}
415 				offset += sz;
416 			}
417 			rv = KERN_SUCCESS;
418 		} else {
419 			vm_object_reference(object);
420 			vm_map_lock(map);
421 			rv = vm_map_insert(map, object, offset, start, end,
422 			    prot, VM_PROT_ALL, cow);
423 			vm_map_unlock(map);
424 			if (rv != KERN_SUCCESS)
425 				vm_object_deallocate(object);
426 		}
427 		return (rv);
428 	} else {
429 		return (KERN_SUCCESS);
430 	}
431 }
432 
433 static int
434 __elfN(load_section)(struct vmspace *vmspace,
435 	vm_object_t object, vm_offset_t offset,
436 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
437 	size_t pagesize)
438 {
439 	struct sf_buf *sf;
440 	size_t map_len;
441 	vm_offset_t map_addr;
442 	int error, rv, cow;
443 	size_t copy_len;
444 	vm_offset_t file_addr;
445 
446 	/*
447 	 * It's necessary to fail if the filsz + offset taken from the
448 	 * header is greater than the actual file pager object's size.
449 	 * If we were to allow this, then the vm_map_find() below would
450 	 * walk right off the end of the file object and into the ether.
451 	 *
452 	 * While I'm here, might as well check for something else that
453 	 * is invalid: filsz cannot be greater than memsz.
454 	 */
455 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
456 	    filsz > memsz) {
457 		uprintf("elf_load_section: truncated ELF file\n");
458 		return (ENOEXEC);
459 	}
460 
461 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
462 	file_addr = trunc_page_ps(offset, pagesize);
463 
464 	/*
465 	 * We have two choices.  We can either clear the data in the last page
466 	 * of an oversized mapping, or we can start the anon mapping a page
467 	 * early and copy the initialized data into that first page.  We
468 	 * choose the second.
469 	 */
470 	if (memsz > filsz)
471 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
472 	else
473 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
474 
475 	if (map_len != 0) {
476 		/* cow flags: don't dump readonly sections in core */
477 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
478 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
479 
480 		rv = __elfN(map_insert)(&vmspace->vm_map,
481 				      object,
482 				      file_addr,	/* file offset */
483 				      map_addr,		/* virtual start */
484 				      map_addr + map_len,/* virtual end */
485 				      prot,
486 				      cow);
487 		if (rv != KERN_SUCCESS)
488 			return (EINVAL);
489 
490 		/* we can stop now if we've covered it all */
491 		if (memsz == filsz) {
492 			return (0);
493 		}
494 	}
495 
496 
497 	/*
498 	 * We have to get the remaining bit of the file into the first part
499 	 * of the oversized map segment.  This is normally because the .data
500 	 * segment in the file is extended to provide bss.  It's a neat idea
501 	 * to try and save a page, but it's a pain in the behind to implement.
502 	 */
503 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
504 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
505 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
506 	    map_addr;
507 
508 	/* This had damn well better be true! */
509 	if (map_len != 0) {
510 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
511 		    map_addr + map_len, VM_PROT_ALL, 0);
512 		if (rv != KERN_SUCCESS) {
513 			return (EINVAL);
514 		}
515 	}
516 
517 	if (copy_len != 0) {
518 		vm_offset_t off;
519 
520 		sf = vm_imgact_map_page(object, offset + filsz);
521 		if (sf == NULL)
522 			return (EIO);
523 
524 		/* send the page fragment to user space */
525 		off = trunc_page_ps(offset + filsz, pagesize) -
526 		    trunc_page(offset + filsz);
527 		error = copyout((caddr_t)sf_buf_kva(sf) + off,
528 		    (caddr_t)map_addr, copy_len);
529 		vm_imgact_unmap_page(sf);
530 		if (error) {
531 			return (error);
532 		}
533 	}
534 
535 	/*
536 	 * Set the final mapping to the specified protection.
537 	 * XXX had better undo the damage from pasting over the cracks here!
538 	 */
539 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
540 	    round_page(map_addr + map_len),  prot, FALSE);
541 
542 	return (0);
543 }
544 
545 /*
546  * Load the file "file" into memory.  It may be either a shared object
547  * or an executable.
548  *
549  * The "addr" reference parameter is in/out.  On entry, it specifies
550  * the address where a shared object should be loaded.  If the file is
551  * an executable, this value is ignored.  On exit, "addr" specifies
552  * where the file was actually loaded.
553  *
554  * The "entry" reference parameter is out only.  On exit, it specifies
555  * the entry point for the loaded file.
556  */
557 static int
558 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
559 	u_long *entry, size_t pagesize)
560 {
561 	struct {
562 		struct nameidata nd;
563 		struct vattr attr;
564 		struct image_params image_params;
565 	} *tempdata;
566 	const Elf_Ehdr *hdr = NULL;
567 	const Elf_Phdr *phdr = NULL;
568 	struct nameidata *nd;
569 	struct vmspace *vmspace = p->p_vmspace;
570 	struct vattr *attr;
571 	struct image_params *imgp;
572 	vm_prot_t prot;
573 	u_long rbase;
574 	u_long base_addr = 0;
575 	int vfslocked, error, i, numsegs;
576 
577 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
578 	nd = &tempdata->nd;
579 	attr = &tempdata->attr;
580 	imgp = &tempdata->image_params;
581 
582 	/*
583 	 * Initialize part of the common data
584 	 */
585 	imgp->proc = p;
586 	imgp->attr = attr;
587 	imgp->firstpage = NULL;
588 	imgp->image_header = NULL;
589 	imgp->object = NULL;
590 	imgp->execlabel = NULL;
591 
592 	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
593 	    curthread);
594 	vfslocked = 0;
595 	if ((error = namei(nd)) != 0) {
596 		nd->ni_vp = NULL;
597 		goto fail;
598 	}
599 	vfslocked = NDHASGIANT(nd);
600 	NDFREE(nd, NDF_ONLY_PNBUF);
601 	imgp->vp = nd->ni_vp;
602 
603 	/*
604 	 * Check permissions, modes, uid, etc on the file, and "open" it.
605 	 */
606 	error = exec_check_permissions(imgp);
607 	if (error)
608 		goto fail;
609 
610 	error = exec_map_first_page(imgp);
611 	if (error)
612 		goto fail;
613 
614 	/*
615 	 * Also make certain that the interpreter stays the same, so set
616 	 * its VV_TEXT flag, too.
617 	 */
618 	nd->ni_vp->v_vflag |= VV_TEXT;
619 
620 	imgp->object = nd->ni_vp->v_object;
621 
622 	hdr = (const Elf_Ehdr *)imgp->image_header;
623 	if ((error = __elfN(check_header)(hdr)) != 0)
624 		goto fail;
625 	if (hdr->e_type == ET_DYN)
626 		rbase = *addr;
627 	else if (hdr->e_type == ET_EXEC)
628 		rbase = 0;
629 	else {
630 		error = ENOEXEC;
631 		goto fail;
632 	}
633 
634 	/* Only support headers that fit within first page for now      */
635 	/*    (multiplication of two Elf_Half fields will not overflow) */
636 	if ((hdr->e_phoff > PAGE_SIZE) ||
637 	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
638 		error = ENOEXEC;
639 		goto fail;
640 	}
641 
642 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
643 	if (!aligned(phdr, Elf_Addr)) {
644 		error = ENOEXEC;
645 		goto fail;
646 	}
647 
648 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
649 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
650 			/* Loadable segment */
651 			prot = 0;
652 			if (phdr[i].p_flags & PF_X)
653   				prot |= VM_PROT_EXECUTE;
654 			if (phdr[i].p_flags & PF_W)
655   				prot |= VM_PROT_WRITE;
656 			if (phdr[i].p_flags & PF_R)
657   				prot |= VM_PROT_READ;
658 
659 			if ((error = __elfN(load_section)(vmspace,
660 			    imgp->object, phdr[i].p_offset,
661 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
662 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
663 			    pagesize)) != 0)
664 				goto fail;
665 			/*
666 			 * Establish the base address if this is the
667 			 * first segment.
668 			 */
669 			if (numsegs == 0)
670   				base_addr = trunc_page(phdr[i].p_vaddr +
671 				    rbase);
672 			numsegs++;
673 		}
674 	}
675 	*addr = base_addr;
676 	*entry = (unsigned long)hdr->e_entry + rbase;
677 
678 fail:
679 	if (imgp->firstpage)
680 		exec_unmap_first_page(imgp);
681 
682 	if (nd->ni_vp)
683 		vput(nd->ni_vp);
684 
685 	VFS_UNLOCK_GIANT(vfslocked);
686 	free(tempdata, M_TEMP);
687 
688 	return (error);
689 }
690 
691 static int
692 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
693 {
694 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
695 	const Elf_Phdr *phdr;
696 	Elf_Auxargs *elf_auxargs;
697 	struct vmspace *vmspace;
698 	vm_prot_t prot;
699 	u_long text_size = 0, data_size = 0, total_size = 0;
700 	u_long text_addr = 0, data_addr = 0;
701 	u_long seg_size, seg_addr;
702 	u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
703 	int32_t osrel = 0;
704 	int error = 0, i, n;
705 	const char *interp = NULL, *newinterp = NULL;
706 	Elf_Brandinfo *brand_info;
707 	char *path;
708 	struct sysentvec *sv;
709 
710 	/*
711 	 * Do we have a valid ELF header?
712 	 *
713 	 * Only allow ET_EXEC & ET_DYN here; reject ET_DYN later
714 	 * if the particular brand doesn't support it.
715 	 */
716 	if (__elfN(check_header)(hdr) != 0 ||
717 	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
718 		return (-1);
719 
720 	/*
721 	 * From here on down, we return an errno, not -1, as we've
722 	 * detected an ELF file.
723 	 */
724 
725 	if ((hdr->e_phoff > PAGE_SIZE) ||
726 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
727 		/* Only support headers in first page for now */
728 		return (ENOEXEC);
729 	}
730 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
731 	if (!aligned(phdr, Elf_Addr))
732 		return (ENOEXEC);
733 	n = 0;
734 	baddr = 0;
735 	for (i = 0; i < hdr->e_phnum; i++) {
736 		if (phdr[i].p_type == PT_LOAD) {
737 			if (n == 0)
738 				baddr = phdr[i].p_vaddr;
739 			n++;
740 			continue;
741 		}
742 		if (phdr[i].p_type == PT_INTERP) {
743 			/* Path to interpreter */
744 			if (phdr[i].p_filesz > MAXPATHLEN ||
745 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
746 				return (ENOEXEC);
747 			interp = imgp->image_header + phdr[i].p_offset;
748 			continue;
749 		}
750 	}
751 
752 	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
753 	if (brand_info == NULL) {
754 		uprintf("ELF binary type \"%u\" not known.\n",
755 		    hdr->e_ident[EI_OSABI]);
756 		return (ENOEXEC);
757 	}
758 	if (hdr->e_type == ET_DYN) {
759 		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
760 			return (ENOEXEC);
761 		/*
762 		 * Honour the base load address from the dso if it is
763 		 * non-zero for some reason.
764 		 */
765 		if (baddr == 0)
766 			et_dyn_addr = ET_DYN_LOAD_ADDR;
767 		else
768 			et_dyn_addr = 0;
769 	} else
770 		et_dyn_addr = 0;
771 	sv = brand_info->sysvec;
772 	if (interp != NULL && brand_info->interp_newpath != NULL)
773 		newinterp = brand_info->interp_newpath;
774 
775 	/*
776 	 * Avoid a possible deadlock if the current address space is destroyed
777 	 * and that address space maps the locked vnode.  In the common case,
778 	 * the locked vnode's v_usecount is decremented but remains greater
779 	 * than zero.  Consequently, the vnode lock is not needed by vrele().
780 	 * However, in cases where the vnode lock is external, such as nullfs,
781 	 * v_usecount may become zero.
782 	 */
783 	VOP_UNLOCK(imgp->vp, 0);
784 
785 	error = exec_new_vmspace(imgp, sv);
786 	imgp->proc->p_sysent = sv;
787 
788 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
789 	if (error)
790 		return (error);
791 
792 	vmspace = imgp->proc->p_vmspace;
793 
794 	for (i = 0; i < hdr->e_phnum; i++) {
795 		switch (phdr[i].p_type) {
796 		case PT_LOAD:	/* Loadable segment */
797 			if (phdr[i].p_memsz == 0)
798 				break;
799 			prot = 0;
800 			if (phdr[i].p_flags & PF_X)
801   				prot |= VM_PROT_EXECUTE;
802 			if (phdr[i].p_flags & PF_W)
803   				prot |= VM_PROT_WRITE;
804 			if (phdr[i].p_flags & PF_R)
805   				prot |= VM_PROT_READ;
806 
807 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
808 			/*
809 			 * Some x86 binaries assume read == executable,
810 			 * notably the M3 runtime, and therefore cvsup.
811 			 */
812 			if (prot & VM_PROT_READ)
813 				prot |= VM_PROT_EXECUTE;
814 #endif
815 
816 			if ((error = __elfN(load_section)(vmspace,
817 			    imgp->object, phdr[i].p_offset,
818 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
819 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
820 			    sv->sv_pagesize)) != 0)
821 				return (error);
822 
823 			/*
824 			 * If this segment contains the program headers,
825 			 * remember their virtual address for the AT_PHDR
826 			 * aux entry. Static binaries don't usually include
827 			 * a PT_PHDR entry.
828 			 */
829 			if (phdr[i].p_offset == 0 &&
830 			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
831 				<= phdr[i].p_filesz)
832 				proghdr = phdr[i].p_vaddr + hdr->e_phoff +
833 				    et_dyn_addr;
834 
835 			seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
836 			seg_size = round_page(phdr[i].p_memsz +
837 			    phdr[i].p_vaddr + et_dyn_addr - seg_addr);
838 
839 			/*
840 			 * Is this .text or .data?  We can't use
841 			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
842 			 * alpha terribly and possibly does other bad
843 			 * things so we stick to the old way of figuring
844 			 * it out:  If the segment contains the program
845 			 * entry point, it's a text segment, otherwise it
846 			 * is a data segment.
847 			 *
848 			 * Note that obreak() assumes that data_addr +
849 			 * data_size == end of data load area, and the ELF
850 			 * file format expects segments to be sorted by
851 			 * address.  If multiple data segments exist, the
852 			 * last one will be used.
853 			 */
854 			if (hdr->e_entry >= phdr[i].p_vaddr &&
855 			    hdr->e_entry < (phdr[i].p_vaddr +
856 			    phdr[i].p_memsz)) {
857 				text_size = seg_size;
858 				text_addr = seg_addr;
859 				entry = (u_long)hdr->e_entry + et_dyn_addr;
860 			} else {
861 				data_size = seg_size;
862 				data_addr = seg_addr;
863 			}
864 			total_size += seg_size;
865 			break;
866 		case PT_PHDR: 	/* Program header table info */
867 			proghdr = phdr[i].p_vaddr + et_dyn_addr;
868 			break;
869 		default:
870 			break;
871 		}
872 	}
873 
874 	if (data_addr == 0 && data_size == 0) {
875 		data_addr = text_addr;
876 		data_size = text_size;
877 	}
878 
879 	/*
880 	 * Check limits.  It should be safe to check the
881 	 * limits after loading the segments since we do
882 	 * not actually fault in all the segments pages.
883 	 */
884 	PROC_LOCK(imgp->proc);
885 	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
886 	    text_size > maxtsiz ||
887 	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
888 		PROC_UNLOCK(imgp->proc);
889 		return (ENOMEM);
890 	}
891 
892 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
893 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
894 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
895 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
896 
897 	/*
898 	 * We load the dynamic linker where a userland call
899 	 * to mmap(0, ...) would put it.  The rationale behind this
900 	 * calculation is that it leaves room for the heap to grow to
901 	 * its maximum allowed size.
902 	 */
903 	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
904 	    lim_max(imgp->proc, RLIMIT_DATA));
905 	PROC_UNLOCK(imgp->proc);
906 
907 	imgp->entry_addr = entry;
908 
909 	if (interp != NULL) {
910 		int have_interp = FALSE;
911 		VOP_UNLOCK(imgp->vp, 0);
912 		if (brand_info->emul_path != NULL &&
913 		    brand_info->emul_path[0] != '\0') {
914 			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
915 			snprintf(path, MAXPATHLEN, "%s%s",
916 			    brand_info->emul_path, interp);
917 			error = __elfN(load_file)(imgp->proc, path, &addr,
918 			    &imgp->entry_addr, sv->sv_pagesize);
919 			free(path, M_TEMP);
920 			if (error == 0)
921 				have_interp = TRUE;
922 		}
923 		if (!have_interp && newinterp != NULL) {
924 			error = __elfN(load_file)(imgp->proc, newinterp, &addr,
925 			    &imgp->entry_addr, sv->sv_pagesize);
926 			if (error == 0)
927 				have_interp = TRUE;
928 		}
929 		if (!have_interp) {
930 			error = __elfN(load_file)(imgp->proc, interp, &addr,
931 			    &imgp->entry_addr, sv->sv_pagesize);
932 		}
933 		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
934 		if (error != 0) {
935 			uprintf("ELF interpreter %s not found\n", interp);
936 			return (error);
937 		}
938 	} else
939 		addr = et_dyn_addr;
940 
941 	/*
942 	 * Construct auxargs table (used by the fixup routine)
943 	 */
944 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
945 	elf_auxargs->execfd = -1;
946 	elf_auxargs->phdr = proghdr;
947 	elf_auxargs->phent = hdr->e_phentsize;
948 	elf_auxargs->phnum = hdr->e_phnum;
949 	elf_auxargs->pagesz = PAGE_SIZE;
950 	elf_auxargs->base = addr;
951 	elf_auxargs->flags = 0;
952 	elf_auxargs->entry = entry;
953 
954 	imgp->auxargs = elf_auxargs;
955 	imgp->interpreted = 0;
956 	imgp->proc->p_osrel = osrel;
957 
958 	return (error);
959 }
960 
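/* Expands to suword32() or suword64(), matching __ELF_WORD_SIZE. */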
961 #define	suword __CONCAT(suword, __ELF_WORD_SIZE)
962 
963 int
964 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
965 {
966 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
967 	Elf_Addr *base;
968 	Elf_Addr *pos;
969 
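	/*
	 * The aux vector starts after argv's argc pointers and envp's
	 * envc pointers, plus the two NULL terminators.
	 */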
970 	base = (Elf_Addr *)*stack_base;
971 	pos = base + (imgp->args->argc + imgp->args->envc + 2);
972 
973 	if (args->execfd != -1)
974 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
975 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
976 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
977 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
978 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
979 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
980 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
981 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
982 	if (imgp->execpathp != 0)
983 		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
984 	AUXARGS_ENTRY(pos, AT_NULL, 0);
985 
986 	free(imgp->auxargs, M_TEMP);
987 	imgp->auxargs = NULL;
988 
989 	base--;
990 	suword(base, (long)imgp->args->argc);
991 	*stack_base = (register_t *)base;
992 	return (0);
993 }
994 
995 /*
996  * Code for generating ELF core dumps.
997  */
998 
999 typedef void (*segment_callback)(vm_map_entry_t, void *);
1000 
1001 /* Closure for cb_put_phdr(). */
1002 struct phdr_closure {
1003 	Elf_Phdr *phdr;		/* Program header to fill in */
1004 	Elf_Off offset;		/* Offset of segment in core file */
1005 };
1006 
1007 /* Closure for cb_size_segment(). */
1008 struct sseg_closure {
1009 	int count;		/* Count of writable segments. */
1010 	size_t size;		/* Total size of all writable segments. */
1011 };
1012 
1013 static void cb_put_phdr(vm_map_entry_t, void *);
1014 static void cb_size_segment(vm_map_entry_t, void *);
1015 static void each_writable_segment(struct thread *, segment_callback, void *);
1016 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
1017     int, void *, size_t, gzFile);
1018 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
1019 static void __elfN(putnote)(void *, size_t *, const char *, int,
1020     const void *, size_t);
1021 
1022 #ifdef COMPRESS_USER_CORES
1023 extern int compress_user_cores;
1024 extern int compress_user_cores_gzlevel;
1025 #endif
1026 
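/*
 * Write one chunk of segment data to the core file, either directly
 * via vn_rdwr_inchunks() or through the gzip stream when the core is
 * being compressed.  "base" is a user space address.
 */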
1027 static int
1028 core_output(struct vnode *vp, void *base, size_t len, off_t offset,
1029     struct ucred *active_cred, struct ucred *file_cred,
1030     struct thread *td, char *core_buf, gzFile gzfile)
{
1032 	int error;

1033 	if (gzfile) {
1034 #ifdef COMPRESS_USER_CORES
1035 		error = compress_core(gzfile, base, core_buf, len, td);
1036 #else
1037 		panic("shouldn't be here");
1038 #endif
1039 	} else {
1040 		error = vn_rdwr_inchunks(UIO_WRITE, vp, base, len, offset,
1041 		    UIO_USERSPACE, IO_UNIT | IO_DIRECT, active_cred, file_cred,
1042 		    NULL, td);
1043 	}
1044 	return (error);
1045 }
1046 
1047 int
1048 __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
1049 {
1050 	struct ucred *cred = td->td_ucred;
1051 	int error = 0;
1052 	struct sseg_closure seginfo;
1053 	void *hdr;
1054 	size_t hdrsize;
1055 
1056 	gzFile gzfile = Z_NULL;
1057 	char *core_buf = NULL;
1058 #ifdef COMPRESS_USER_CORES
1059 	char gzopen_flags[8];
1060 	char *p;
1061 	int doing_compress = flags & IMGACT_CORE_COMPRESS;
1062 #endif
1063 
1064 	hdr = NULL;
1065 
1066 #ifdef COMPRESS_USER_CORES
1067 	if (doing_compress) {
1068 		p = gzopen_flags;
1069 		*p++ = 'w';
1070 		if (compress_user_cores_gzlevel >= 0 &&
1071 		    compress_user_cores_gzlevel <= 9)
1072 			*p++ = '0' + compress_user_cores_gzlevel;
1073 		*p = 0;
1074 		gzfile = gz_open("", gzopen_flags, vp);
1075 		if (gzfile == Z_NULL) {
1076 			error = EFAULT;
1077 			goto done;
1078 		}
1079 		core_buf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
1080 		if (!core_buf) {
1081 			error = ENOMEM;
1082 			goto done;
1083 		}
1084 	}
1085 #endif
1086 
1087 	/* Size the program segments. */
1088 	seginfo.count = 0;
1089 	seginfo.size = 0;
1090 	each_writable_segment(td, cb_size_segment, &seginfo);
1091 
1092 	/*
1093 	 * Calculate the size of the core file header area by making
1094 	 * a dry run of generating it.  Nothing is written, but the
1095 	 * size is calculated.
1096 	 */
1097 	hdrsize = 0;
1098 	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
1099 
1100 	if (hdrsize + seginfo.size >= limit) {
		error = EFAULT;
		goto done;
	}
1102 
1103 	/*
1104 	 * Allocate memory for building the header, fill it up,
1105 	 * and write it out.
1106 	 */
1107 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1108 	if (hdr == NULL) {
1109 		return (EINVAL);
1110 	}
1111 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize,
1112 	    gzfile);
1113 
1114 	/* Write the contents of all of the writable segments. */
1115 	if (error == 0) {
1116 		Elf_Phdr *php;
1117 		off_t offset;
1118 		int i;
1119 
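		/* Skip the leading PT_NOTE header; the PT_LOAD headers follow. */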
1120 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1121 		offset = hdrsize;
1122 		for (i = 0; i < seginfo.count; i++) {
1123 			error = core_output(vp, (caddr_t)(uintptr_t)php->p_vaddr,
1124 			    php->p_filesz, offset, cred, NOCRED, curthread,
			    core_buf, gzfile);
1125 			if (error != 0)
1126 				break;
1127 			offset += php->p_filesz;
1128 			php++;
1129 		}
1130 	}
1131 	if (error) {
1132 		log(LOG_WARNING,
1133 		    "Failed to write core file for process %s (error %d)\n",
1134 		    curproc->p_comm, error);
1135 	}
1136 
1138 done:
1140 	if (core_buf)
1141 		free(core_buf, M_TEMP);
1142 	if (gzfile)
1143 		gzclose(gzfile);
1144 
1145 	free(hdr, M_TEMP);
1146 
1147 	return (error);
1148 }
1149 
1150 /*
1151  * A callback for each_writable_segment() to write out the segment's
1152  * program header entry.
1153  */
1154 static void
1155 cb_put_phdr(vm_map_entry_t entry, void *closure)
1158 {
1159 	struct phdr_closure *phc = (struct phdr_closure *)closure;
1160 	Elf_Phdr *phdr = phc->phdr;
1161 
1162 	phc->offset = round_page(phc->offset);
1163 
1164 	phdr->p_type = PT_LOAD;
1165 	phdr->p_offset = phc->offset;
1166 	phdr->p_vaddr = entry->start;
1167 	phdr->p_paddr = 0;
1168 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1169 	phdr->p_align = PAGE_SIZE;
1170 	phdr->p_flags = 0;
1171 	if (entry->protection & VM_PROT_READ)
1172 		phdr->p_flags |= PF_R;
1173 	if (entry->protection & VM_PROT_WRITE)
1174 		phdr->p_flags |= PF_W;
1175 	if (entry->protection & VM_PROT_EXECUTE)
1176 		phdr->p_flags |= PF_X;
1177 
1178 	phc->offset += phdr->p_filesz;
1179 	phc->phdr++;
1180 }
1181 
1182 /*
1183  * A callback for each_writable_segment() to gather information about
1184  * the number of segments and their total size.
1185  */
1186 static void
1187 cb_size_segment(vm_map_entry_t entry, void *closure)
1190 {
1191 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1192 
1193 	ssc->count++;
1194 	ssc->size += entry->end - entry->start;
1195 }
1196 
1197 /*
1198  * For each writable segment in the process's memory map, call the given
1199  * function with a pointer to the map entry and some arbitrary
1200  * caller-supplied data.
1201  */
1202 static void
1203 each_writable_segment(struct thread *td, segment_callback func, void *closure)
1207 {
1208 	struct proc *p = td->td_proc;
1209 	vm_map_t map = &p->p_vmspace->vm_map;
1210 	vm_map_entry_t entry;
1211 	vm_object_t backing_object, object;
1212 	boolean_t ignore_entry;
1213 
1214 	vm_map_lock_read(map);
1215 	for (entry = map->header.next; entry != &map->header;
1216 	    entry = entry->next) {
1217 		/*
1218 		 * Don't dump inaccessible mappings, deal with legacy
1219 		 * coredump mode.
1220 		 *
1221 		 * Note that read-only segments related to the elf binary
1222 		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1223 		 * need to arbitrarily ignore such segments.
1224 		 */
1225 		if (elf_legacy_coredump) {
1226 			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1227 				continue;
1228 		} else {
1229 			if ((entry->protection & VM_PROT_ALL) == 0)
1230 				continue;
1231 		}
1232 
1233 		/*
1234 		 * Don't include a memory segment in the coredump if
1235 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1236 		 * madvise(2).  Do not dump submaps (i.e. parts of the
1237 		 * kernel map).
1238 		 */
1239 		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1240 			continue;
1241 
1242 		if ((object = entry->object.vm_object) == NULL)
1243 			continue;
1244 
1245 		/* Ignore memory-mapped devices and such things. */
1246 		VM_OBJECT_LOCK(object);
1247 		while ((backing_object = object->backing_object) != NULL) {
1248 			VM_OBJECT_LOCK(backing_object);
1249 			VM_OBJECT_UNLOCK(object);
1250 			object = backing_object;
1251 		}
1252 		ignore_entry = object->type != OBJT_DEFAULT &&
1253 		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1254 		VM_OBJECT_UNLOCK(object);
1255 		if (ignore_entry)
1256 			continue;
1257 
1258 		(*func)(entry, closure);
1259 	}
1260 	vm_map_unlock_read(map);
1261 }
1262 
1263 /*
1264  * Write the core file header to the file, including padding up to
1265  * the page boundary.
1266  */
1267 static int
1268 __elfN(corehdr)(struct thread *td, struct vnode *vp, struct ucred *cred,
    int numsegs, void *hdr, size_t hdrsize, gzFile gzfile)
1276 {
1277 	size_t off;
1278 
1279 	/* Fill in the header. */
1280 	bzero(hdr, hdrsize);
1281 	off = 0;
1282 	__elfN(puthdr)(td, hdr, &off, numsegs);
1283 
1284 	if (!gzfile) {
1285 		/* Write it to the core file. */
1286 		return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1287 			UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1288 			td));
1289 	} else {
1290 #ifdef COMPRESS_USER_CORES
1291 		if (gzwrite(gzfile, hdr, hdrsize) != hdrsize) {
1292 			log(LOG_WARNING,
1293 			    "Failed to compress core file header for process"
1294 			    " %s.\n", curproc->p_comm);
1295 			return (EFAULT);
1296 		} else {
1298 			return (0);
1299 		}
1300 #else
1301 		panic("shouldn't be here");
1302 #endif
1303 	}
1304 }
1305 
1306 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1307 typedef struct prstatus32 elf_prstatus_t;
1308 typedef struct prpsinfo32 elf_prpsinfo_t;
1309 typedef struct fpreg32 elf_prfpregset_t;
1310 typedef struct fpreg32 elf_fpregset_t;
1311 typedef struct reg32 elf_gregset_t;
1312 #else
1313 typedef prstatus_t elf_prstatus_t;
1314 typedef prpsinfo_t elf_prpsinfo_t;
1315 typedef prfpregset_t elf_prfpregset_t;
1316 typedef prfpregset_t elf_fpregset_t;
1317 typedef gregset_t elf_gregset_t;
1318 #endif
1319 
1320 static void
1321 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1322 {
1323 	struct {
1324 		elf_prstatus_t status;
1325 		elf_prfpregset_t fpregset;
1326 		elf_prpsinfo_t psinfo;
1327 	} *tempdata;
1328 	elf_prstatus_t *status;
1329 	elf_prfpregset_t *fpregset;
1330 	elf_prpsinfo_t *psinfo;
1331 	struct proc *p;
1332 	struct thread *thr;
1333 	size_t ehoff, noteoff, notesz, phoff;
1334 
1335 	p = td->td_proc;
1336 
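	/*
	 * Core file layout: ELF header, program headers (one PT_NOTE
	 * plus one PT_LOAD per writable segment), the notes, then the
	 * page-aligned segment data.
	 */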
1337 	ehoff = *off;
1338 	*off += sizeof(Elf_Ehdr);
1339 
1340 	phoff = *off;
1341 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
1342 
1343 	noteoff = *off;
1344 	/*
1345 	 * Don't allocate space for the notes if we're just calculating
1346 	 * the size of the header. We also don't collect the data.
1347 	 */
1348 	if (dst != NULL) {
1349 		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1350 		status = &tempdata->status;
1351 		fpregset = &tempdata->fpregset;
1352 		psinfo = &tempdata->psinfo;
1353 	} else {
1354 		tempdata = NULL;
1355 		status = NULL;
1356 		fpregset = NULL;
1357 		psinfo = NULL;
1358 	}
1359 
1360 	if (dst != NULL) {
1361 		psinfo->pr_version = PRPSINFO_VERSION;
1362 		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1363 		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1364 		/*
1365 		 * XXX - We don't fill in the command line arguments properly
1366 		 * yet.
1367 		 */
1368 		strlcpy(psinfo->pr_psargs, p->p_comm,
1369 		    sizeof(psinfo->pr_psargs));
1370 	}
1371 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1372 	    sizeof *psinfo);
1373 
1374 	/*
1375 	 * To have the debugger select the right thread (LWP) as the initial
1376 	 * thread, we dump the state of the thread passed to us in td first.
1377 	 * This is the thread that causes the core dump and thus likely to
1378 	 * be the right thread one wants to have selected in the debugger.
1379 	 */
1380 	thr = td;
1381 	while (thr != NULL) {
1382 		if (dst != NULL) {
1383 			status->pr_version = PRSTATUS_VERSION;
1384 			status->pr_statussz = sizeof(elf_prstatus_t);
1385 			status->pr_gregsetsz = sizeof(elf_gregset_t);
1386 			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1387 			status->pr_osreldate = osreldate;
1388 			status->pr_cursig = p->p_sig;
1389 			status->pr_pid = thr->td_tid;
1390 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1391 			fill_regs32(thr, &status->pr_reg);
1392 			fill_fpregs32(thr, fpregset);
1393 #else
1394 			fill_regs(thr, &status->pr_reg);
1395 			fill_fpregs(thr, fpregset);
1396 #endif
1397 		}
1398 		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1399 		    sizeof *status);
1400 		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1401 		    sizeof *fpregset);
1402 		/*
1403 		 * Allow for MD specific notes, as well as any MD
1404 		 * specific preparations for writing MI notes.
1405 		 */
1406 		__elfN(dump_thread)(thr, dst, off);
1407 
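		/*
		 * After td, walk the process's thread list from the
		 * head, skipping td when it comes around again.
		 */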
1408 		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1409 		    TAILQ_NEXT(thr, td_plist);
1410 		if (thr == td)
1411 			thr = TAILQ_NEXT(thr, td_plist);
1412 	}
1413 
1414 	notesz = *off - noteoff;
1415 
1416 	if (dst != NULL)
1417 		free(tempdata, M_TEMP);
1418 
1419 	/* Align up to a page boundary for the program segments. */
1420 	*off = round_page(*off);
1421 
1422 	if (dst != NULL) {
1423 		Elf_Ehdr *ehdr;
1424 		Elf_Phdr *phdr;
1425 		struct phdr_closure phc;
1426 
1427 		/*
1428 		 * Fill in the ELF header.
1429 		 */
1430 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1431 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1432 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1433 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1434 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1435 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1436 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1437 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1438 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1439 		ehdr->e_ident[EI_ABIVERSION] = 0;
1440 		ehdr->e_ident[EI_PAD] = 0;
1441 		ehdr->e_type = ET_CORE;
1442 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1443 		ehdr->e_machine = EM_386;
1444 #else
1445 		ehdr->e_machine = ELF_ARCH;
1446 #endif
1447 		ehdr->e_version = EV_CURRENT;
1448 		ehdr->e_entry = 0;
1449 		ehdr->e_phoff = phoff;
1450 		ehdr->e_flags = 0;
1451 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1452 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1453 		ehdr->e_phnum = numsegs + 1;
1454 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1455 		ehdr->e_shnum = 0;
1456 		ehdr->e_shstrndx = SHN_UNDEF;
1457 
1458 		/*
1459 		 * Fill in the program header entries.
1460 		 */
1461 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1462 
1463 		/* The note segment. */
1464 		phdr->p_type = PT_NOTE;
1465 		phdr->p_offset = noteoff;
1466 		phdr->p_vaddr = 0;
1467 		phdr->p_paddr = 0;
1468 		phdr->p_filesz = notesz;
1469 		phdr->p_memsz = 0;
1470 		phdr->p_flags = 0;
1471 		phdr->p_align = 0;
1472 		phdr++;
1473 
1474 		/* All the writable segments from the program. */
1475 		phc.phdr = phdr;
1476 		phc.offset = *off;
1477 		each_writable_segment(td, cb_put_phdr, &phc);
1478 	}
1479 }
1480 
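/*
 * Append a single ELF note: the Elf_Note header, then the name and
 * desc payloads, each padded to an Elf_Size boundary.  With dst ==
 * NULL only *off is advanced, which implements the sizing dry run.
 */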
1481 static void
1482 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1483     const void *desc, size_t descsz)
1484 {
1485 	Elf_Note note;
1486 
1487 	note.n_namesz = strlen(name) + 1;
1488 	note.n_descsz = descsz;
1489 	note.n_type = type;
1490 	if (dst != NULL)
1491 		bcopy(&note, (char *)dst + *off, sizeof note);
1492 	*off += sizeof note;
1493 	if (dst != NULL)
1494 		bcopy(name, (char *)dst + *off, note.n_namesz);
1495 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1496 	if (dst != NULL)
1497 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1498 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1499 }
1500 
1501 /*
1502  * Try to find the appropriate ABI-note section for checknote,
1503  * fetch the osreldate for binary from the ELF OSABI-note. Only the
1504  * first page of the image is searched, the same as for headers.
1505  */
1506 static boolean_t
1507 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
1508     int32_t *osrel)
1509 {
1510 	const Elf_Note *note, *note0, *note_end;
1511 	const Elf_Phdr *phdr, *pnote;
1512 	const Elf_Ehdr *hdr;
1513 	const char *note_name;
1514 	int i;
1515 
1516 	pnote = NULL;
1517 	hdr = (const Elf_Ehdr *)imgp->image_header;
1518 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
1519 
1520 	for (i = 0; i < hdr->e_phnum; i++) {
1521 		if (phdr[i].p_type == PT_NOTE) {
1522 			pnote = &phdr[i];
1523 			break;
1524 		}
1525 	}
1526 
1527 	if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
1528 	    pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
1529 		return (FALSE);
1530 
1531 	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
1532 	note_end = (const Elf_Note *)(imgp->image_header +
1533 	    pnote->p_offset + pnote->p_filesz);
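	/* The cap of 100 iterations bounds the walk of a malformed note chain. */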
1534 	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
1535 		if (!aligned(note, Elf32_Addr))
1536 			return (FALSE);
1537 		if (note->n_namesz != checknote->hdr.n_namesz ||
1538 		    note->n_descsz != checknote->hdr.n_descsz ||
1539 		    note->n_type != checknote->hdr.n_type)
1540 			goto nextnote;
1541 		note_name = (const char *)(note + 1);
1542 		if (strncmp(checknote->vendor, note_name,
1543 		    checknote->hdr.n_namesz) != 0)
1544 			goto nextnote;
1545 
1546 		/*
1547 		 * Fetch the osreldate for binary
1548 		 * from the ELF OSABI-note if necessary.
1549 		 */
1550 		if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
1551 		    checknote->trans_osrel != NULL)
1552 			return (checknote->trans_osrel(note, osrel));
1553 		return (TRUE);
1554 
1555 nextnote:
1556 		note = (const Elf_Note *)((const char *)(note + 1) +
1557 		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1558 		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
1559 	}
1560 
1561 	return (FALSE);
1562 }
1563 
1564 /*
1565  * Tell kern_execve.c about it, with a little help from the linker.
1566  */
1567 static struct execsw __elfN(execsw) = {
1568 	__CONCAT(exec_, __elfN(imgact)),
1569 	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1570 };
1571 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
1572 
1573 #ifdef COMPRESS_USER_CORES
1574 /*
1575  * Compress and write out a core segment for a user process.
1576  *
1577  * 'inbuf' is the starting address of a VM segment in the process' address
1578  * space that is to be compressed and written out to the core file.  'dest_buf'
1579  * is a buffer in the kernel's address space.  The segment is copied from
1580  * 'inbuf' to 'dest_buf' first before being processed by the compression
1581  * routine gzwrite().  This copying is necessary because the content of the VM
1582  * segment may change between the compression pass and the crc-computation pass
1583  * in gzwrite().  This is because realtime threads may preempt the UNIX kernel.
1584  */
1585 static int
1586 compress_core(gzFile file, char *inbuf, char *dest_buf, unsigned int len,
1587     struct thread *td)
1588 {
1589 	int len_compressed;
1590 	int error = 0;
1591 	unsigned int chunk_len;
1592 
1593 	while (len) {
1594 		chunk_len = (len > CORE_BUF_SIZE) ? CORE_BUF_SIZE : len;
1595 		error = copyin(inbuf, dest_buf, chunk_len);
		if (error != 0)
			break;
1596 		len_compressed = gzwrite(file, dest_buf, chunk_len);
1597 
1598 		EVENTHANDLER_INVOKE(app_coredump_progress, td, len_compressed);
1599 
1600 		if ((unsigned int)len_compressed != chunk_len) {
1601 			log(LOG_WARNING,
1602 			    "compress_core: length mismatch (0x%x returned, "
1603 			    "0x%x expected)\n", len_compressed, chunk_len);
1604 			EVENTHANDLER_INVOKE(app_coredump_error, td,
1605 			    "compress_core: length mismatch %x -> %x",
1606 			    chunk_len, len_compressed);
1607 			error = EFAULT;
1608 			break;
1609 		}
1610 		inbuf += chunk_len;
1611 		len -= chunk_len;
1612 		if (ticks - PCPU_GET(switchticks) >= hogticks)
1613 			uio_yield();
1614 	}
1615 
1616 	return (error);
1617 }
1618 #endif /* COMPRESS_USER_CORES */
1619