xref: /freebsd/sys/kern/imgact_elf.c (revision bf839416381cb9f63a8a82ea6e897a22830a8009)
1e1743d02SSøren Schmidt /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-3-Clause
38a36da99SPedro F. Giffuni  *
486be94fcSTycho Nightingale  * Copyright (c) 2017 Dell EMC
5455d3589SDavid E. O'Brien  * Copyright (c) 2000-2001, 2003 David O'Brien
69a14aa01SUlrich Spörlein  * Copyright (c) 1995-1996 Søren Schmidt
7e1743d02SSøren Schmidt  * Copyright (c) 1996 Peter Wemm
8e1743d02SSøren Schmidt  * All rights reserved.
9e1743d02SSøren Schmidt  *
10e1743d02SSøren Schmidt  * Redistribution and use in source and binary forms, with or without
11e1743d02SSøren Schmidt  * modification, are permitted provided that the following conditions
12e1743d02SSøren Schmidt  * are met:
13e1743d02SSøren Schmidt  * 1. Redistributions of source code must retain the above copyright
14e1743d02SSøren Schmidt  *    notice, this list of conditions and the following disclaimer
15e1743d02SSøren Schmidt  *    in this position and unchanged.
16e1743d02SSøren Schmidt  * 2. Redistributions in binary form must reproduce the above copyright
17e1743d02SSøren Schmidt  *    notice, this list of conditions and the following disclaimer in the
18e1743d02SSøren Schmidt  *    documentation and/or other materials provided with the distribution.
19e1743d02SSøren Schmidt  * 3. The name of the author may not be used to endorse or promote products
2021dc7d4fSJens Schweikhardt  *    derived from this software without specific prior written permission
21e1743d02SSøren Schmidt  *
22e1743d02SSøren Schmidt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23e1743d02SSøren Schmidt  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24e1743d02SSøren Schmidt  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25e1743d02SSøren Schmidt  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26e1743d02SSøren Schmidt  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27e1743d02SSøren Schmidt  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28e1743d02SSøren Schmidt  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29e1743d02SSøren Schmidt  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30e1743d02SSøren Schmidt  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31e1743d02SSøren Schmidt  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32e1743d02SSøren Schmidt  */
33e1743d02SSøren Schmidt 
34677b542eSDavid E. O'Brien #include <sys/cdefs.h>
35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
36677b542eSDavid E. O'Brien 
3712bc222eSJonathan Anderson #include "opt_capsicum.h"
3862919d78SPeter Wemm 
39e1743d02SSøren Schmidt #include <sys/param.h>
404a144410SRobert Watson #include <sys/capsicum.h>
4178f57a9cSMark Johnston #include <sys/compressor.h>
42e1743d02SSøren Schmidt #include <sys/exec.h>
438c64af4fSJohn Polstra #include <sys/fcntl.h>
44e1743d02SSøren Schmidt #include <sys/imgact.h>
45e1743d02SSøren Schmidt #include <sys/imgact_elf.h>
46b96bd95bSIan Lepore #include <sys/jail.h>
47e1743d02SSøren Schmidt #include <sys/kernel.h>
48f34fa851SJohn Baldwin #include <sys/lock.h>
49e1743d02SSøren Schmidt #include <sys/malloc.h>
5068ff2a43SChristian S.J. Peron #include <sys/mount.h>
518c64af4fSJohn Polstra #include <sys/mman.h>
52a794e791SBruce Evans #include <sys/namei.h>
53a794e791SBruce Evans #include <sys/proc.h>
548c64af4fSJohn Polstra #include <sys/procfs.h>
5586be94fcSTycho Nightingale #include <sys/ptrace.h>
561ba5ad42SEdward Tomasz Napierala #include <sys/racct.h>
57b7924341SAndrew Turner #include <sys/reg.h>
588c64af4fSJohn Polstra #include <sys/resourcevar.h>
5989f6b863SAttilio Rao #include <sys/rwlock.h>
60bd390213SMikolaj Golub #include <sys/sbuf.h>
61da61b9a6SAlan Cox #include <sys/sf_buf.h>
62ee235befSKonstantin Belousov #include <sys/smp.h>
6336240ea5SDoug Rabson #include <sys/systm.h>
64e1743d02SSøren Schmidt #include <sys/signalvar.h>
658c64af4fSJohn Polstra #include <sys/stat.h>
661005a129SJohn Baldwin #include <sys/sx.h>
678c64af4fSJohn Polstra #include <sys/syscall.h>
68e1743d02SSøren Schmidt #include <sys/sysctl.h>
698c64af4fSJohn Polstra #include <sys/sysent.h>
70a794e791SBruce Evans #include <sys/vnode.h>
71e7228204SAlfred Perlstein #include <sys/syslog.h>
72e7228204SAlfred Perlstein #include <sys/eventhandler.h>
73f1fca82eSMikolaj Golub #include <sys/user.h>
74e7228204SAlfred Perlstein 
75e1743d02SSøren Schmidt #include <vm/vm.h>
76e1743d02SSøren Schmidt #include <vm/vm_kern.h>
77e1743d02SSøren Schmidt #include <vm/vm_param.h>
78e1743d02SSøren Schmidt #include <vm/pmap.h>
79e1743d02SSøren Schmidt #include <vm/vm_map.h>
800ff27d31SJohn Polstra #include <vm/vm_object.h>
81e1743d02SSøren Schmidt #include <vm/vm_extern.h>
82e1743d02SSøren Schmidt 
8352c24af7SPeter Wemm #include <machine/elf.h>
84e1743d02SSøren Schmidt #include <machine/md_var.h>
85e1743d02SSøren Schmidt 
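/*
 * ELF note name and descriptor fields are padded to 4-byte boundaries
 * (ELF_NOTE_ROUNDSIZE).  OLD_EI_BRAND is the offset into e_ident[] where
 * FreeBSD 3.x stored its brand string; it is compared against
 * compat_3_brand in the brand lookup below.
 */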
861b8388cdSMikolaj Golub #define ELF_NOTE_ROUNDSIZE	4
87c815a20cSDavid E. O'Brien #define OLD_EI_BRAND	8
88c815a20cSDavid E. O'Brien 
893ebc1248SPeter Wemm static int __elfN(check_header)(const Elf_Ehdr *hdr);
9032c01de2SDmitry Chagin static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
9109c78d53SEdward Tomasz Napierala     const char *interp, int32_t *osrel, uint32_t *fctl0);
923ebc1248SPeter Wemm static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
931699546dSEdward Tomasz Napierala     u_long *entry);
940bbee4cdSKonstantin Belousov static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
951699546dSEdward Tomasz Napierala     caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
963ebc1248SPeter Wemm static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
97a95659f7SEd Maste static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
9889ffc202SBjoern A. Zeeb     int32_t *osrel);
99a95659f7SEd Maste static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
10019621645SAlex Richardson static bool __elfN(check_note)(struct image_params *imgp,
10119621645SAlex Richardson     Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0,
1020cad2aa2SKonstantin Belousov     uint32_t *fctl0);
103ed167eaaSKonstantin Belousov static vm_prot_t __elfN(trans_prot)(Elf_Word);
104ed167eaaSKonstantin Belousov static Elf_Word __elfN(untrans_prot)(vm_prot_t);
105e1743d02SSøren Schmidt 
1067029da5cSPawel Biernacki SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
1077029da5cSPawel Biernacki     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
108a360a43dSJake Burkholder     "");
109a360a43dSJake Burkholder 
110e548a1d4SJake Burkholder int __elfN(fallback_brand) = -1;
111e548a1d4SJake Burkholder SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
112af3b2549SHans Petter Selasky     fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
113a360a43dSJake Burkholder     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
114a360a43dSJake Burkholder 
115551d79e1SMarcel Moolenaar static int elf_legacy_coredump = 0;
116a360a43dSJake Burkholder SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
1171cbb879dSEd Maste     &elf_legacy_coredump, 0,
1181cbb879dSEd Maste     "include all and only RW pages in core dumps");
119e1743d02SSøren Schmidt 
12062c625fdSKonstantin Belousov int __elfN(nxstack) =
1214d22d07aSKonstantin Belousov #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
1224bf4b0f1SJohn Baldwin     (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
1234bf4b0f1SJohn Baldwin     defined(__riscv)
12462c625fdSKonstantin Belousov 	1;
12562c625fdSKonstantin Belousov #else
12662c625fdSKonstantin Belousov 	0;
12762c625fdSKonstantin Belousov #endif
128291c06a1SKonstantin Belousov SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
129291c06a1SKonstantin Belousov     nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
130291c06a1SKonstantin Belousov     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
131291c06a1SKonstantin Belousov 
132eb029587SKonstantin Belousov #if defined(__amd64__)
133eb029587SKonstantin Belousov static int __elfN(vdso) = 1;
134eb029587SKonstantin Belousov SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
135eb029587SKonstantin Belousov     vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0,
136eb029587SKonstantin Belousov     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading");
137eb029587SKonstantin Belousov #else
138eb029587SKonstantin Belousov static int __elfN(vdso) = 0;
139eb029587SKonstantin Belousov #endif
140eb029587SKonstantin Belousov 
141eb785fabSKonstantin Belousov #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
142126b36a2SKonstantin Belousov int i386_read_exec = 0;
143126b36a2SKonstantin Belousov SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
144126b36a2SKonstantin Belousov     "enable execution from readable segments");
145126b36a2SKonstantin Belousov #endif
146126b36a2SKonstantin Belousov 
14795aafd69SKonstantin Belousov static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR;
148f33533daSKonstantin Belousov static int
149f33533daSKonstantin Belousov sysctl_pie_base(SYSCTL_HANDLER_ARGS)
150f33533daSKonstantin Belousov {
151f33533daSKonstantin Belousov 	u_long val;
152f33533daSKonstantin Belousov 	int error;
153f33533daSKonstantin Belousov 
154f33533daSKonstantin Belousov 	val = __elfN(pie_base);
155f33533daSKonstantin Belousov 	error = sysctl_handle_long(oidp, &val, 0, req);
156f33533daSKonstantin Belousov 	if (error != 0 || req->newptr == NULL)
157f33533daSKonstantin Belousov 		return (error);
158f33533daSKonstantin Belousov 	if ((val & PAGE_MASK) != 0)
159f33533daSKonstantin Belousov 		return (EINVAL);
160f33533daSKonstantin Belousov 	__elfN(pie_base) = val;
161f33533daSKonstantin Belousov 	return (0);
162f33533daSKonstantin Belousov }
163f33533daSKonstantin Belousov SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
164f33533daSKonstantin Belousov     CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
165f33533daSKonstantin Belousov     sysctl_pie_base, "LU",
16695aafd69SKonstantin Belousov     "PIE load base without randomization");
16795aafd69SKonstantin Belousov 
1687029da5cSPawel Biernacki SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
1697029da5cSPawel Biernacki     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
170fa50a355SKonstantin Belousov     "");
171fa50a355SKonstantin Belousov #define	ASLR_NODE_OID	__CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)
172fa50a355SKonstantin Belousov 
173b014e0f1SMarcin Wojtas /*
174b014e0f1SMarcin Wojtas  * ASLR works properly on 64-bit machines, but some problems
175b014e0f1SMarcin Wojtas  * remain when using 32-bit architectures.  For this reason
176b014e0f1SMarcin Wojtas  * ASLR is only enabled by default when running native 64-bit
177b014e0f1SMarcin Wojtas  * non-PIE executables.
178b014e0f1SMarcin Wojtas  */
179b014e0f1SMarcin Wojtas static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64;
180fa50a355SKonstantin Belousov SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
181fa50a355SKonstantin Belousov     &__elfN(aslr_enabled), 0,
182fa50a355SKonstantin Belousov     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
183fa50a355SKonstantin Belousov     ": enable address map randomization");
184fa50a355SKonstantin Belousov 
185b014e0f1SMarcin Wojtas /*
186b014e0f1SMarcin Wojtas  * Enable ASLR only for 64-bit PIE binaries by default.
187b014e0f1SMarcin Wojtas  */
188b014e0f1SMarcin Wojtas static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64;
189fa50a355SKonstantin Belousov SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
190fa50a355SKonstantin Belousov     &__elfN(pie_aslr_enabled), 0,
191fa50a355SKonstantin Belousov     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
192fa50a355SKonstantin Belousov     ": enable address map randomization for PIE binaries");
193fa50a355SKonstantin Belousov 
194b014e0f1SMarcin Wojtas /*
195b014e0f1SMarcin Wojtas  * sbrk() is now deprecated and it can be assumed that in most
196b014e0f1SMarcin Wojtas  * cases it will not be used anyway.  This setting takes effect
197b014e0f1SMarcin Wojtas  * only when ASLR is enabled and allows utilizing the bss grow region.
198b014e0f1SMarcin Wojtas  */
199b014e0f1SMarcin Wojtas static int __elfN(aslr_honor_sbrk) = 0;
200fa50a355SKonstantin Belousov SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
201fa50a355SKonstantin Belousov     &__elfN(aslr_honor_sbrk), 0,
202fa50a355SKonstantin Belousov     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");
203fa50a355SKonstantin Belousov 
204fc83c5a7SKonstantin Belousov static int __elfN(aslr_stack_gap) = 3;
205fc83c5a7SKonstantin Belousov SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack_gap, CTLFLAG_RW,
206fc83c5a7SKonstantin Belousov     &__elfN(aslr_stack_gap), 0,
207fc83c5a7SKonstantin Belousov     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
208fc83c5a7SKonstantin Belousov     ": maximum percentage of main stack to waste on a random gap");
209fc83c5a7SKonstantin Belousov 
210944cf37bSKonstantin Belousov static int __elfN(sigfastblock) = 1;
211944cf37bSKonstantin Belousov SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
212944cf37bSKonstantin Belousov     CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
213944cf37bSKonstantin Belousov     "enable sigfastblock for new processes");
214944cf37bSKonstantin Belousov 
2152e1c94aaSKonstantin Belousov static bool __elfN(allow_wx) = true;
2162e1c94aaSKonstantin Belousov SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
2172e1c94aaSKonstantin Belousov     CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
2182e1c94aaSKonstantin Belousov     "Allow pages to be mapped simultaneously writable and executable");
2192e1c94aaSKonstantin Belousov 
2203ebc1248SPeter Wemm static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
221e1743d02SSøren Schmidt 
222545517f1SEdward Tomasz Napierala #define	aligned(a, t)	(rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
22393d1c728SKonstantin Belousov 
22432c01de2SDmitry Chagin Elf_Brandnote __elfN(freebsd_brandnote) = {
22532c01de2SDmitry Chagin 	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
22632c01de2SDmitry Chagin 	.hdr.n_descsz	= sizeof(int32_t),
2274c22b468SEd Maste 	.hdr.n_type	= NT_FREEBSD_ABI_TAG,
22832c01de2SDmitry Chagin 	.vendor		= FREEBSD_ABI_VENDOR,
22989ffc202SBjoern A. Zeeb 	.flags		= BN_TRANSLATE_OSREL,
23089ffc202SBjoern A. Zeeb 	.trans_osrel	= __elfN(freebsd_trans_osrel)
23132c01de2SDmitry Chagin };
23232c01de2SDmitry Chagin 
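/*
 * Translate the FreeBSD ABI note into an osrel value: the 32-bit
 * descriptor that follows the padded note name is the __FreeBSD_version
 * recorded in the binary.
 */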
233a95659f7SEd Maste static bool
23489ffc202SBjoern A. Zeeb __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
23589ffc202SBjoern A. Zeeb {
23689ffc202SBjoern A. Zeeb 	uintptr_t p;
23789ffc202SBjoern A. Zeeb 
23889ffc202SBjoern A. Zeeb 	p = (uintptr_t)(note + 1);
2391b8388cdSMikolaj Golub 	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
24089ffc202SBjoern A. Zeeb 	*osrel = *(const int32_t *)(p);
24189ffc202SBjoern A. Zeeb 
242a95659f7SEd Maste 	return (true);
24389ffc202SBjoern A. Zeeb }
24489ffc202SBjoern A. Zeeb 
24589ffc202SBjoern A. Zeeb static const char GNU_ABI_VENDOR[] = "GNU";
24689ffc202SBjoern A. Zeeb static int GNU_KFREEBSD_ABI_DESC = 3;
24789ffc202SBjoern A. Zeeb 
24889ffc202SBjoern A. Zeeb Elf_Brandnote __elfN(kfreebsd_brandnote) = {
24989ffc202SBjoern A. Zeeb 	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
25089ffc202SBjoern A. Zeeb 	.hdr.n_descsz	= 16,	/* XXX at least 16 */
25189ffc202SBjoern A. Zeeb 	.hdr.n_type	= 1,
25289ffc202SBjoern A. Zeeb 	.vendor		= GNU_ABI_VENDOR,
25389ffc202SBjoern A. Zeeb 	.flags		= BN_TRANSLATE_OSREL,
25489ffc202SBjoern A. Zeeb 	.trans_osrel	= kfreebsd_trans_osrel
25589ffc202SBjoern A. Zeeb };
25689ffc202SBjoern A. Zeeb 
257a95659f7SEd Maste static bool
25889ffc202SBjoern A. Zeeb kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
25989ffc202SBjoern A. Zeeb {
26089ffc202SBjoern A. Zeeb 	const Elf32_Word *desc;
26189ffc202SBjoern A. Zeeb 	uintptr_t p;
26289ffc202SBjoern A. Zeeb 
26389ffc202SBjoern A. Zeeb 	p = (uintptr_t)(note + 1);
2641b8388cdSMikolaj Golub 	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
26589ffc202SBjoern A. Zeeb 
26689ffc202SBjoern A. Zeeb 	desc = (const Elf32_Word *)p;
26789ffc202SBjoern A. Zeeb 	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
268a95659f7SEd Maste 		return (false);
26989ffc202SBjoern A. Zeeb 
27089ffc202SBjoern A. Zeeb 	/*
27189ffc202SBjoern A. Zeeb 	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
27289ffc202SBjoern A. Zeeb 	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
27389ffc202SBjoern A. Zeeb 	 */
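	/*
	 * For example, desc[1..3] = {12, 1, 0} yields
	 * 12 * 100000 + 1 * 1000 + 0 = 1201000.
	 */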
27489ffc202SBjoern A. Zeeb 	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
27589ffc202SBjoern A. Zeeb 
276a95659f7SEd Maste 	return (true);
27789ffc202SBjoern A. Zeeb }
27889ffc202SBjoern A. Zeeb 
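/*
 * Register a brand in the first free slot of elf_brand_list[].
 * Returns 0 on success or -1 if the table is full.
 */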
279e1743d02SSøren Schmidt int
2803ebc1248SPeter Wemm __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
281e1743d02SSøren Schmidt {
282e1743d02SSøren Schmidt 	int i;
283e1743d02SSøren Schmidt 
2843ebc1248SPeter Wemm 	for (i = 0; i < MAX_BRANDS; i++) {
285ea5a2b2eSSøren Schmidt 		if (elf_brand_list[i] == NULL) {
286ea5a2b2eSSøren Schmidt 			elf_brand_list[i] = entry;
287e1743d02SSøren Schmidt 			break;
288e1743d02SSøren Schmidt 		}
289e1743d02SSøren Schmidt 	}
290925c8b5bSBjoern A. Zeeb 	if (i == MAX_BRANDS) {
291925c8b5bSBjoern A. Zeeb 		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
292925c8b5bSBjoern A. Zeeb 			__func__, entry);
293a7cddfedSJake Burkholder 		return (-1);
294925c8b5bSBjoern A. Zeeb 	}
295a7cddfedSJake Burkholder 	return (0);
296e1743d02SSøren Schmidt }
297e1743d02SSøren Schmidt 
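/*
 * Remove a previously registered brand from elf_brand_list[].
 * Returns 0 on success or -1 if the entry was not found.
 */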
298e1743d02SSøren Schmidt int
2993ebc1248SPeter Wemm __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
300e1743d02SSøren Schmidt {
301e1743d02SSøren Schmidt 	int i;
302e1743d02SSøren Schmidt 
3033ebc1248SPeter Wemm 	for (i = 0; i < MAX_BRANDS; i++) {
304ea5a2b2eSSøren Schmidt 		if (elf_brand_list[i] == entry) {
305ea5a2b2eSSøren Schmidt 			elf_brand_list[i] = NULL;
306e1743d02SSøren Schmidt 			break;
307e1743d02SSøren Schmidt 		}
308e1743d02SSøren Schmidt 	}
309ea5a2b2eSSøren Schmidt 	if (i == MAX_BRANDS)
310a7cddfedSJake Burkholder 		return (-1);
311a7cddfedSJake Burkholder 	return (0);
312e1743d02SSøren Schmidt }
313e1743d02SSøren Schmidt 
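/*
 * Return true if any existing process was executed with this brand's
 * sysentvec, i.e. the brand is still in use.
 */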
3144082b189SAlex Richardson bool
3153ebc1248SPeter Wemm __elfN(brand_inuse)(Elf_Brandinfo *entry)
316096977faSMark Newton {
317096977faSMark Newton 	struct proc *p;
3184082b189SAlex Richardson 	bool rval = false;
319096977faSMark Newton 
3201005a129SJohn Baldwin 	sx_slock(&allproc_lock);
3214f506694SXin LI 	FOREACH_PROC_IN_SYSTEM(p) {
322553629ebSJake Burkholder 		if (p->p_sysent == entry->sysvec) {
3234082b189SAlex Richardson 			rval = true;
324553629ebSJake Burkholder 			break;
325096977faSMark Newton 		}
326553629ebSJake Burkholder 	}
3271005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
328096977faSMark Newton 
329553629ebSJake Burkholder 	return (rval);
330096977faSMark Newton }
331096977faSMark Newton 
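/*
 * Find the brand that matches this image.  The search order is: ABI
 * note, EI_OSABI or old-style brand string in the ELF header, any
 * brand whose header_supported callback accepts the image, the
 * interpreter path, and finally the fallback brand sysctl.
 */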
3325fe3ed62SJake Burkholder static Elf_Brandinfo *
33332c01de2SDmitry Chagin __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
33409c78d53SEdward Tomasz Napierala     int32_t *osrel, uint32_t *fctl0)
3355fe3ed62SJake Burkholder {
33632c01de2SDmitry Chagin 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
337af582aaeSKonstantin Belousov 	Elf_Brandinfo *bi, *bi_m;
33819621645SAlex Richardson 	bool ret, has_fctl0;
33909c78d53SEdward Tomasz Napierala 	int i, interp_name_len;
34009c78d53SEdward Tomasz Napierala 
341be7808dcSKonstantin Belousov 	interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;
3425fe3ed62SJake Burkholder 
3435fe3ed62SJake Burkholder 	/*
34432c01de2SDmitry Chagin 	 * We support four types of branding -- (1) the ELF EI_OSABI field
3455fe3ed62SJake Burkholder 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
34632c01de2SDmitry Chagin 	 * branding within the ELF header, (3) the path in the `interp_path'
34732c01de2SDmitry Chagin 	 * field, and (4) the ".note.ABI-tag" ELF section.
3485fe3ed62SJake Burkholder 	 */
3495fe3ed62SJake Burkholder 
35032c01de2SDmitry Chagin 	/* Look for an ".note.ABI-tag" ELF section */
351af582aaeSKonstantin Belousov 	bi_m = NULL;
35232c01de2SDmitry Chagin 	for (i = 0; i < MAX_BRANDS; i++) {
35332c01de2SDmitry Chagin 		bi = elf_brand_list[i];
354ecc2fda8SBjoern A. Zeeb 		if (bi == NULL)
355ecc2fda8SBjoern A. Zeeb 			continue;
3562274ab3dSKonstantin Belousov 		if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
3571438fe3cSKonstantin Belousov 			continue;
358ecc2fda8SBjoern A. Zeeb 		if (hdr->e_machine == bi->machine && (bi->flags &
359ecc2fda8SBjoern A. Zeeb 		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
3600cad2aa2SKonstantin Belousov 			has_fctl0 = false;
3610cad2aa2SKonstantin Belousov 			*fctl0 = 0;
3620cad2aa2SKonstantin Belousov 			*osrel = 0;
363cefb93f2SKonstantin Belousov 			ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
3640cad2aa2SKonstantin Belousov 			    &has_fctl0, fctl0);
365f19d421aSNathan Whitehorn 			/* Give brand a chance to veto check_note's guess */
3660cad2aa2SKonstantin Belousov 			if (ret && bi->header_supported) {
3670cad2aa2SKonstantin Belousov 				ret = bi->header_supported(imgp, osrel,
3680cad2aa2SKonstantin Belousov 				    has_fctl0 ? fctl0 : NULL);
3690cad2aa2SKonstantin Belousov 			}
370af582aaeSKonstantin Belousov 			/*
371af582aaeSKonstantin Belousov 			 * If the note checker claimed the binary, but
372af582aaeSKonstantin Belousov 			 * the interpreter path in the image does not
373af582aaeSKonstantin Belousov 			 * match the default one for the brand, keep
374af582aaeSKonstantin Belousov 			 * searching for other brands with the same
375af582aaeSKonstantin Belousov 			 * interpreter.  Either there is a better brand
376af582aaeSKonstantin Belousov 			 * with the right interpreter, or, failing that,
377af582aaeSKonstantin Belousov 			 * we return the first brand which accepted our
378af582aaeSKonstantin Belousov 			 * note and, optionally, header.
379af582aaeSKonstantin Belousov 			 */
3803aeacc55SKonstantin Belousov 			if (ret && bi_m == NULL && interp != NULL &&
3813aeacc55SKonstantin Belousov 			    (bi->interp_path == NULL ||
3823aeacc55SKonstantin Belousov 			    (strlen(bi->interp_path) + 1 != interp_name_len ||
3833aeacc55SKonstantin Belousov 			    strncmp(interp, bi->interp_path, interp_name_len)
3843aeacc55SKonstantin Belousov 			    != 0))) {
385af582aaeSKonstantin Belousov 				bi_m = bi;
386af582aaeSKonstantin Belousov 				ret = 0;
387af582aaeSKonstantin Belousov 				ret = false;
38832c01de2SDmitry Chagin 			if (ret)
38932c01de2SDmitry Chagin 				return (bi);
39032c01de2SDmitry Chagin 		}
39132c01de2SDmitry Chagin 	}
392af582aaeSKonstantin Belousov 	if (bi_m != NULL)
393af582aaeSKonstantin Belousov 		return (bi_m);
39432c01de2SDmitry Chagin 
3955fe3ed62SJake Burkholder 	/* If the executable has a brand, search for it in the brand list. */
3965fe3ed62SJake Burkholder 	for (i = 0; i < MAX_BRANDS; i++) {
3975fe3ed62SJake Burkholder 		bi = elf_brand_list[i];
3981438fe3cSKonstantin Belousov 		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
3992274ab3dSKonstantin Belousov 		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
400ecc2fda8SBjoern A. Zeeb 			continue;
401ecc2fda8SBjoern A. Zeeb 		if (hdr->e_machine == bi->machine &&
4025fe3ed62SJake Burkholder 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
4030fe98320SEd Schouten 		    (bi->compat_3_brand != NULL &&
4043d560b4bSKonstantin Belousov 		    strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
4050fe98320SEd Schouten 		    bi->compat_3_brand) == 0))) {
406686d2f31SNathan Whitehorn 			/* Looks good, but give brand a chance to veto */
407d722231bSJohn Baldwin 			if (bi->header_supported == NULL ||
4080cad2aa2SKonstantin Belousov 			    bi->header_supported(imgp, NULL, NULL)) {
40915a9aedfSKonstantin Belousov 				/*
41015a9aedfSKonstantin Belousov 				 * Again, prefer strictly matching
41115a9aedfSKonstantin Belousov 				 * interpreter path.
41215a9aedfSKonstantin Belousov 				 */
4137aab7a80SKonstantin Belousov 				if (interp_name_len == 0 &&
4147aab7a80SKonstantin Belousov 				    bi->interp_path == NULL)
4157aab7a80SKonstantin Belousov 					return (bi);
4167aab7a80SKonstantin Belousov 				if (bi->interp_path != NULL &&
4177aab7a80SKonstantin Belousov 				    strlen(bi->interp_path) + 1 ==
41815a9aedfSKonstantin Belousov 				    interp_name_len && strncmp(interp,
41915a9aedfSKonstantin Belousov 				    bi->interp_path, interp_name_len) == 0)
4205fe3ed62SJake Burkholder 					return (bi);
42115a9aedfSKonstantin Belousov 				if (bi_m == NULL)
42215a9aedfSKonstantin Belousov 					bi_m = bi;
4235fe3ed62SJake Burkholder 			}
424686d2f31SNathan Whitehorn 		}
42515a9aedfSKonstantin Belousov 	}
42615a9aedfSKonstantin Belousov 	if (bi_m != NULL)
42715a9aedfSKonstantin Belousov 		return (bi_m);
4285fe3ed62SJake Burkholder 
429817dc004SWarner Losh 	/* No known brand, see if the header is recognized by any brand */
430817dc004SWarner Losh 	for (i = 0; i < MAX_BRANDS; i++) {
431817dc004SWarner Losh 		bi = elf_brand_list[i];
432817dc004SWarner Losh 		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
433817dc004SWarner Losh 		    bi->header_supported == NULL)
434817dc004SWarner Losh 			continue;
435817dc004SWarner Losh 		if (hdr->e_machine == bi->machine) {
4360cad2aa2SKonstantin Belousov 			ret = bi->header_supported(imgp, NULL, NULL);
437817dc004SWarner Losh 			if (ret)
438817dc004SWarner Losh 				return (bi);
439817dc004SWarner Losh 		}
440817dc004SWarner Losh 	}
441817dc004SWarner Losh 
4425fe3ed62SJake Burkholder 	/* Lacking a known brand, search for a recognized interpreter. */
4435fe3ed62SJake Burkholder 	if (interp != NULL) {
4445fe3ed62SJake Burkholder 		for (i = 0; i < MAX_BRANDS; i++) {
4455fe3ed62SJake Burkholder 			bi = elf_brand_list[i];
4462274ab3dSKonstantin Belousov 			if (bi == NULL || (bi->flags &
4472274ab3dSKonstantin Belousov 			    (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
4482274ab3dSKonstantin Belousov 			    != 0)
449ecc2fda8SBjoern A. Zeeb 				continue;
450ecc2fda8SBjoern A. Zeeb 			if (hdr->e_machine == bi->machine &&
4513aeacc55SKonstantin Belousov 			    bi->interp_path != NULL &&
452d1ae5c83SKonstantin Belousov 			    /* ELF image p_filesz includes terminating zero */
453d1ae5c83SKonstantin Belousov 			    strlen(bi->interp_path) + 1 == interp_name_len &&
454d1ae5c83SKonstantin Belousov 			    strncmp(interp, bi->interp_path, interp_name_len)
455d722231bSJohn Baldwin 			    == 0 && (bi->header_supported == NULL ||
4560cad2aa2SKonstantin Belousov 			    bi->header_supported(imgp, NULL, NULL)))
4575fe3ed62SJake Burkholder 				return (bi);
4585fe3ed62SJake Burkholder 		}
4595fe3ed62SJake Burkholder 	}
4605fe3ed62SJake Burkholder 
4615fe3ed62SJake Burkholder 	/* Lacking a recognized interpreter, try the default brand */
4625fe3ed62SJake Burkholder 	for (i = 0; i < MAX_BRANDS; i++) {
4635fe3ed62SJake Burkholder 		bi = elf_brand_list[i];
4641438fe3cSKonstantin Belousov 		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
4652274ab3dSKonstantin Belousov 		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
466ecc2fda8SBjoern A. Zeeb 			continue;
467ecc2fda8SBjoern A. Zeeb 		if (hdr->e_machine == bi->machine &&
468d722231bSJohn Baldwin 		    __elfN(fallback_brand) == bi->brand &&
469d722231bSJohn Baldwin 		    (bi->header_supported == NULL ||
4700cad2aa2SKonstantin Belousov 		    bi->header_supported(imgp, NULL, NULL)))
4715fe3ed62SJake Burkholder 			return (bi);
4725fe3ed62SJake Burkholder 	}
4735fe3ed62SJake Burkholder 	return (NULL);
4745fe3ed62SJake Burkholder }
4755fe3ed62SJake Burkholder 
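/*
 * Check that the program header table lies entirely within the first
 * page of the file, which is the only part mapped by
 * exec_map_first_page().
 */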
4767de1bc13SKonstantin Belousov static bool
4777de1bc13SKonstantin Belousov __elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
4787de1bc13SKonstantin Belousov {
4797de1bc13SKonstantin Belousov 	return (hdr->e_phoff <= PAGE_SIZE &&
4807de1bc13SKonstantin Belousov 	    (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff);
4817de1bc13SKonstantin Belousov }
4827de1bc13SKonstantin Belousov 
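/*
 * Sanity-check the ELF header: identification, class, data encoding,
 * version and program header entry size, and verify that at least one
 * registered brand supports e_machine.
 */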
483e1743d02SSøren Schmidt static int
4843ebc1248SPeter Wemm __elfN(check_header)(const Elf_Ehdr *hdr)
485e1743d02SSøren Schmidt {
486d0ca7c29SPeter Wemm 	Elf_Brandinfo *bi;
4873ebc1248SPeter Wemm 	int i;
4883ebc1248SPeter Wemm 
48952c24af7SPeter Wemm 	if (!IS_ELF(*hdr) ||
49052c24af7SPeter Wemm 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
49152c24af7SPeter Wemm 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
4923dc19c46SJacques Vidrine 	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
4933dc19c46SJacques Vidrine 	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
4943dc19c46SJacques Vidrine 	    hdr->e_version != ELF_TARG_VER)
495a7cddfedSJake Burkholder 		return (ENOEXEC);
496e1743d02SSøren Schmidt 
4973ebc1248SPeter Wemm 	/*
4983ebc1248SPeter Wemm 	 * Make sure we have at least one brand for this machine.
4993ebc1248SPeter Wemm 	 */
5003ebc1248SPeter Wemm 
5013ebc1248SPeter Wemm 	for (i = 0; i < MAX_BRANDS; i++) {
502d0ca7c29SPeter Wemm 		bi = elf_brand_list[i];
503d0ca7c29SPeter Wemm 		if (bi != NULL && bi->machine == hdr->e_machine)
5043ebc1248SPeter Wemm 			break;
5053ebc1248SPeter Wemm 	}
5063ebc1248SPeter Wemm 	if (i == MAX_BRANDS)
507a7cddfedSJake Burkholder 		return (ENOEXEC);
508e1743d02SSøren Schmidt 
509a7cddfedSJake Burkholder 	return (0);
510e1743d02SSøren Schmidt }
511e1743d02SSøren Schmidt 
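/*
 * Map a sub-page range [start, end) by creating an anonymous mapping
 * and copying the corresponding bytes from the backing object with
 * copyout().  Used for segment edges that are not page aligned.
 */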
512e1743d02SSøren Schmidt static int
5133ebc1248SPeter Wemm __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
514ff6f03c7SAlan Cox     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
5153ebc1248SPeter Wemm {
516da61b9a6SAlan Cox 	struct sf_buf *sf;
517da61b9a6SAlan Cox 	int error;
5183ebc1248SPeter Wemm 	vm_offset_t off;
5193ebc1248SPeter Wemm 
5203ebc1248SPeter Wemm 	/*
5213ebc1248SPeter Wemm 	 * Create the page if it doesn't exist yet. Ignore errors.
5223ebc1248SPeter Wemm 	 */
523aaadc41fSKonstantin Belousov 	vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
524aaadc41fSKonstantin Belousov 	    trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);
5253ebc1248SPeter Wemm 
5263ebc1248SPeter Wemm 	/*
5273ebc1248SPeter Wemm 	 * Find the page from the underlying object.
5283ebc1248SPeter Wemm 	 */
52928e8da65SAlan Cox 	if (object != NULL) {
530da61b9a6SAlan Cox 		sf = vm_imgact_map_page(object, offset);
531da61b9a6SAlan Cox 		if (sf == NULL)
532da61b9a6SAlan Cox 			return (KERN_FAILURE);
5333ebc1248SPeter Wemm 		off = offset - trunc_page(offset);
534da61b9a6SAlan Cox 		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
535ca0387efSJake Burkholder 		    end - start);
536be996836SAttilio Rao 		vm_imgact_unmap_page(sf);
537fe0a8a39SKonstantin Belousov 		if (error != 0)
538a7cddfedSJake Burkholder 			return (KERN_FAILURE);
5393ebc1248SPeter Wemm 	}
5403ebc1248SPeter Wemm 
541a7cddfedSJake Burkholder 	return (KERN_SUCCESS);
5423ebc1248SPeter Wemm }
5433ebc1248SPeter Wemm 
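/*
 * Map [start, end) of the object at the given offset into the process
 * map.  Unaligned edges are handled by map_partial(), and when the
 * file offset is not page aligned the data is copied instead of
 * mapped.  On success a file-backed mapping also marks the vnode as
 * containing executable text.
 */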
5443ebc1248SPeter Wemm static int
545e3d8f8feSKonstantin Belousov __elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object,
546e3d8f8feSKonstantin Belousov     vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot,
547e3d8f8feSKonstantin Belousov     int cow)
5483ebc1248SPeter Wemm {
549da61b9a6SAlan Cox 	struct sf_buf *sf;
550da61b9a6SAlan Cox 	vm_offset_t off;
551a063facbSMarcel Moolenaar 	vm_size_t sz;
552e3d8f8feSKonstantin Belousov 	int error, locked, rv;
5533ebc1248SPeter Wemm 
5543ebc1248SPeter Wemm 	if (start != trunc_page(start)) {
55581f223caSJake Burkholder 		rv = __elfN(map_partial)(map, object, offset, start,
556ff6f03c7SAlan Cox 		    round_page(start), prot);
55728e8da65SAlan Cox 		if (rv != KERN_SUCCESS)
558a7cddfedSJake Burkholder 			return (rv);
5593ebc1248SPeter Wemm 		offset += round_page(start) - start;
5603ebc1248SPeter Wemm 		start = round_page(start);
5613ebc1248SPeter Wemm 	}
5623ebc1248SPeter Wemm 	if (end != round_page(end)) {
56381f223caSJake Burkholder 		rv = __elfN(map_partial)(map, object, offset +
564ff6f03c7SAlan Cox 		    trunc_page(end) - start, trunc_page(end), end, prot);
56528e8da65SAlan Cox 		if (rv != KERN_SUCCESS)
566a7cddfedSJake Burkholder 			return (rv);
5673ebc1248SPeter Wemm 		end = trunc_page(end);
5683ebc1248SPeter Wemm 	}
569e383e820SAlan Cox 	if (start >= end)
570e383e820SAlan Cox 		return (KERN_SUCCESS);
571e383e820SAlan Cox 	if ((offset & PAGE_MASK) != 0) {
5723ebc1248SPeter Wemm 		/*
573e383e820SAlan Cox 		 * The mapping is not page aligned.  This means that we have
574e383e820SAlan Cox 		 * to copy the data.
5753ebc1248SPeter Wemm 		 */
576aaadc41fSKonstantin Belousov 		rv = vm_map_fixed(map, NULL, 0, start, end - start,
577aaadc41fSKonstantin Belousov 		    prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
5785420f76bSKonstantin Belousov 		if (rv != KERN_SUCCESS)
579a7cddfedSJake Burkholder 			return (rv);
580da61b9a6SAlan Cox 		if (object == NULL)
581da61b9a6SAlan Cox 			return (KERN_SUCCESS);
582da61b9a6SAlan Cox 		for (; start < end; start += sz) {
583da61b9a6SAlan Cox 			sf = vm_imgact_map_page(object, offset);
584da61b9a6SAlan Cox 			if (sf == NULL)
585da61b9a6SAlan Cox 				return (KERN_FAILURE);
5863ebc1248SPeter Wemm 			off = offset - trunc_page(offset);
5873ebc1248SPeter Wemm 			sz = end - start;
588da61b9a6SAlan Cox 			if (sz > PAGE_SIZE - off)
589da61b9a6SAlan Cox 				sz = PAGE_SIZE - off;
590da61b9a6SAlan Cox 			error = copyout((caddr_t)sf_buf_kva(sf) + off,
5913ebc1248SPeter Wemm 			    (caddr_t)start, sz);
592be996836SAttilio Rao 			vm_imgact_unmap_page(sf);
5935420f76bSKonstantin Belousov 			if (error != 0)
594a7cddfedSJake Burkholder 				return (KERN_FAILURE);
595da61b9a6SAlan Cox 			offset += sz;
5963ebc1248SPeter Wemm 		}
5973ebc1248SPeter Wemm 	} else {
598e5e6093bSAlan Cox 		vm_object_reference(object);
599e383e820SAlan Cox 		rv = vm_map_fixed(map, object, offset, start, end - start,
60078022527SKonstantin Belousov 		    prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
60178022527SKonstantin Belousov 		    (object != NULL ? MAP_VN_EXEC : 0));
602e3d8f8feSKonstantin Belousov 		if (rv != KERN_SUCCESS) {
603e3d8f8feSKonstantin Belousov 			locked = VOP_ISLOCKED(imgp->vp);
604b249ce48SMateusz Guzik 			VOP_UNLOCK(imgp->vp);
605e5e6093bSAlan Cox 			vm_object_deallocate(object);
606e3d8f8feSKonstantin Belousov 			vn_lock(imgp->vp, locked | LK_RETRY);
607a7cddfedSJake Burkholder 			return (rv);
60878022527SKonstantin Belousov 		} else if (object != NULL) {
60978022527SKonstantin Belousov 			MPASS(imgp->vp->v_object == object);
61078022527SKonstantin Belousov 			VOP_SET_TEXT_CHECKED(imgp->vp);
6113ebc1248SPeter Wemm 		}
6123ebc1248SPeter Wemm 	}
613e383e820SAlan Cox 	return (KERN_SUCCESS);
614e383e820SAlan Cox }
6153ebc1248SPeter Wemm 
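/*
 * Map a single PT_LOAD segment into the process address space.  The
 * file-backed part covers filsz bytes at the given offset; any
 * remaining memsz beyond filsz becomes zero-filled (bss) anonymous
 * memory.
 */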
6163ebc1248SPeter Wemm static int
6170bbee4cdSKonstantin Belousov __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
6181699546dSEdward Tomasz Napierala     caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
619e1743d02SSøren Schmidt {
620da61b9a6SAlan Cox 	struct sf_buf *sf;
621e1743d02SSøren Schmidt 	size_t map_len;
622292177e6SAlan Cox 	vm_map_t map;
623292177e6SAlan Cox 	vm_object_t object;
624e020a35fSMark Johnston 	vm_offset_t map_addr;
625fa7dd9c5SMatthew Dillon 	int error, rv, cow;
626e1743d02SSøren Schmidt 	size_t copy_len;
6270bbee4cdSKonstantin Belousov 	vm_ooffset_t file_addr;
62852c24af7SPeter Wemm 
62925ead034SBrian Feldman 	/*
63025ead034SBrian Feldman 	 * It's necessary to fail if the filsz + offset taken from the
63125ead034SBrian Feldman 	 * header is greater than the actual file pager object's size.
63225ead034SBrian Feldman 	 * If we were to allow this, then the vm_map_find() below would
63325ead034SBrian Feldman 	 * walk right off the end of the file object and into the ether.
63425ead034SBrian Feldman 	 *
63525ead034SBrian Feldman 	 * While I'm here, might as well check for something else that
63625ead034SBrian Feldman 	 * is invalid: filsz cannot be greater than memsz.
63725ead034SBrian Feldman 	 */
6389bcf2f2dSKonstantin Belousov 	if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
6399bcf2f2dSKonstantin Belousov 	    filsz > memsz) {
64025ead034SBrian Feldman 		uprintf("elf_load_section: truncated ELF file\n");
64125ead034SBrian Feldman 		return (ENOEXEC);
64225ead034SBrian Feldman 	}
64325ead034SBrian Feldman 
644292177e6SAlan Cox 	object = imgp->object;
645292177e6SAlan Cox 	map = &imgp->proc->p_vmspace->vm_map;
646545517f1SEdward Tomasz Napierala 	map_addr = trunc_page((vm_offset_t)vmaddr);
647545517f1SEdward Tomasz Napierala 	file_addr = trunc_page(offset);
648e1743d02SSøren Schmidt 
649e1743d02SSøren Schmidt 	/*
65052c24af7SPeter Wemm 	 * We have two choices.  We can either clear the data in the last page
65152c24af7SPeter Wemm 	 * of an oversized mapping, or we can start the anon mapping a page
65252c24af7SPeter Wemm 	 * early and copy the initialized data into that first page.  We
65328e8da65SAlan Cox 	 * choose the second.
65452c24af7SPeter Wemm 	 */
6559bcf2f2dSKonstantin Belousov 	if (filsz == 0)
6569bcf2f2dSKonstantin Belousov 		map_len = 0;
6579bcf2f2dSKonstantin Belousov 	else if (memsz > filsz)
658545517f1SEdward Tomasz Napierala 		map_len = trunc_page(offset + filsz) - file_addr;
65952c24af7SPeter Wemm 	else
660545517f1SEdward Tomasz Napierala 		map_len = round_page(offset + filsz) - file_addr;
66152c24af7SPeter Wemm 
66252c24af7SPeter Wemm 	if (map_len != 0) {
663fa7dd9c5SMatthew Dillon 		/* cow flags: don't dump readonly sections in core */
664fa7dd9c5SMatthew Dillon 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
665fa7dd9c5SMatthew Dillon 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
666fa7dd9c5SMatthew Dillon 
66778022527SKonstantin Belousov 		rv = __elfN(map_insert)(imgp, map, object, file_addr,
66878022527SKonstantin Belousov 		    map_addr, map_addr + map_len, prot, cow);
669e5e6093bSAlan Cox 		if (rv != KERN_SUCCESS)
670a7cddfedSJake Burkholder 			return (EINVAL);
67152c24af7SPeter Wemm 
67252c24af7SPeter Wemm 		/* we can stop now if we've covered it all */
673973d67c4SKonstantin Belousov 		if (memsz == filsz)
674a7cddfedSJake Burkholder 			return (0);
67552c24af7SPeter Wemm 	}
67652c24af7SPeter Wemm 
67752c24af7SPeter Wemm 	/*
67852c24af7SPeter Wemm 	 * We have to get the remaining bit of the file into the first part
67952c24af7SPeter Wemm 	 * of the oversized map segment.  This is normally because the .data
68052c24af7SPeter Wemm 	 * segment in the file is extended to provide bss.  It's a neat idea
68152c24af7SPeter Wemm 	 * to try and save a page, but it's a pain in the behind to implement.
682e1743d02SSøren Schmidt 	 */
683545517f1SEdward Tomasz Napierala 	copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
684545517f1SEdward Tomasz Napierala 	    filsz);
685545517f1SEdward Tomasz Napierala 	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
686545517f1SEdward Tomasz Napierala 	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
687e1743d02SSøren Schmidt 
68852c24af7SPeter Wemm 	/* This had damn well better be true! */
6898191d577SPeter Wemm 	if (map_len != 0) {
690e3d8f8feSKonstantin Belousov 		rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
691c547cbb4SAlan Cox 		    map_addr + map_len, prot, 0);
692973d67c4SKonstantin Belousov 		if (rv != KERN_SUCCESS)
693a7cddfedSJake Burkholder 			return (EINVAL);
6948191d577SPeter Wemm 	}
695e1743d02SSøren Schmidt 
69652c24af7SPeter Wemm 	if (copy_len != 0) {
697da61b9a6SAlan Cox 		sf = vm_imgact_map_page(object, offset + filsz);
698da61b9a6SAlan Cox 		if (sf == NULL)
699da61b9a6SAlan Cox 			return (EIO);
700e1743d02SSøren Schmidt 
70152c24af7SPeter Wemm 		/* send the page fragment to user space */
702e020a35fSMark Johnston 		error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr,
703e020a35fSMark Johnston 		    copy_len);
704be996836SAttilio Rao 		vm_imgact_unmap_page(sf);
705973d67c4SKonstantin Belousov 		if (error != 0)
70652c24af7SPeter Wemm 			return (error);
70752c24af7SPeter Wemm 	}
708e1743d02SSøren Schmidt 
709e1743d02SSøren Schmidt 	/*
710c547cbb4SAlan Cox 	 * Remove write access to the page if it was only granted by map_insert
711c547cbb4SAlan Cox 	 * to allow copyout.
712e1743d02SSøren Schmidt 	 */
713c547cbb4SAlan Cox 	if ((prot & VM_PROT_WRITE) == 0)
714292177e6SAlan Cox 		vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
7150659df6fSKonstantin Belousov 		    map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);
7168191d577SPeter Wemm 
717ff6f03c7SAlan Cox 	return (0);
718e1743d02SSøren Schmidt }
719e1743d02SSøren Schmidt 
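/*
 * Walk the program headers and map every PT_LOAD segment at
 * p_vaddr + rbase.  If base_addrp is not NULL, it receives the base
 * address of the first loaded segment.
 */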
7209bcd7482SEdward Tomasz Napierala static int
7219bcd7482SEdward Tomasz Napierala __elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr,
7229bcd7482SEdward Tomasz Napierala     const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
7239bcd7482SEdward Tomasz Napierala {
7249bcd7482SEdward Tomasz Napierala 	vm_prot_t prot;
7259bcd7482SEdward Tomasz Napierala 	u_long base_addr;
7269bcd7482SEdward Tomasz Napierala 	bool first;
7279bcd7482SEdward Tomasz Napierala 	int error, i;
7289bcd7482SEdward Tomasz Napierala 
729b65ca345SEdward Tomasz Napierala 	ASSERT_VOP_LOCKED(imgp->vp, __func__);
730b65ca345SEdward Tomasz Napierala 
7319bcd7482SEdward Tomasz Napierala 	base_addr = 0;
7329bcd7482SEdward Tomasz Napierala 	first = true;
7339bcd7482SEdward Tomasz Napierala 
7349bcd7482SEdward Tomasz Napierala 	for (i = 0; i < hdr->e_phnum; i++) {
7359bcd7482SEdward Tomasz Napierala 		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
7369bcd7482SEdward Tomasz Napierala 			continue;
7379bcd7482SEdward Tomasz Napierala 
7389bcd7482SEdward Tomasz Napierala 		/* Loadable segment */
7399bcd7482SEdward Tomasz Napierala 		prot = __elfN(trans_prot)(phdr[i].p_flags);
7409bcd7482SEdward Tomasz Napierala 		error = __elfN(load_section)(imgp, phdr[i].p_offset,
7419bcd7482SEdward Tomasz Napierala 		    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
7429bcd7482SEdward Tomasz Napierala 		    phdr[i].p_memsz, phdr[i].p_filesz, prot);
7439bcd7482SEdward Tomasz Napierala 		if (error != 0)
7449bcd7482SEdward Tomasz Napierala 			return (error);
7459bcd7482SEdward Tomasz Napierala 
7469bcd7482SEdward Tomasz Napierala 		/*
7479bcd7482SEdward Tomasz Napierala 		 * Establish the base address if this is the first segment.
7489bcd7482SEdward Tomasz Napierala 		 */
7499bcd7482SEdward Tomasz Napierala 		if (first) {
7509bcd7482SEdward Tomasz Napierala   			base_addr = trunc_page(phdr[i].p_vaddr + rbase);
7519bcd7482SEdward Tomasz Napierala 			first = false;
7529bcd7482SEdward Tomasz Napierala 		}
7539bcd7482SEdward Tomasz Napierala 	}
7549bcd7482SEdward Tomasz Napierala 
7559bcd7482SEdward Tomasz Napierala 	if (base_addrp != NULL)
7569bcd7482SEdward Tomasz Napierala 		*base_addrp = base_addr;
7579bcd7482SEdward Tomasz Napierala 
7589bcd7482SEdward Tomasz Napierala 	return (0);
7599bcd7482SEdward Tomasz Napierala }
7609bcd7482SEdward Tomasz Napierala 
761c33fe779SJohn Polstra /*
762c33fe779SJohn Polstra  * Load the file "file" into memory.  It may be either a shared object
763c33fe779SJohn Polstra  * or an executable.
764c33fe779SJohn Polstra  *
765c33fe779SJohn Polstra  * The "addr" reference parameter is in/out.  On entry, it specifies
766c33fe779SJohn Polstra  * the address where a shared object should be loaded.  If the file is
767c33fe779SJohn Polstra  * an executable, this value is ignored.  On exit, "addr" specifies
768c33fe779SJohn Polstra  * where the file was actually loaded.
769c33fe779SJohn Polstra  *
770c33fe779SJohn Polstra  * The "entry" reference parameter is out only.  On exit, it specifies
771c33fe779SJohn Polstra  * the entry point for the loaded file.
772c33fe779SJohn Polstra  */
773e1743d02SSøren Schmidt static int
7743ebc1248SPeter Wemm __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
7751699546dSEdward Tomasz Napierala 	u_long *entry)
776e1743d02SSøren Schmidt {
777911c2be0SMark Peek 	struct {
778911c2be0SMark Peek 		struct nameidata nd;
779911c2be0SMark Peek 		struct vattr attr;
780911c2be0SMark Peek 		struct image_params image_params;
781911c2be0SMark Peek 	} *tempdata;
782d254af07SMatthew Dillon 	const Elf_Ehdr *hdr = NULL;
783d254af07SMatthew Dillon 	const Elf_Phdr *phdr = NULL;
784911c2be0SMark Peek 	struct nameidata *nd;
785911c2be0SMark Peek 	struct vattr *attr;
786911c2be0SMark Peek 	struct image_params *imgp;
78778022527SKonstantin Belousov 	u_long rbase;
788c33fe779SJohn Polstra 	u_long base_addr = 0;
7899bcd7482SEdward Tomasz Napierala 	int error;
790e1743d02SSøren Schmidt 
79112bc222eSJonathan Anderson #ifdef CAPABILITY_MODE
79212bc222eSJonathan Anderson 	/*
79312bc222eSJonathan Anderson 	 * XXXJA: This check can go away once we are sufficiently confident
79412bc222eSJonathan Anderson 	 * that the checks in namei() are correct.
79512bc222eSJonathan Anderson 	 */
79612bc222eSJonathan Anderson 	if (IN_CAPABILITY_MODE(curthread))
79712bc222eSJonathan Anderson 		return (ECAPMODE);
79812bc222eSJonathan Anderson #endif
79912bc222eSJonathan Anderson 
8001073d17eSKonstantin Belousov 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO);
801911c2be0SMark Peek 	nd = &tempdata->nd;
802911c2be0SMark Peek 	attr = &tempdata->attr;
803911c2be0SMark Peek 	imgp = &tempdata->image_params;
804911c2be0SMark Peek 
805c8a79999SPeter Wemm 	/*
806c8a79999SPeter Wemm 	 * Initialize part of the common data
807c8a79999SPeter Wemm 	 */
808c8a79999SPeter Wemm 	imgp->proc = p;
809911c2be0SMark Peek 	imgp->attr = attr;
810c8a79999SPeter Wemm 
811f422bc30SJohn Baldwin 	NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF,
8127e1d3eefSMateusz Guzik 	    UIO_SYSSPACE, file);
813911c2be0SMark Peek 	if ((error = namei(nd)) != 0) {
814911c2be0SMark Peek 		nd->ni_vp = NULL;
815e1743d02SSøren Schmidt 		goto fail;
816e1743d02SSøren Schmidt 	}
817911c2be0SMark Peek 	NDFREE(nd, NDF_ONLY_PNBUF);
818911c2be0SMark Peek 	imgp->vp = nd->ni_vp;
819c8a79999SPeter Wemm 
820e1743d02SSøren Schmidt 	/*
821e1743d02SSøren Schmidt 	 * Check permissions, modes, uid, etc on the file, and "open" it.
822e1743d02SSøren Schmidt 	 */
823c8a79999SPeter Wemm 	error = exec_check_permissions(imgp);
824373d1a3fSAlan Cox 	if (error)
825c8a79999SPeter Wemm 		goto fail;
826e1743d02SSøren Schmidt 
827c8a79999SPeter Wemm 	error = exec_map_first_page(imgp);
828373d1a3fSAlan Cox 	if (error)
829373d1a3fSAlan Cox 		goto fail;
830373d1a3fSAlan Cox 
8318516dd18SPoul-Henning Kamp 	imgp->object = nd->ni_vp->v_object;
832e1743d02SSøren Schmidt 
833d254af07SMatthew Dillon 	hdr = (const Elf_Ehdr *)imgp->image_header;
8343ebc1248SPeter Wemm 	if ((error = __elfN(check_header)(hdr)) != 0)
835e1743d02SSøren Schmidt 		goto fail;
836c33fe779SJohn Polstra 	if (hdr->e_type == ET_DYN)
837c33fe779SJohn Polstra 		rbase = *addr;
838c33fe779SJohn Polstra 	else if (hdr->e_type == ET_EXEC)
839c33fe779SJohn Polstra 		rbase = 0;
840c33fe779SJohn Polstra 	else {
841c33fe779SJohn Polstra 		error = ENOEXEC;
842c33fe779SJohn Polstra 		goto fail;
843c33fe779SJohn Polstra 	}
844e1743d02SSøren Schmidt 
845c8a79999SPeter Wemm 	/* Only support headers that fit within the first page for now */
8467de1bc13SKonstantin Belousov 	if (!__elfN(phdr_in_zero_page)(hdr)) {
847c8a79999SPeter Wemm 		error = ENOEXEC;
848e1743d02SSøren Schmidt 		goto fail;
849c8a79999SPeter Wemm 	}
850c8a79999SPeter Wemm 
851d254af07SMatthew Dillon 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
85293d1c728SKonstantin Belousov 	if (!aligned(phdr, Elf_Addr)) {
85393d1c728SKonstantin Belousov 		error = ENOEXEC;
85493d1c728SKonstantin Belousov 		goto fail;
85593d1c728SKonstantin Belousov 	}
856e1743d02SSøren Schmidt 
8579bcd7482SEdward Tomasz Napierala 	error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
858292177e6SAlan Cox 	if (error != 0)
859e1743d02SSøren Schmidt 		goto fail;
8609bcd7482SEdward Tomasz Napierala 
861c33fe779SJohn Polstra 	*addr = base_addr;
862c33fe779SJohn Polstra 	*entry = (unsigned long)hdr->e_entry + rbase;
863e1743d02SSøren Schmidt 
864e1743d02SSøren Schmidt fail:
865c8a79999SPeter Wemm 	if (imgp->firstpage)
866c8a79999SPeter Wemm 		exec_unmap_first_page(imgp);
8670b2ed1aeSJeff Roberson 
86878022527SKonstantin Belousov 	if (nd->ni_vp) {
86978022527SKonstantin Belousov 		if (imgp->textset)
87078022527SKonstantin Belousov 			VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
871373d1a3fSAlan Cox 		vput(nd->ni_vp);
87278022527SKonstantin Belousov 	}
873911c2be0SMark Peek 	free(tempdata, M_TEMP);
874e1743d02SSøren Schmidt 
875a7cddfedSJake Burkholder 	return (error);
876e1743d02SSøren Schmidt }
877e1743d02SSøren Schmidt 
878a4007ae1SKonstantin Belousov /*
879a4007ae1SKonstantin Belousov  * Select a randomized valid address in the map "map", between minv
880a4007ae1SKonstantin Belousov  * and maxv, with the specified alignment.  The [minv, maxv) range
881a4007ae1SKonstantin Belousov  * must belong to the map.  Note that the function only allocates the
882a4007ae1SKonstantin Belousov  * address; it is up to the caller to clamp maxv so that the final
883a4007ae1SKonstantin Belousov  * allocation length fits into the map.
884a4007ae1SKonstantin Belousov  *
885a4007ae1SKonstantin Belousov  * The result is returned in *resp; an error code indicates that the
886a4007ae1SKonstantin Belousov  * arguments did not pass sanity checks for overflow and range correctness.
887a4007ae1SKonstantin Belousov  */
8889cf78c1cSKonstantin Belousov static int
8899cf78c1cSKonstantin Belousov __CONCAT(rnd_, __elfN(base))(vm_map_t map, u_long minv, u_long maxv,
8909cf78c1cSKonstantin Belousov     u_int align, u_long *resp)
891fa50a355SKonstantin Belousov {
892fa50a355SKonstantin Belousov 	u_long rbase, res;
893fa50a355SKonstantin Belousov 
894fa50a355SKonstantin Belousov 	MPASS(vm_map_min(map) <= minv);
8959cf78c1cSKonstantin Belousov 
8969cf78c1cSKonstantin Belousov 	if (minv >= maxv || minv + align >= maxv || maxv > vm_map_max(map)) {
8979cf78c1cSKonstantin Belousov 		uprintf("Invalid ELF segments layout\n");
8989cf78c1cSKonstantin Belousov 		return (ENOEXEC);
8999cf78c1cSKonstantin Belousov 	}
9009cf78c1cSKonstantin Belousov 
901fa50a355SKonstantin Belousov 	arc4rand(&rbase, sizeof(rbase), 0);
902fa50a355SKonstantin Belousov 	res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
903fa50a355SKonstantin Belousov 	res &= ~((u_long)align - 1);
904fa50a355SKonstantin Belousov 	if (res >= maxv)
905fa50a355SKonstantin Belousov 		res -= align;
9069cf78c1cSKonstantin Belousov 
907fa50a355SKonstantin Belousov 	KASSERT(res >= minv,
908fa50a355SKonstantin Belousov 	    ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
909fa50a355SKonstantin Belousov 	    res, minv, maxv, rbase));
910fa50a355SKonstantin Belousov 	KASSERT(res < maxv,
911fa50a355SKonstantin Belousov 	    ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
912fa50a355SKonstantin Belousov 	    res, maxv, minv, rbase));
9139cf78c1cSKonstantin Belousov 
9149cf78c1cSKonstantin Belousov 	*resp = res;
9159cf78c1cSKonstantin Belousov 	return (0);
916fa50a355SKonstantin Belousov }
917fa50a355SKonstantin Belousov 
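/*
 * Compute text, data and total segment sizes from the PT_LOAD headers
 * and check them against RLIMIT_DATA, maxtsiz, RLIMIT_VMEM and the
 * racct limits, then record the sizes in the process vmspace.
 */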
91820e1174aSEdward Tomasz Napierala static int
91920e1174aSEdward Tomasz Napierala __elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
92020e1174aSEdward Tomasz Napierala     const Elf_Phdr *phdr, u_long et_dyn_addr)
92120e1174aSEdward Tomasz Napierala {
92220e1174aSEdward Tomasz Napierala 	struct vmspace *vmspace;
92320e1174aSEdward Tomasz Napierala 	const char *err_str;
92420e1174aSEdward Tomasz Napierala 	u_long text_size, data_size, total_size, text_addr, data_addr;
92520e1174aSEdward Tomasz Napierala 	u_long seg_size, seg_addr;
92620e1174aSEdward Tomasz Napierala 	int i;
92720e1174aSEdward Tomasz Napierala 
92820e1174aSEdward Tomasz Napierala 	err_str = NULL;
92920e1174aSEdward Tomasz Napierala 	text_size = data_size = total_size = text_addr = data_addr = 0;
93020e1174aSEdward Tomasz Napierala 
93120e1174aSEdward Tomasz Napierala 	for (i = 0; i < hdr->e_phnum; i++) {
93220e1174aSEdward Tomasz Napierala 		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
93320e1174aSEdward Tomasz Napierala 			continue;
93420e1174aSEdward Tomasz Napierala 
93520e1174aSEdward Tomasz Napierala 		seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
93620e1174aSEdward Tomasz Napierala 		seg_size = round_page(phdr[i].p_memsz +
93720e1174aSEdward Tomasz Napierala 		    phdr[i].p_vaddr + et_dyn_addr - seg_addr);
93820e1174aSEdward Tomasz Napierala 
93920e1174aSEdward Tomasz Napierala 		/*
94020e1174aSEdward Tomasz Napierala 		 * Make the largest executable segment the official
94120e1174aSEdward Tomasz Napierala 		 * text segment and all others data.
94220e1174aSEdward Tomasz Napierala 		 *
94320e1174aSEdward Tomasz Napierala 		 * Note that obreak() assumes that data_addr + data_size == end
94420e1174aSEdward Tomasz Napierala 		 * of data load area, and the ELF file format expects segments
94520e1174aSEdward Tomasz Napierala 		 * to be sorted by address.  If multiple data segments exist,
94620e1174aSEdward Tomasz Napierala 		 * the last one will be used.
94720e1174aSEdward Tomasz Napierala 		 */
94820e1174aSEdward Tomasz Napierala 
94920e1174aSEdward Tomasz Napierala 		if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
95020e1174aSEdward Tomasz Napierala 			text_size = seg_size;
95120e1174aSEdward Tomasz Napierala 			text_addr = seg_addr;
95220e1174aSEdward Tomasz Napierala 		} else {
95320e1174aSEdward Tomasz Napierala 			data_size = seg_size;
95420e1174aSEdward Tomasz Napierala 			data_addr = seg_addr;
95520e1174aSEdward Tomasz Napierala 		}
95620e1174aSEdward Tomasz Napierala 		total_size += seg_size;
95720e1174aSEdward Tomasz Napierala 	}
95820e1174aSEdward Tomasz Napierala 
95920e1174aSEdward Tomasz Napierala 	if (data_addr == 0 && data_size == 0) {
96020e1174aSEdward Tomasz Napierala 		data_addr = text_addr;
96120e1174aSEdward Tomasz Napierala 		data_size = text_size;
96220e1174aSEdward Tomasz Napierala 	}
96320e1174aSEdward Tomasz Napierala 
96420e1174aSEdward Tomasz Napierala 	/*
96520e1174aSEdward Tomasz Napierala 	 * Check limits.  It should be safe to check the
96620e1174aSEdward Tomasz Napierala 	 * limits after loading the segments since we do
96720e1174aSEdward Tomasz Napierala 	 * not actually fault in all the segments' pages.
96820e1174aSEdward Tomasz Napierala 	 */
96920e1174aSEdward Tomasz Napierala 	PROC_LOCK(imgp->proc);
97020e1174aSEdward Tomasz Napierala 	if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
97120e1174aSEdward Tomasz Napierala 		err_str = "Data segment size exceeds process limit";
97220e1174aSEdward Tomasz Napierala 	else if (text_size > maxtsiz)
97320e1174aSEdward Tomasz Napierala 		err_str = "Text segment size exceeds system limit";
97420e1174aSEdward Tomasz Napierala 	else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
97520e1174aSEdward Tomasz Napierala 		err_str = "Total segment size exceeds process limit";
97620e1174aSEdward Tomasz Napierala 	else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
97720e1174aSEdward Tomasz Napierala 		err_str = "Data segment size exceeds resource limit";
97820e1174aSEdward Tomasz Napierala 	else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
97920e1174aSEdward Tomasz Napierala 		err_str = "Total segment size exceeds resource limit";
98020e1174aSEdward Tomasz Napierala 	PROC_UNLOCK(imgp->proc);
98120e1174aSEdward Tomasz Napierala 	if (err_str != NULL) {
98220e1174aSEdward Tomasz Napierala 		uprintf("%s\n", err_str);
98320e1174aSEdward Tomasz Napierala 		return (ENOMEM);
98420e1174aSEdward Tomasz Napierala 	}
98520e1174aSEdward Tomasz Napierala 
98620e1174aSEdward Tomasz Napierala 	vmspace = imgp->proc->p_vmspace;
98720e1174aSEdward Tomasz Napierala 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
98820e1174aSEdward Tomasz Napierala 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
98920e1174aSEdward Tomasz Napierala 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
99020e1174aSEdward Tomasz Napierala 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
99120e1174aSEdward Tomasz Napierala 
99220e1174aSEdward Tomasz Napierala 	return (0);
99320e1174aSEdward Tomasz Napierala }
99420e1174aSEdward Tomasz Napierala 
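/*
 * Extract the PT_INTERP interpreter path.  If it lies within the first
 * page of the image it is returned directly from the image header and
 * *free_interpp is set to false; otherwise it is read from the vnode
 * into a malloc'ed buffer that the caller must free.
 */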
99509c78d53SEdward Tomasz Napierala static int
99609c78d53SEdward Tomasz Napierala __elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
99709c78d53SEdward Tomasz Napierala     char **interpp, bool *free_interpp)
99809c78d53SEdward Tomasz Napierala {
99909c78d53SEdward Tomasz Napierala 	struct thread *td;
100009c78d53SEdward Tomasz Napierala 	char *interp;
100109c78d53SEdward Tomasz Napierala 	int error, interp_name_len;
100209c78d53SEdward Tomasz Napierala 
100309c78d53SEdward Tomasz Napierala 	KASSERT(phdr->p_type == PT_INTERP,
100409c78d53SEdward Tomasz Napierala 	    ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
1005b65ca345SEdward Tomasz Napierala 	ASSERT_VOP_LOCKED(imgp->vp, __func__);
100609c78d53SEdward Tomasz Napierala 
100709c78d53SEdward Tomasz Napierala 	td = curthread;
100809c78d53SEdward Tomasz Napierala 
100909c78d53SEdward Tomasz Napierala 	/* Path to interpreter */
101009c78d53SEdward Tomasz Napierala 	if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
101109c78d53SEdward Tomasz Napierala 		uprintf("Invalid PT_INTERP\n");
101209c78d53SEdward Tomasz Napierala 		return (ENOEXEC);
101309c78d53SEdward Tomasz Napierala 	}
101409c78d53SEdward Tomasz Napierala 
101509c78d53SEdward Tomasz Napierala 	interp_name_len = phdr->p_filesz;
101609c78d53SEdward Tomasz Napierala 	if (phdr->p_offset > PAGE_SIZE ||
101709c78d53SEdward Tomasz Napierala 	    interp_name_len > PAGE_SIZE - phdr->p_offset) {
10180ddfdc60SKonstantin Belousov 		/*
1019f1f81d3bSKonstantin Belousov 		 * The vnode lock might be needed by the pagedaemon to
10200ddfdc60SKonstantin Belousov 		 * clean pages owned by the vnode.  Do not allow sleep
10210ddfdc60SKonstantin Belousov 		 * waiting for memory with the vnode locked; instead,
10220ddfdc60SKonstantin Belousov 		 * try a non-sleepable allocation first, and if it
10230ddfdc60SKonstantin Belousov 		 * fails, go to the slow path where we drop the lock
1024f1f81d3bSKonstantin Belousov 		 * and do M_WAITOK.  A text reference prevents
1025f1f81d3bSKonstantin Belousov 		 * modifications to the vnode content.
10260ddfdc60SKonstantin Belousov 		 */
10272d6b8546SKonstantin Belousov 		interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
10282d6b8546SKonstantin Belousov 		if (interp == NULL) {
1029b249ce48SMateusz Guzik 			VOP_UNLOCK(imgp->vp);
103009c78d53SEdward Tomasz Napierala 			interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
103178022527SKonstantin Belousov 			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
10322d6b8546SKonstantin Belousov 		}
10330ddfdc60SKonstantin Belousov 
103409c78d53SEdward Tomasz Napierala 		error = vn_rdwr(UIO_READ, imgp->vp, interp,
103509c78d53SEdward Tomasz Napierala 		    interp_name_len, phdr->p_offset,
103609c78d53SEdward Tomasz Napierala 		    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
103709c78d53SEdward Tomasz Napierala 		    NOCRED, NULL, td);
103809c78d53SEdward Tomasz Napierala 		if (error != 0) {
103909c78d53SEdward Tomasz Napierala 			free(interp, M_TEMP);
104009c78d53SEdward Tomasz Napierala 			uprintf("i/o error PT_INTERP %d\n", error);
104109c78d53SEdward Tomasz Napierala 			return (error);
104209c78d53SEdward Tomasz Napierala 		}
104309c78d53SEdward Tomasz Napierala 		interp[interp_name_len] = '\0';
104409c78d53SEdward Tomasz Napierala 
104509c78d53SEdward Tomasz Napierala 		*interpp = interp;
104609c78d53SEdward Tomasz Napierala 		*free_interpp = true;
104709c78d53SEdward Tomasz Napierala 		return (0);
104809c78d53SEdward Tomasz Napierala 	}
104909c78d53SEdward Tomasz Napierala 
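	/*
	 * The interpreter path is contained entirely within the first
	 * page of the image, which is already mapped at
	 * imgp->image_header, so use it in place without copying.
	 */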
105009c78d53SEdward Tomasz Napierala 	interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
105109c78d53SEdward Tomasz Napierala 	if (interp[interp_name_len - 1] != '\0') {
105209c78d53SEdward Tomasz Napierala 		uprintf("Invalid PT_INTERP\n");
105309c78d53SEdward Tomasz Napierala 		return (ENOEXEC);
105409c78d53SEdward Tomasz Napierala 	}
105509c78d53SEdward Tomasz Napierala 
105609c78d53SEdward Tomasz Napierala 	*interpp = interp;
105709c78d53SEdward Tomasz Napierala 	*free_interpp = false;
105809c78d53SEdward Tomasz Napierala 	return (0);
105909c78d53SEdward Tomasz Napierala }
106009c78d53SEdward Tomasz Napierala 
10619274fb35SEdward Tomasz Napierala static int
10629274fb35SEdward Tomasz Napierala __elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
10639274fb35SEdward Tomasz Napierala     const char *interp, u_long *addr, u_long *entry)
10649274fb35SEdward Tomasz Napierala {
10659274fb35SEdward Tomasz Napierala 	char *path;
10669274fb35SEdward Tomasz Napierala 	int error;
10679274fb35SEdward Tomasz Napierala 
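	/*
	 * Try up to three locations for the interpreter: the PT_INTERP
	 * path prefixed with the brand's emulation path, then the
	 * brand's replacement interpreter path (when the PT_INTERP path
	 * matches the brand's standard one, or none is registered), and
	 * finally the PT_INTERP path itself.
	 */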
10689274fb35SEdward Tomasz Napierala 	if (brand_info->emul_path != NULL &&
10699274fb35SEdward Tomasz Napierala 	    brand_info->emul_path[0] != '\0') {
10709274fb35SEdward Tomasz Napierala 		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
10719274fb35SEdward Tomasz Napierala 		snprintf(path, MAXPATHLEN, "%s%s",
10729274fb35SEdward Tomasz Napierala 		    brand_info->emul_path, interp);
10739274fb35SEdward Tomasz Napierala 		error = __elfN(load_file)(imgp->proc, path, addr, entry);
10749274fb35SEdward Tomasz Napierala 		free(path, M_TEMP);
10759274fb35SEdward Tomasz Napierala 		if (error == 0)
10769274fb35SEdward Tomasz Napierala 			return (0);
10779274fb35SEdward Tomasz Napierala 	}
10789274fb35SEdward Tomasz Napierala 
10799274fb35SEdward Tomasz Napierala 	if (brand_info->interp_newpath != NULL &&
10809274fb35SEdward Tomasz Napierala 	    (brand_info->interp_path == NULL ||
10819274fb35SEdward Tomasz Napierala 	    strcmp(interp, brand_info->interp_path) == 0)) {
10829274fb35SEdward Tomasz Napierala 		error = __elfN(load_file)(imgp->proc,
10839274fb35SEdward Tomasz Napierala 		    brand_info->interp_newpath, addr, entry);
10849274fb35SEdward Tomasz Napierala 		if (error == 0)
10859274fb35SEdward Tomasz Napierala 			return (0);
10869274fb35SEdward Tomasz Napierala 	}
10879274fb35SEdward Tomasz Napierala 
10889274fb35SEdward Tomasz Napierala 	error = __elfN(load_file)(imgp->proc, interp, addr, entry);
10899274fb35SEdward Tomasz Napierala 	if (error == 0)
10909274fb35SEdward Tomasz Napierala 		return (0);
10919274fb35SEdward Tomasz Napierala 
10929274fb35SEdward Tomasz Napierala 	uprintf("ELF interpreter %s not found, error %d\n", interp, error);
10939274fb35SEdward Tomasz Napierala 	return (error);
10949274fb35SEdward Tomasz Napierala }
10959274fb35SEdward Tomasz Napierala 
1096fa50a355SKonstantin Belousov /*
1097fa50a355SKonstantin Belousov  * Impossible et_dyn_addr initial value indicating that the real base
1098fa50a355SKonstantin Belousov  * must be calculated later with some randomization applied.
1099fa50a355SKonstantin Belousov  */
1100fa50a355SKonstantin Belousov #define	ET_DYN_ADDR_RAND	1
1101fa50a355SKonstantin Belousov 
1102303b270bSEivind Eklund static int
11033ebc1248SPeter Wemm __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
1104e1743d02SSøren Schmidt {
11056c775eb6SKonstantin Belousov 	struct thread *td;
11066c775eb6SKonstantin Belousov 	const Elf_Ehdr *hdr;
110732c01de2SDmitry Chagin 	const Elf_Phdr *phdr;
1108e5e6093bSAlan Cox 	Elf_Auxargs *elf_auxargs;
11095856e12eSJohn Dyson 	struct vmspace *vmspace;
1110fa50a355SKonstantin Belousov 	vm_map_t map;
11119274fb35SEdward Tomasz Napierala 	char *interp;
1112d1dbc694SJohn Polstra 	Elf_Brandinfo *brand_info;
11135fe3ed62SJake Burkholder 	struct sysentvec *sv;
111420e1174aSEdward Tomasz Napierala 	u_long addr, baddr, et_dyn_addr, entry, proghdr;
1115714d6d09SKonstantin Belousov 	u_long maxalign, maxsalign, mapsz, maxv, maxv1;
1116cefb93f2SKonstantin Belousov 	uint32_t fctl0;
11176c775eb6SKonstantin Belousov 	int32_t osrel;
111809c78d53SEdward Tomasz Napierala 	bool free_interp;
11199274fb35SEdward Tomasz Napierala 	int error, i, n;
11206c775eb6SKonstantin Belousov 
11216c775eb6SKonstantin Belousov 	hdr = (const Elf_Ehdr *)imgp->image_header;
1122e1743d02SSøren Schmidt 
1123e1743d02SSøren Schmidt 	/*
1124e1743d02SSøren Schmidt 	 * Do we have a valid ELF header?
1125900b28f9SMaxim Sobolev 	 *
1126900b28f9SMaxim Sobolev 	 * Only allow ET_EXEC & ET_DYN here; reject ET_DYN later
1127900b28f9SMaxim Sobolev 	 * if the particular brand doesn't support it.
1128e1743d02SSøren Schmidt 	 */
1129900b28f9SMaxim Sobolev 	if (__elfN(check_header)(hdr) != 0 ||
1130900b28f9SMaxim Sobolev 	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
1131a7cddfedSJake Burkholder 		return (-1);
1132e1743d02SSøren Schmidt 
1133e1743d02SSøren Schmidt 	/*
1134e1743d02SSøren Schmidt 	 * From here on down, we return an errno, not -1, as we've
1135e1743d02SSøren Schmidt 	 * detected an ELF file.
1136e1743d02SSøren Schmidt 	 */
1137e1743d02SSøren Schmidt 
11387de1bc13SKonstantin Belousov 	if (!__elfN(phdr_in_zero_page)(hdr)) {
11396b16d664SEd Maste 		uprintf("Program headers not in the first page\n");
1140a7cddfedSJake Burkholder 		return (ENOEXEC);
1141e1743d02SSøren Schmidt 	}
114252c24af7SPeter Wemm 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
11436b16d664SEd Maste 	if (!aligned(phdr, Elf_Addr)) {
11446b16d664SEd Maste 		uprintf("Unaligned program headers\n");
114593d1c728SKonstantin Belousov 		return (ENOEXEC);
11466b16d664SEd Maste 	}
11476c775eb6SKonstantin Belousov 
11486c775eb6SKonstantin Belousov 	n = error = 0;
11497564c4adSKonstantin Belousov 	baddr = 0;
11506c775eb6SKonstantin Belousov 	osrel = 0;
1151cefb93f2SKonstantin Belousov 	fctl0 = 0;
11526c775eb6SKonstantin Belousov 	entry = proghdr = 0;
11539274fb35SEdward Tomasz Napierala 	interp = NULL;
115409c78d53SEdward Tomasz Napierala 	free_interp = false;
11556c775eb6SKonstantin Belousov 	td = curthread;
1156714d6d09SKonstantin Belousov 
1157714d6d09SKonstantin Belousov 	/*
1158714d6d09SKonstantin Belousov 	 * Somewhat arbitrarily, limit the accepted max alignment for a
1159714d6d09SKonstantin Belousov 	 * loadable segment to the max supported superpage size.  Too-large
1160714d6d09SKonstantin Belousov 	 * alignment requests are not useful and are indicators of a
1161714d6d09SKonstantin Belousov 	 * corrupted or outright malicious binary.
1162714d6d09SKonstantin Belousov 	 */
1163fa50a355SKonstantin Belousov 	maxalign = PAGE_SIZE;
1164714d6d09SKonstantin Belousov 	maxsalign = PAGE_SIZE * 1024;
1165714d6d09SKonstantin Belousov 	for (i = MAXPAGESIZES - 1; i > 0; i--) {
1166714d6d09SKonstantin Belousov 		if (pagesizes[i] > maxsalign)
1167714d6d09SKonstantin Belousov 			maxsalign = pagesizes[i];
1168714d6d09SKonstantin Belousov 	}
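	/*
	 * maxsalign is the largest per-segment alignment we accept;
	 * maxalign accumulates the largest alignment actually requested
	 * by a PT_LOAD segment and is used later when randomizing the
	 * load base.
	 */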
1169714d6d09SKonstantin Belousov 
1170fa50a355SKonstantin Belousov 	mapsz = 0;
11716c775eb6SKonstantin Belousov 
11725fe3ed62SJake Burkholder 	for (i = 0; i < hdr->e_phnum; i++) {
1173291c06a1SKonstantin Belousov 		switch (phdr[i].p_type) {
1174291c06a1SKonstantin Belousov 		case PT_LOAD:
11757564c4adSKonstantin Belousov 			if (n == 0)
11767564c4adSKonstantin Belousov 				baddr = phdr[i].p_vaddr;
117736df8f54SKonstantin Belousov 			if (!powerof2(phdr[i].p_align) ||
117836df8f54SKonstantin Belousov 			    phdr[i].p_align > maxsalign) {
1179714d6d09SKonstantin Belousov 				uprintf("Invalid segment alignment\n");
1180714d6d09SKonstantin Belousov 				error = ENOEXEC;
1181714d6d09SKonstantin Belousov 				goto ret;
1182714d6d09SKonstantin Belousov 			}
1183fa50a355SKonstantin Belousov 			if (phdr[i].p_align > maxalign)
1184fa50a355SKonstantin Belousov 				maxalign = phdr[i].p_align;
1185*bf839416SKonstantin Belousov 			if (mapsz + phdr[i].p_memsz < mapsz) {
1186*bf839416SKonstantin Belousov 				uprintf("Mapsize overflow\n");
1187*bf839416SKonstantin Belousov 				error = ENOEXEC;
1188*bf839416SKonstantin Belousov 				goto ret;
1189*bf839416SKonstantin Belousov 			}
1190fa50a355SKonstantin Belousov 			mapsz += phdr[i].p_memsz;
11917564c4adSKonstantin Belousov 			n++;
11929bcd7482SEdward Tomasz Napierala 
11939bcd7482SEdward Tomasz Napierala 			/*
11949bcd7482SEdward Tomasz Napierala 			 * If this segment contains the program headers,
11959bcd7482SEdward Tomasz Napierala 			 * remember their virtual address for the AT_PHDR
11969bcd7482SEdward Tomasz Napierala 			 * aux entry. Static binaries don't usually include
11979bcd7482SEdward Tomasz Napierala 			 * a PT_PHDR entry.
11989bcd7482SEdward Tomasz Napierala 			 */
11999bcd7482SEdward Tomasz Napierala 			if (phdr[i].p_offset == 0 &&
120088dd7a0aSKonstantin Belousov 			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <=
120188dd7a0aSKonstantin Belousov 			    phdr[i].p_filesz)
12029bcd7482SEdward Tomasz Napierala 				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
1203291c06a1SKonstantin Belousov 			break;
1204291c06a1SKonstantin Belousov 		case PT_INTERP:
1205e5e6093bSAlan Cox 			/* Path to interpreter */
1206d3ee0a15SJonathan T. Looney 			if (interp != NULL) {
1207d3ee0a15SJonathan T. Looney 				uprintf("Multiple PT_INTERP headers\n");
1208d3ee0a15SJonathan T. Looney 				error = ENOEXEC;
1209d3ee0a15SJonathan T. Looney 				goto ret;
1210d3ee0a15SJonathan T. Looney 			}
121109c78d53SEdward Tomasz Napierala 			error = __elfN(get_interp)(imgp, &phdr[i], &interp,
121209c78d53SEdward Tomasz Napierala 			    &free_interp);
121309c78d53SEdward Tomasz Napierala 			if (error != 0)
12146c775eb6SKonstantin Belousov 				goto ret;
1215291c06a1SKonstantin Belousov 			break;
1216291c06a1SKonstantin Belousov 		case PT_GNU_STACK:
1217291c06a1SKonstantin Belousov 			if (__elfN(nxstack))
1218291c06a1SKonstantin Belousov 				imgp->stack_prot =
1219291c06a1SKonstantin Belousov 				    __elfN(trans_prot)(phdr[i].p_flags);
1220316b3843SKonstantin Belousov 			imgp->stack_sz = phdr[i].p_memsz;
1221291c06a1SKonstantin Belousov 			break;
12229bcd7482SEdward Tomasz Napierala 		case PT_PHDR: 	/* Program header table info */
12239bcd7482SEdward Tomasz Napierala 			proghdr = phdr[i].p_vaddr;
12249bcd7482SEdward Tomasz Napierala 			break;
12253ebc1248SPeter Wemm 		}
12263ebc1248SPeter Wemm 	}
12273ebc1248SPeter Wemm 
122809c78d53SEdward Tomasz Napierala 	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
12295fe3ed62SJake Burkholder 	if (brand_info == NULL) {
12305fe3ed62SJake Burkholder 		uprintf("ELF binary type \"%u\" not known.\n",
12315fe3ed62SJake Burkholder 		    hdr->e_ident[EI_OSABI]);
12326c775eb6SKonstantin Belousov 		error = ENOEXEC;
12336c775eb6SKonstantin Belousov 		goto ret;
12343ebc1248SPeter Wemm 	}
1235fa50a355SKonstantin Belousov 	sv = brand_info->sysvec;
123677ebe276SEd Maste 	et_dyn_addr = 0;
1237ab02d85fSKonstantin Belousov 	if (hdr->e_type == ET_DYN) {
12386b16d664SEd Maste 		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
12396b16d664SEd Maste 			uprintf("Cannot execute shared object\n");
12406c775eb6SKonstantin Belousov 			error = ENOEXEC;
12416c775eb6SKonstantin Belousov 			goto ret;
12426b16d664SEd Maste 		}
12437564c4adSKonstantin Belousov 		/*
12447564c4adSKonstantin Belousov 		 * Honour the base load address from the dso if it is
12457564c4adSKonstantin Belousov 		 * non-zero for some reason.
12467564c4adSKonstantin Belousov 		 */
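		/*
		 * Otherwise either place the PIE at the default base or
		 * flag it with ET_DYN_ADDR_RAND so that a randomized
		 * base is chosen after the new address space is created.
		 */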
1247fa50a355SKonstantin Belousov 		if (baddr == 0) {
1248fa50a355SKonstantin Belousov 			if ((sv->sv_flags & SV_ASLR) == 0 ||
1249fa50a355SKonstantin Belousov 			    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
125095aafd69SKonstantin Belousov 				et_dyn_addr = __elfN(pie_base);
1251fa50a355SKonstantin Belousov 			else if ((__elfN(pie_aslr_enabled) &&
1252fa50a355SKonstantin Belousov 			    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
1253fa50a355SKonstantin Belousov 			    (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
1254fa50a355SKonstantin Belousov 				et_dyn_addr = ET_DYN_ADDR_RAND;
1255fa50a355SKonstantin Belousov 			else
125695aafd69SKonstantin Belousov 				et_dyn_addr = __elfN(pie_base);
125777ebe276SEd Maste 		}
1258fa50a355SKonstantin Belousov 	}
12593ebc1248SPeter Wemm 
126060bb3943SAlan Cox 	/*
126160bb3943SAlan Cox 	 * Avoid a possible deadlock if the current address space is destroyed
126260bb3943SAlan Cox 	 * and that address space maps the locked vnode.  In the common case,
126360bb3943SAlan Cox 	 * the locked vnode's v_usecount is decremented but remains greater
126460bb3943SAlan Cox 	 * than zero.  Consequently, the vnode lock is not needed by vrele().
126560bb3943SAlan Cox 	 * However, in cases where the vnode lock is external, such as nullfs,
126660bb3943SAlan Cox 	 * v_usecount may become zero.
12671dfab802SAlan Cox 	 *
12681dfab802SAlan Cox 	 * The text reference on the vnode prevents modifications to the
12691dfab802SAlan Cox 	 * executable while the vnode is unlocked.
127060bb3943SAlan Cox 	 */
1271b249ce48SMateusz Guzik 	VOP_UNLOCK(imgp->vp);
127260bb3943SAlan Cox 
1273fa50a355SKonstantin Belousov 	/*
1274fa50a355SKonstantin Belousov 	 * Decide whether to enable randomization of user mappings.
1275fa50a355SKonstantin Belousov 	 * First, reset user preferences for the setid binaries.
1276fa50a355SKonstantin Belousov 	 * Then, account for whether randomization is supported by the
1277fa50a355SKonstantin Belousov 	 * ABI and enabled by user preferences, and give PIE binaries
1278fa50a355SKonstantin Belousov 	 * special treatment.
1279fa50a355SKonstantin Belousov 	 */
1280fa50a355SKonstantin Belousov 	if (imgp->credential_setid) {
1281fa50a355SKonstantin Belousov 		PROC_LOCK(imgp->proc);
1282796a8e1aSKonstantin Belousov 		imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE |
1283796a8e1aSKonstantin Belousov 		    P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC);
1284fa50a355SKonstantin Belousov 		PROC_UNLOCK(imgp->proc);
1285fa50a355SKonstantin Belousov 	}
1286fa50a355SKonstantin Belousov 	if ((sv->sv_flags & SV_ASLR) == 0 ||
1287fa50a355SKonstantin Belousov 	    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
1288fa50a355SKonstantin Belousov 	    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
1289fa50a355SKonstantin Belousov 		KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
1290fa50a355SKonstantin Belousov 		    ("et_dyn_addr == RAND and !ASLR"));
1291fa50a355SKonstantin Belousov 	} else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
1292fa50a355SKonstantin Belousov 	    (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
1293fa50a355SKonstantin Belousov 	    et_dyn_addr == ET_DYN_ADDR_RAND) {
1294fa50a355SKonstantin Belousov 		imgp->map_flags |= MAP_ASLR;
1295fa50a355SKonstantin Belousov 		/*
1296fa50a355SKonstantin Belousov 		 * If the user does not care about sbrk, utilize the bss
1297fa50a355SKonstantin Belousov 		 * grow region for mappings as well.  We can select the
1298fa50a355SKonstantin Belousov 		 * base for the image anywhere and still not suffer
1299fa50a355SKonstantin Belousov 		 * from fragmentation.
1300fa50a355SKonstantin Belousov 		 */
1301fa50a355SKonstantin Belousov 		if (!__elfN(aslr_honor_sbrk) ||
1302fa50a355SKonstantin Belousov 		    (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
1303fa50a355SKonstantin Belousov 			imgp->map_flags |= MAP_ASLR_IGNSTART;
1304fa50a355SKonstantin Belousov 	}
1305fa50a355SKonstantin Belousov 
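	/*
	 * Enable W^X unless writable-and-executable mappings are allowed
	 * globally, the binary asks for them via the
	 * NT_FREEBSD_FCTL_WXNEEDED feature control bit, or the process
	 * has opted out; an explicit per-process enable-on-exec request
	 * always turns it on.
	 */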
1306796a8e1aSKonstantin Belousov 	if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 &&
1307796a8e1aSKonstantin Belousov 	    (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) ||
1308796a8e1aSKonstantin Belousov 	    (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
13092e1c94aaSKonstantin Belousov 		imgp->map_flags |= MAP_WXORX;
13102e1c94aaSKonstantin Belousov 
131189b57fcfSKonstantin Belousov 	error = exec_new_vmspace(imgp, sv);
1312fa50a355SKonstantin Belousov 	vmspace = imgp->proc->p_vmspace;
1313fa50a355SKonstantin Belousov 	map = &vmspace->vm_map;
1314fa50a355SKonstantin Belousov 
131519059a13SJohn Baldwin 	imgp->proc->p_sysent = sv;
1316615f22b2SDmitry Chagin 	imgp->proc->p_elf_brandinfo = brand_info;
1317e1743d02SSøren Schmidt 
1318fa50a355SKonstantin Belousov 	maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK);
1319*bf839416SKonstantin Belousov 	if (mapsz >= maxv - vm_map_min(map)) {
1320*bf839416SKonstantin Belousov 		uprintf("Excessive mapping size\n");
1321*bf839416SKonstantin Belousov 		error = ENOEXEC;
1322*bf839416SKonstantin Belousov 	}
1323*bf839416SKonstantin Belousov 
13249cf78c1cSKonstantin Belousov 	if (error == 0 && et_dyn_addr == ET_DYN_ADDR_RAND) {
1325fa50a355SKonstantin Belousov 		KASSERT((map->flags & MAP_ASLR) != 0,
1326fa50a355SKonstantin Belousov 		    ("ET_DYN_ADDR_RAND but !MAP_ASLR"));
13279cf78c1cSKonstantin Belousov 		error = __CONCAT(rnd_, __elfN(base))(map,
1328fa50a355SKonstantin Belousov 		    vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
1329fa50a355SKonstantin Belousov 		    /* reserve half of the address space for the interpreter */
133036df8f54SKonstantin Belousov 		    maxv / 2, maxalign, &et_dyn_addr);
1331fa50a355SKonstantin Belousov 	}
1332fa50a355SKonstantin Belousov 
133378022527SKonstantin Belousov 	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
13346c775eb6SKonstantin Belousov 	if (error != 0)
13356c775eb6SKonstantin Belousov 		goto ret;
133660bb3943SAlan Cox 
13379bcd7482SEdward Tomasz Napierala 	error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL);
1338292177e6SAlan Cox 	if (error != 0)
13396c775eb6SKonstantin Belousov 		goto ret;
1340e1743d02SSøren Schmidt 
134120e1174aSEdward Tomasz Napierala 	error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr);
134220e1174aSEdward Tomasz Napierala 	if (error != 0)
134320e1174aSEdward Tomasz Napierala 		goto ret;
1344cac45152SMatthew Dillon 
1345920acedbSNathan Whitehorn 	entry = (u_long)hdr->e_entry + et_dyn_addr;
1346920acedbSNathan Whitehorn 
1347cac45152SMatthew Dillon 	/*
1348c460ac3aSPeter Wemm 	 * We load the dynamic linker where a userland call
1349c460ac3aSPeter Wemm 	 * to mmap(0, ...) would put it.  The rationale behind this
1350c460ac3aSPeter Wemm 	 * calculation is that it leaves room for the heap to grow to
1351c460ac3aSPeter Wemm 	 * its maximum allowed size.
1352c460ac3aSPeter Wemm 	 */
13536c775eb6SKonstantin Belousov 	addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td,
1354292177e6SAlan Cox 	    RLIMIT_DATA));
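	/*
	 * With ASLR, pick a randomized start for anonymous mappings
	 * (anon_loc) between the default hint computed above and the
	 * midpoint of the remaining user address space; otherwise
	 * anonymous mappings simply start at that hint.
	 */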
1355fa50a355SKonstantin Belousov 	if ((map->flags & MAP_ASLR) != 0) {
1356fa50a355SKonstantin Belousov 		maxv1 = maxv / 2 + addr / 2;
13579cf78c1cSKonstantin Belousov 		error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1,
135841032835SJason A. Harmening 		    (MAXPAGESIZES > 1 && pagesizes[1] != 0) ?
13599cf78c1cSKonstantin Belousov 		    pagesizes[1] : pagesizes[0], &map->anon_loc);
13609cf78c1cSKonstantin Belousov 		if (error != 0)
13619cf78c1cSKonstantin Belousov 			goto ret;
1362fa50a355SKonstantin Belousov 	} else {
1363fa50a355SKonstantin Belousov 		map->anon_loc = addr;
1364fa50a355SKonstantin Belousov 	}
1365e1743d02SSøren Schmidt 
1366ea5a2b2eSSøren Schmidt 	imgp->entry_addr = entry;
1367ea5a2b2eSSøren Schmidt 
136860bb3943SAlan Cox 	if (interp != NULL) {
1369b249ce48SMateusz Guzik 		VOP_UNLOCK(imgp->vp);
1370fa50a355SKonstantin Belousov 		if ((map->flags & MAP_ASLR) != 0) {
137141032835SJason A. Harmening 			/* Assume that the interpreter fits into 1/4 of the AS */
1372fa50a355SKonstantin Belousov 			maxv1 = maxv / 2 + addr / 2;
13739cf78c1cSKonstantin Belousov 			error = __CONCAT(rnd_, __elfN(base))(map, addr,
13749cf78c1cSKonstantin Belousov 			    maxv1, PAGE_SIZE, &addr);
1375fa50a355SKonstantin Belousov 		}
13769cf78c1cSKonstantin Belousov 		if (error == 0) {
13779cf78c1cSKonstantin Belousov 			error = __elfN(load_interp)(imgp, brand_info, interp,
13789cf78c1cSKonstantin Belousov 			    &addr, &imgp->entry_addr);
13799cf78c1cSKonstantin Belousov 		}
138078022527SKonstantin Belousov 		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
13819274fb35SEdward Tomasz Napierala 		if (error != 0)
13826c775eb6SKonstantin Belousov 			goto ret;
138395c807cfSRobert Watson 	} else
13847564c4adSKonstantin Belousov 		addr = et_dyn_addr;
1385ea5a2b2eSSøren Schmidt 
1386e1743d02SSøren Schmidt 	/*
1387e3532331SJohn Baldwin 	 * Construct auxargs table (used by the copyout_auxargs routine)
1388e1743d02SSøren Schmidt 	 */
13892d6b8546SKonstantin Belousov 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT);
13902d6b8546SKonstantin Belousov 	if (elf_auxargs == NULL) {
1391b249ce48SMateusz Guzik 		VOP_UNLOCK(imgp->vp);
1392a163d034SWarner Losh 		elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
139378022527SKonstantin Belousov 		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
13942d6b8546SKonstantin Belousov 	}
1395e1743d02SSøren Schmidt 	elf_auxargs->execfd = -1;
13969bcd7482SEdward Tomasz Napierala 	elf_auxargs->phdr = proghdr + et_dyn_addr;
1397e1743d02SSøren Schmidt 	elf_auxargs->phent = hdr->e_phentsize;
1398e1743d02SSøren Schmidt 	elf_auxargs->phnum = hdr->e_phnum;
1399e1743d02SSøren Schmidt 	elf_auxargs->pagesz = PAGE_SIZE;
1400e1743d02SSøren Schmidt 	elf_auxargs->base = addr;
1401e1743d02SSøren Schmidt 	elf_auxargs->flags = 0;
1402e1743d02SSøren Schmidt 	elf_auxargs->entry = entry;
1403d36eec69SWarner Losh 	elf_auxargs->hdr_eflags = hdr->e_flags;
1404e1743d02SSøren Schmidt 
1405e1743d02SSøren Schmidt 	imgp->auxargs = elf_auxargs;
1406e1743d02SSøren Schmidt 	imgp->interpreted = 0;
1407a0ea661fSNathan Whitehorn 	imgp->reloc_base = addr;
140832c01de2SDmitry Chagin 	imgp->proc->p_osrel = osrel;
1409cefb93f2SKonstantin Belousov 	imgp->proc->p_fctl0 = fctl0;
1410885f13dcSJohn Baldwin 	imgp->proc->p_elf_flags = hdr->e_flags;
1411f231de47SKonstantin Belousov 
14126c775eb6SKonstantin Belousov ret:
1413b4b20492SKonstantin Belousov 	ASSERT_VOP_LOCKED(imgp->vp, "skipped relock");
141409c78d53SEdward Tomasz Napierala 	if (free_interp)
141509c78d53SEdward Tomasz Napierala 		free(interp, M_TEMP);
1416a7cddfedSJake Burkholder 	return (error);
1417e1743d02SSøren Schmidt }
1418e1743d02SSøren Schmidt 
1419ebf98866SMark Johnston #define	elf_suword __CONCAT(suword, __ELF_WORD_SIZE)
14203ebc1248SPeter Wemm 
142103b0d68cSJohn Baldwin int
1422d8010b11SJohn Baldwin __elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base)
1423e1743d02SSøren Schmidt {
1424ecbb00a2SDoug Rabson 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
14255f77b8a8SBrooks Davis 	Elf_Auxinfo *argarray, *pos;
142603b0d68cSJohn Baldwin 	int error;
1427e1743d02SSøren Schmidt 
14285f77b8a8SBrooks Davis 	argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP,
14295f77b8a8SBrooks Davis 	    M_WAITOK | M_ZERO);
1430e1743d02SSøren Schmidt 
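	/*
	 * Each AUXARGS_ENTRY() below appends one Elf_Auxinfo record to
	 * argarray; the vector must be terminated by AT_NULL and is
	 * copied out to the user stack in a single copyout() at the end.
	 */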
143135c2a5a8SWarner Losh 	if (args->execfd != -1)
1432e1743d02SSøren Schmidt 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
1433e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
1434e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
1435e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
1436e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
1437e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
1438e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
1439e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
1440d36eec69SWarner Losh 	AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags);
14413ff06357SKonstantin Belousov 	if (imgp->execpathp != 0)
1442b24e6ac8SBrooks Davis 		AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp);
1443b96bd95bSIan Lepore 	AUXARGS_ENTRY(pos, AT_OSRELDATE,
1444b96bd95bSIan Lepore 	    imgp->proc->p_ucred->cr_prison->pr_osreldate);
1445ee235befSKonstantin Belousov 	if (imgp->canary != 0) {
1446b24e6ac8SBrooks Davis 		AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary);
1447ee235befSKonstantin Belousov 		AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
1448ee235befSKonstantin Belousov 	}
1449ee235befSKonstantin Belousov 	AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
1450ee235befSKonstantin Belousov 	if (imgp->pagesizes != 0) {
1451b24e6ac8SBrooks Davis 		AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes);
1452ee235befSKonstantin Belousov 		AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
1453ee235befSKonstantin Belousov 	}
1454aea81038SKonstantin Belousov 	if (imgp->sysent->sv_timekeep_base != 0) {
1455aea81038SKonstantin Belousov 		AUXARGS_ENTRY(pos, AT_TIMEKEEP,
1456aea81038SKonstantin Belousov 		    imgp->sysent->sv_timekeep_base);
1457aea81038SKonstantin Belousov 	}
145826d8f3e1SKonstantin Belousov 	AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
145926d8f3e1SKonstantin Belousov 	    != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
146026d8f3e1SKonstantin Belousov 	    imgp->sysent->sv_stackprot);
1461c2f37b92SJohn Baldwin 	if (imgp->sysent->sv_hwcap != NULL)
1462c2f37b92SJohn Baldwin 		AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap);
1463904d8c49SMichal Meloun 	if (imgp->sysent->sv_hwcap2 != NULL)
1464904d8c49SMichal Meloun 		AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2);
1465944cf37bSKonstantin Belousov 	AUXARGS_ENTRY(pos, AT_BSDFLAGS, __elfN(sigfastblock) ?
1466944cf37bSKonstantin Belousov 	    ELF_BSDF_SIGFASTBLK : 0);
14679df1c38bSBrooks Davis 	AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc);
14689df1c38bSBrooks Davis 	AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv);
14699df1c38bSBrooks Davis 	AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc);
14709df1c38bSBrooks Davis 	AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv);
14719df1c38bSBrooks Davis 	AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings);
1472f8e8a06dSConrad Meyer 	if (imgp->sysent->sv_fxrng_gen_base != 0)
1473f8e8a06dSConrad Meyer 		AUXARGS_ENTRY(pos, AT_FXRNG, imgp->sysent->sv_fxrng_gen_base);
1474eb029587SKonstantin Belousov 	if (imgp->sysent->sv_vdso_base != 0 && __elfN(vdso) != 0)
147501c77a43SKonstantin Belousov 		AUXARGS_ENTRY(pos, AT_KPRELOAD, imgp->sysent->sv_vdso_base);
1476e1743d02SSøren Schmidt 	AUXARGS_ENTRY(pos, AT_NULL, 0);
1477e1743d02SSøren Schmidt 
1478e1743d02SSøren Schmidt 	free(imgp->auxargs, M_TEMP);
1479e1743d02SSøren Schmidt 	imgp->auxargs = NULL;
1480d8b2f079SBrooks Davis 	KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs"));
14815f77b8a8SBrooks Davis 
1482d8010b11SJohn Baldwin 	error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT);
14835f77b8a8SBrooks Davis 	free(argarray, M_TEMP);
148403b0d68cSJohn Baldwin 	return (error);
1485e3532331SJohn Baldwin }
1486e1743d02SSøren Schmidt 
1487e3532331SJohn Baldwin int
148831174518SJohn Baldwin __elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp)
1489e3532331SJohn Baldwin {
1490e3532331SJohn Baldwin 	Elf_Addr *base;
1491e3532331SJohn Baldwin 
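	/*
	 * Reserve one word immediately below the current stack base and
	 * store argc there, where the ELF startup code expects to find
	 * it.
	 */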
1492e3532331SJohn Baldwin 	base = (Elf_Addr *)*stack_base;
14933ebc1248SPeter Wemm 	base--;
1494ebf98866SMark Johnston 	if (elf_suword(base, imgp->args->argc) == -1)
14955f77b8a8SBrooks Davis 		return (EFAULT);
149631174518SJohn Baldwin 	*stack_base = (uintptr_t)base;
1497a7cddfedSJake Burkholder 	return (0);
1498e1743d02SSøren Schmidt }
1499e1743d02SSøren Schmidt 
1500e1743d02SSøren Schmidt /*
15018c64af4fSJohn Polstra  * Code for generating ELF core dumps.
15028c64af4fSJohn Polstra  */
15038c64af4fSJohn Polstra 
15044d77a549SAlfred Perlstein typedef void (*segment_callback)(vm_map_entry_t, void *);
15050ff27d31SJohn Polstra 
15060ff27d31SJohn Polstra /* Closure for cb_put_phdr(). */
15070ff27d31SJohn Polstra struct phdr_closure {
15080ff27d31SJohn Polstra 	Elf_Phdr *phdr;		/* Program header to fill in */
15090ff27d31SJohn Polstra 	Elf_Off offset;		/* Offset of segment in core file */
15100ff27d31SJohn Polstra };
15110ff27d31SJohn Polstra 
1512bd390213SMikolaj Golub struct note_info {
1513bd390213SMikolaj Golub 	int		type;		/* Note type. */
1514bd390213SMikolaj Golub 	outfunc_t 	outfunc; 	/* Output function. */
1515bd390213SMikolaj Golub 	void		*outarg;	/* Argument for the output function. */
1516bd390213SMikolaj Golub 	size_t		outsize;	/* Output size. */
1517bd390213SMikolaj Golub 	TAILQ_ENTRY(note_info) link;	/* Link to the next note info. */
1518bd390213SMikolaj Golub };
1519bd390213SMikolaj Golub 
1520bd390213SMikolaj Golub TAILQ_HEAD(note_info_list, note_info);
1521bd390213SMikolaj Golub 
152278f57a9cSMark Johnston extern int compress_user_cores;
152378f57a9cSMark Johnston extern int compress_user_cores_level;
152478f57a9cSMark Johnston 
15254d77a549SAlfred Perlstein static void cb_put_phdr(vm_map_entry_t, void *);
15264d77a549SAlfred Perlstein static void cb_size_segment(vm_map_entry_t, void *);
15275bc3c617SKonstantin Belousov static void each_dumpable_segment(struct thread *, segment_callback, void *,
15285bc3c617SKonstantin Belousov     int);
1529aa14e9b7SMark Johnston static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
15305bc3c617SKonstantin Belousov     struct note_info_list *, size_t, int);
1531435754a5SEdward Tomasz Napierala static void __elfN(putnote)(struct thread *td, struct note_info *, struct sbuf *);
1532bd390213SMikolaj Golub 
1533bd390213SMikolaj Golub static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *);
1534bd390213SMikolaj Golub static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
1535bd390213SMikolaj Golub static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *);
1536bd390213SMikolaj Golub static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
1537bd390213SMikolaj Golub static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *);
153886be94fcSTycho Nightingale static void __elfN(note_ptlwpinfo)(void *, struct sbuf *, size_t *);
1539f1fca82eSMikolaj Golub static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
1540f1fca82eSMikolaj Golub static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
1541f1fca82eSMikolaj Golub static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
1542f1fca82eSMikolaj Golub static void note_procstat_files(void *, struct sbuf *, size_t *);
1543f1fca82eSMikolaj Golub static void note_procstat_groups(void *, struct sbuf *, size_t *);
1544f1fca82eSMikolaj Golub static void note_procstat_osrel(void *, struct sbuf *, size_t *);
1545f1fca82eSMikolaj Golub static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
1546f1fca82eSMikolaj Golub static void note_procstat_umask(void *, struct sbuf *, size_t *);
1547f1fca82eSMikolaj Golub static void note_procstat_vmmap(void *, struct sbuf *, size_t *);
15488c64af4fSJohn Polstra 
1549aa14e9b7SMark Johnston static int
155078f57a9cSMark Johnston core_compressed_write(void *base, size_t len, off_t offset, void *arg)
1551aa14e9b7SMark Johnston {
1552aa14e9b7SMark Johnston 
1553aa14e9b7SMark Johnston 	return (core_write((struct coredump_params *)arg, base, len, offset,
1554f31695ccSMark Johnston 	    UIO_SYSSPACE, NULL));
1555aa14e9b7SMark Johnston }
1556aa14e9b7SMark Johnston 
15578c64af4fSJohn Polstra int
1558e7228204SAlfred Perlstein __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
1559fca666a1SJulian Elischer {
1560247aba24SMarcel Moolenaar 	struct ucred *cred = td->td_ucred;
156186ffb3d1SKonstantin Belousov 	int compm, error = 0;
15620ff27d31SJohn Polstra 	struct sseg_closure seginfo;
1563bd390213SMikolaj Golub 	struct note_info_list notelst;
1564aa14e9b7SMark Johnston 	struct coredump_params params;
1565bd390213SMikolaj Golub 	struct note_info *ninfo;
1566aa14e9b7SMark Johnston 	void *hdr, *tmpbuf;
1567bd390213SMikolaj Golub 	size_t hdrsize, notesz, coresize;
15688c64af4fSJohn Polstra 
1569e7228204SAlfred Perlstein 	hdr = NULL;
157002d131adSMark Johnston 	tmpbuf = NULL;
1571bd390213SMikolaj Golub 	TAILQ_INIT(&notelst);
1572e7228204SAlfred Perlstein 
15730ff27d31SJohn Polstra 	/* Size the program segments. */
1574905d192dSEdward Tomasz Napierala 	__elfN(size_segments)(td, &seginfo, flags);
15750ff27d31SJohn Polstra 
15760ff27d31SJohn Polstra 	/*
1577bd390213SMikolaj Golub 	 * Collect info about the core file header area.
15780ff27d31SJohn Polstra 	 */
1579bd390213SMikolaj Golub 	hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
1580c17b0bd2SConrad Meyer 	if (seginfo.count + 1 >= PN_XNUM)
1581c17b0bd2SConrad Meyer 		hdrsize += sizeof(Elf_Shdr);
1582435754a5SEdward Tomasz Napierala 	td->td_proc->p_sysent->sv_elf_core_prepare_notes(td, &notelst, &notesz);
1583bd390213SMikolaj Golub 	coresize = round_page(hdrsize + notesz) + seginfo.size;
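	/*
	 * The resulting layout is the ELF header and program headers
	 * (plus one section header when the segment count overflows
	 * PN_XNUM), the notes, padding up to a page boundary, and then
	 * the contents of each dumped segment.
	 */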
15840ff27d31SJohn Polstra 
158502d131adSMark Johnston 	/* Set up core dump parameters. */
158602d131adSMark Johnston 	params.offset = 0;
158702d131adSMark Johnston 	params.active_cred = cred;
158802d131adSMark Johnston 	params.file_cred = NOCRED;
158902d131adSMark Johnston 	params.td = td;
159002d131adSMark Johnston 	params.vp = vp;
159178f57a9cSMark Johnston 	params.comp = NULL;
159202d131adSMark Johnston 
1593afcc55f3SEdward Tomasz Napierala #ifdef RACCT
15944b5c9cf6SEdward Tomasz Napierala 	if (racct_enable) {
15951ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(td->td_proc);
1596bd390213SMikolaj Golub 		error = racct_add(td->td_proc, RACCT_CORE, coresize);
15971ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(td->td_proc);
15981ba5ad42SEdward Tomasz Napierala 		if (error != 0) {
15991ba5ad42SEdward Tomasz Napierala 			error = EFAULT;
16001ba5ad42SEdward Tomasz Napierala 			goto done;
16011ba5ad42SEdward Tomasz Napierala 		}
16024b5c9cf6SEdward Tomasz Napierala 	}
1603afcc55f3SEdward Tomasz Napierala #endif
1604bd390213SMikolaj Golub 	if (coresize >= limit) {
1605fba6b1afSAlfred Perlstein 		error = EFAULT;
1606fba6b1afSAlfred Perlstein 		goto done;
1607fba6b1afSAlfred Perlstein 	}
16080ff27d31SJohn Polstra 
1609aa14e9b7SMark Johnston 	/* Create a compression stream if necessary. */
161086ffb3d1SKonstantin Belousov 	compm = compress_user_cores;
161186ffb3d1SKonstantin Belousov 	if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP &&
161286ffb3d1SKonstantin Belousov 	    compm == 0)
161386ffb3d1SKonstantin Belousov 		compm = COMPRESS_GZIP;
161486ffb3d1SKonstantin Belousov 	if (compm != 0) {
161578f57a9cSMark Johnston 		params.comp = compressor_init(core_compressed_write,
161686ffb3d1SKonstantin Belousov 		    compm, CORE_BUF_SIZE,
161778f57a9cSMark Johnston 		    compress_user_cores_level, &params);
161878f57a9cSMark Johnston 		if (params.comp == NULL) {
1619aa14e9b7SMark Johnston 			error = EFAULT;
1620aa14e9b7SMark Johnston 			goto done;
1621aa14e9b7SMark Johnston 		}
1622aa14e9b7SMark Johnston 		tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
1623aa14e9b7SMark Johnston 	}
1624aa14e9b7SMark Johnston 
16250ff27d31SJohn Polstra 	/*
16260ff27d31SJohn Polstra 	 * Allocate memory for building the header, fill it up,
1627bd390213SMikolaj Golub 	 * and write it out, followed by the notes.
16280ff27d31SJohn Polstra 	 */
1629a163d034SWarner Losh 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1630aa14e9b7SMark Johnston 	error = __elfN(corehdr)(&params, seginfo.count, hdr, hdrsize, &notelst,
16315bc3c617SKonstantin Belousov 	    notesz, flags);
16320ff27d31SJohn Polstra 
16330ff27d31SJohn Polstra 	/* Write the contents of all of the writable segments. */
16340ff27d31SJohn Polstra 	if (error == 0) {
16350ff27d31SJohn Polstra 		Elf_Phdr *php;
16362b471bc6STim J. Robbins 		off_t offset;
16370ff27d31SJohn Polstra 		int i;
16380ff27d31SJohn Polstra 
16390ff27d31SJohn Polstra 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1640bd390213SMikolaj Golub 		offset = round_page(hdrsize + notesz);
16410ff27d31SJohn Polstra 		for (i = 0; i < seginfo.count; i++) {
1642f31695ccSMark Johnston 			error = core_output((char *)(uintptr_t)php->p_vaddr,
1643aa14e9b7SMark Johnston 			    php->p_filesz, offset, &params, tmpbuf);
16440ff27d31SJohn Polstra 			if (error != 0)
16452b471bc6STim J. Robbins 				break;
16460ff27d31SJohn Polstra 			offset += php->p_filesz;
16470ff27d31SJohn Polstra 			php++;
16480ff27d31SJohn Polstra 		}
164978f57a9cSMark Johnston 		if (error == 0 && params.comp != NULL)
165078f57a9cSMark Johnston 			error = compressor_flush(params.comp);
16510ff27d31SJohn Polstra 	}
1652e7228204SAlfred Perlstein 	if (error) {
1653e7228204SAlfred Perlstein 		log(LOG_WARNING,
1654e7228204SAlfred Perlstein 		    "Failed to write core file for process %s (error %d)\n",
1655e7228204SAlfred Perlstein 		    curproc->p_comm, error);
1656e7228204SAlfred Perlstein 	}
1657e7228204SAlfred Perlstein 
1658e7228204SAlfred Perlstein done:
1659aa14e9b7SMark Johnston 	free(tmpbuf, M_TEMP);
166078f57a9cSMark Johnston 	if (params.comp != NULL)
166178f57a9cSMark Johnston 		compressor_fini(params.comp);
1662bd390213SMikolaj Golub 	while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
1663bd390213SMikolaj Golub 		TAILQ_REMOVE(&notelst, ninfo, link);
1664bd390213SMikolaj Golub 		free(ninfo, M_TEMP);
1665bd390213SMikolaj Golub 	}
1666bd390213SMikolaj Golub 	if (hdr != NULL)
16670ff27d31SJohn Polstra 		free(hdr, M_TEMP);
16680ff27d31SJohn Polstra 
1669a7cddfedSJake Burkholder 	return (error);
16708c64af4fSJohn Polstra }
16718c64af4fSJohn Polstra 
16720ff27d31SJohn Polstra /*
16731005d8afSConrad Meyer  * A callback for each_dumpable_segment() to write out the segment's
16740ff27d31SJohn Polstra  * program header entry.
16750ff27d31SJohn Polstra  */
16760ff27d31SJohn Polstra static void
16775cc6d253SEd Maste cb_put_phdr(vm_map_entry_t entry, void *closure)
16780ff27d31SJohn Polstra {
16790ff27d31SJohn Polstra 	struct phdr_closure *phc = (struct phdr_closure *)closure;
16800ff27d31SJohn Polstra 	Elf_Phdr *phdr = phc->phdr;
16810ff27d31SJohn Polstra 
16820ff27d31SJohn Polstra 	phc->offset = round_page(phc->offset);
16830ff27d31SJohn Polstra 
16840ff27d31SJohn Polstra 	phdr->p_type = PT_LOAD;
16850ff27d31SJohn Polstra 	phdr->p_offset = phc->offset;
16860ff27d31SJohn Polstra 	phdr->p_vaddr = entry->start;
16870ff27d31SJohn Polstra 	phdr->p_paddr = 0;
16880ff27d31SJohn Polstra 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
16890ff27d31SJohn Polstra 	phdr->p_align = PAGE_SIZE;
1690ed167eaaSKonstantin Belousov 	phdr->p_flags = __elfN(untrans_prot)(entry->protection);
16910ff27d31SJohn Polstra 
16920ff27d31SJohn Polstra 	phc->offset += phdr->p_filesz;
16930ff27d31SJohn Polstra 	phc->phdr++;
16940ff27d31SJohn Polstra }
16950ff27d31SJohn Polstra 
16960ff27d31SJohn Polstra /*
16971005d8afSConrad Meyer  * A callback for each_dumpable_segment() to gather information about
16980ff27d31SJohn Polstra  * the number of segments and their total size.
16990ff27d31SJohn Polstra  */
17000ff27d31SJohn Polstra static void
1701f3325003SConrad Meyer cb_size_segment(vm_map_entry_t entry, void *closure)
17020ff27d31SJohn Polstra {
17030ff27d31SJohn Polstra 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
17040ff27d31SJohn Polstra 
17050ff27d31SJohn Polstra 	ssc->count++;
17060ff27d31SJohn Polstra 	ssc->size += entry->end - entry->start;
17070ff27d31SJohn Polstra }
17080ff27d31SJohn Polstra 
1709905d192dSEdward Tomasz Napierala void
1710905d192dSEdward Tomasz Napierala __elfN(size_segments)(struct thread *td, struct sseg_closure *seginfo,
1711905d192dSEdward Tomasz Napierala     int flags)
1712905d192dSEdward Tomasz Napierala {
1713905d192dSEdward Tomasz Napierala 	seginfo->count = 0;
1714905d192dSEdward Tomasz Napierala 	seginfo->size = 0;
1715905d192dSEdward Tomasz Napierala 
1716905d192dSEdward Tomasz Napierala 	each_dumpable_segment(td, cb_size_segment, seginfo, flags);
1717905d192dSEdward Tomasz Napierala }
1718905d192dSEdward Tomasz Napierala 
17190ff27d31SJohn Polstra /*
17200ff27d31SJohn Polstra  * For each dumpable segment in the process's memory map, call the given
17210ff27d31SJohn Polstra  * function with a pointer to the map entry and some arbitrary
17220ff27d31SJohn Polstra  * caller-supplied data.
17230ff27d31SJohn Polstra  */
17240ff27d31SJohn Polstra static void
17255bc3c617SKonstantin Belousov each_dumpable_segment(struct thread *td, segment_callback func, void *closure,
17265bc3c617SKonstantin Belousov     int flags)
17270ff27d31SJohn Polstra {
1728247aba24SMarcel Moolenaar 	struct proc *p = td->td_proc;
17290ff27d31SJohn Polstra 	vm_map_t map = &p->p_vmspace->vm_map;
17300ff27d31SJohn Polstra 	vm_map_entry_t entry;
1731976a87a2SAlan Cox 	vm_object_t backing_object, object;
1732fec41f07SMark Johnston 	bool ignore_entry;
17330ff27d31SJohn Polstra 
1734976a87a2SAlan Cox 	vm_map_lock_read(map);
17352288078cSDoug Moore 	VM_MAP_ENTRY_FOREACH(entry, map) {
1736fa7dd9c5SMatthew Dillon 		/*
1737fa7dd9c5SMatthew Dillon 		 * Don't dump inaccessible mappings, deal with legacy
1738fa7dd9c5SMatthew Dillon 		 * coredump mode.
1739fa7dd9c5SMatthew Dillon 		 *
1740fa7dd9c5SMatthew Dillon 		 * Note that read-only segments related to the elf binary
1741fa7dd9c5SMatthew Dillon 		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1742fa7dd9c5SMatthew Dillon 		 * need to arbitrarily ignore such segments.
1743fa7dd9c5SMatthew Dillon 		 */
174486ffb3d1SKonstantin Belousov 		if ((flags & SVC_ALL) == 0) {
1745fa7dd9c5SMatthew Dillon 			if (elf_legacy_coredump) {
174686ffb3d1SKonstantin Belousov 				if ((entry->protection & VM_PROT_RW) !=
174786ffb3d1SKonstantin Belousov 				    VM_PROT_RW)
17480ff27d31SJohn Polstra 					continue;
1749fa7dd9c5SMatthew Dillon 			} else {
1750fa7dd9c5SMatthew Dillon 				if ((entry->protection & VM_PROT_ALL) == 0)
1751fa7dd9c5SMatthew Dillon 					continue;
1752fa7dd9c5SMatthew Dillon 			}
175386ffb3d1SKonstantin Belousov 		}
17540ff27d31SJohn Polstra 
17559730a5daSPaul Saab 		/*
1756fa7dd9c5SMatthew Dillon 		 * Don't include a memory segment in the core dump if
1757fa7dd9c5SMatthew Dillon 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1758fa7dd9c5SMatthew Dillon 		 * madvise(2).  Do not dump submaps (i.e. parts of the
1759fa7dd9c5SMatthew Dillon 		 * kernel map).
17609730a5daSPaul Saab 		 */
176186ffb3d1SKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
17629730a5daSPaul Saab 			continue;
176386ffb3d1SKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 &&
176486ffb3d1SKonstantin Belousov 		    (flags & SVC_ALL) == 0)
176586ffb3d1SKonstantin Belousov 			continue;
1766976a87a2SAlan Cox 		if ((object = entry->object.vm_object) == NULL)
17670ff27d31SJohn Polstra 			continue;
17680ff27d31SJohn Polstra 
17690ff27d31SJohn Polstra 		/* Ignore memory-mapped devices and such things. */
1770bc403f03SAttilio Rao 		VM_OBJECT_RLOCK(object);
1771976a87a2SAlan Cox 		while ((backing_object = object->backing_object) != NULL) {
1772bc403f03SAttilio Rao 			VM_OBJECT_RLOCK(backing_object);
1773bc403f03SAttilio Rao 			VM_OBJECT_RUNLOCK(object);
1774976a87a2SAlan Cox 			object = backing_object;
1775976a87a2SAlan Cox 		}
1776fec41f07SMark Johnston 		ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0;
1777bc403f03SAttilio Rao 		VM_OBJECT_RUNLOCK(object);
1778976a87a2SAlan Cox 		if (ignore_entry)
17790ff27d31SJohn Polstra 			continue;
17800ff27d31SJohn Polstra 
17810ff27d31SJohn Polstra 		(*func)(entry, closure);
17820ff27d31SJohn Polstra 	}
1783976a87a2SAlan Cox 	vm_map_unlock_read(map);
17840ff27d31SJohn Polstra }
17850ff27d31SJohn Polstra 
17860ff27d31SJohn Polstra /*
17870ff27d31SJohn Polstra  * Write the core file header to the file, including padding up to
17880ff27d31SJohn Polstra  * the page boundary.
17890ff27d31SJohn Polstra  */
17908c64af4fSJohn Polstra static int
1791aa14e9b7SMark Johnston __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr,
17925bc3c617SKonstantin Belousov     size_t hdrsize, struct note_info_list *notelst, size_t notesz,
17935bc3c617SKonstantin Belousov     int flags)
17948c64af4fSJohn Polstra {
1795bd390213SMikolaj Golub 	struct note_info *ninfo;
1796bd390213SMikolaj Golub 	struct sbuf *sb;
1797bd390213SMikolaj Golub 	int error;
17988c64af4fSJohn Polstra 
17998c64af4fSJohn Polstra 	/* Fill in the header. */
18000ff27d31SJohn Polstra 	bzero(hdr, hdrsize);
18015bc3c617SKonstantin Belousov 	__elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags);
18028c64af4fSJohn Polstra 
1803bd390213SMikolaj Golub 	sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
1804aa14e9b7SMark Johnston 	sbuf_set_drain(sb, sbuf_drain_core_output, p);
1805bd390213SMikolaj Golub 	sbuf_start_section(sb, NULL);
1806bd390213SMikolaj Golub 	sbuf_bcat(sb, hdr, hdrsize);
1807bd390213SMikolaj Golub 	TAILQ_FOREACH(ninfo, notelst, link)
1808435754a5SEdward Tomasz Napierala 	    __elfN(putnote)(p->td, ninfo, sb);
1809bd390213SMikolaj Golub 	/* Align up to a page boundary for the program segments. */
1810bd390213SMikolaj Golub 	sbuf_end_section(sb, -1, PAGE_SIZE, 0);
1811bd390213SMikolaj Golub 	error = sbuf_finish(sb);
1812bd390213SMikolaj Golub 	sbuf_delete(sb);
1813bd390213SMikolaj Golub 
1814bd390213SMikolaj Golub 	return (error);
1815e7228204SAlfred Perlstein }
1816bd390213SMikolaj Golub 
1817435754a5SEdward Tomasz Napierala void
1818bd390213SMikolaj Golub __elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
1819bd390213SMikolaj Golub     size_t *sizep)
1820bd390213SMikolaj Golub {
1821bd390213SMikolaj Golub 	struct proc *p;
1822bd390213SMikolaj Golub 	struct thread *thr;
1823bd390213SMikolaj Golub 	size_t size;
1824bd390213SMikolaj Golub 
1825bd390213SMikolaj Golub 	p = td->td_proc;
1826bd390213SMikolaj Golub 	size = 0;
1827bd390213SMikolaj Golub 
1828435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PRPSINFO, __elfN(note_prpsinfo), p);
1829bd390213SMikolaj Golub 
1830bd390213SMikolaj Golub 	/*
1831bd390213SMikolaj Golub 	 * To have the debugger select the right thread (LWP) as the initial
1832bd390213SMikolaj Golub 	 * This is the thread that caused the core dump and is thus likely
1833bd390213SMikolaj Golub 	 * to be the thread one wants to have selected in the debugger.
1834bd390213SMikolaj Golub 	 * be the right thread one wants to have selected in the debugger.
1835bd390213SMikolaj Golub 	 */
1836bd390213SMikolaj Golub 	thr = td;
1837bd390213SMikolaj Golub 	while (thr != NULL) {
1838435754a5SEdward Tomasz Napierala 		size += __elfN(register_note)(td, list, NT_PRSTATUS,
1839bd390213SMikolaj Golub 		    __elfN(note_prstatus), thr);
1840435754a5SEdward Tomasz Napierala 		size += __elfN(register_note)(td, list, NT_FPREGSET,
1841bd390213SMikolaj Golub 		    __elfN(note_fpregset), thr);
1842435754a5SEdward Tomasz Napierala 		size += __elfN(register_note)(td, list, NT_THRMISC,
1843bd390213SMikolaj Golub 		    __elfN(note_thrmisc), thr);
1844435754a5SEdward Tomasz Napierala 		size += __elfN(register_note)(td, list, NT_PTLWPINFO,
184586be94fcSTycho Nightingale 		    __elfN(note_ptlwpinfo), thr);
1846435754a5SEdward Tomasz Napierala 		size += __elfN(register_note)(td, list, -1,
1847bd390213SMikolaj Golub 		    __elfN(note_threadmd), thr);
1848bd390213SMikolaj Golub 
184961b4c627SEdward Tomasz Napierala 		thr = thr == td ? TAILQ_FIRST(&p->p_threads) :
1850bd390213SMikolaj Golub 		    TAILQ_NEXT(thr, td_plist);
1851bd390213SMikolaj Golub 		if (thr == td)
1852bd390213SMikolaj Golub 			thr = TAILQ_NEXT(thr, td_plist);
1853dada0278SJohn Polstra 	}
1854dada0278SJohn Polstra 
1855435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_PROC,
1856f1fca82eSMikolaj Golub 	    __elfN(note_procstat_proc), p);
1857435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_FILES,
1858f1fca82eSMikolaj Golub 	    note_procstat_files, p);
1859435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_VMMAP,
1860f1fca82eSMikolaj Golub 	    note_procstat_vmmap, p);
1861435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_GROUPS,
1862f1fca82eSMikolaj Golub 	    note_procstat_groups, p);
1863435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_UMASK,
1864f1fca82eSMikolaj Golub 	    note_procstat_umask, p);
1865435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_RLIMIT,
1866f1fca82eSMikolaj Golub 	    note_procstat_rlimit, p);
1867435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_OSREL,
1868f1fca82eSMikolaj Golub 	    note_procstat_osrel, p);
1869435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_PSSTRINGS,
1870f1fca82eSMikolaj Golub 	    __elfN(note_procstat_psstrings), p);
1871435754a5SEdward Tomasz Napierala 	size += __elfN(register_note)(td, list, NT_PROCSTAT_AUXV,
1872f1fca82eSMikolaj Golub 	    __elfN(note_procstat_auxv), p);
1873f1fca82eSMikolaj Golub 
1874bd390213SMikolaj Golub 	*sizep = size;
1875bd390213SMikolaj Golub }
1876bd390213SMikolaj Golub 
1877905d192dSEdward Tomasz Napierala void
1878bd390213SMikolaj Golub __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
18795bc3c617SKonstantin Belousov     size_t notesz, int flags)
1880bd390213SMikolaj Golub {
1881bd390213SMikolaj Golub 	Elf_Ehdr *ehdr;
1882bd390213SMikolaj Golub 	Elf_Phdr *phdr;
1883c17b0bd2SConrad Meyer 	Elf_Shdr *shdr;
1884bd390213SMikolaj Golub 	struct phdr_closure phc;
18855d9f7901SDmitry Chagin 	Elf_Brandinfo *bi;
1886bd390213SMikolaj Golub 
1887bd390213SMikolaj Golub 	ehdr = (Elf_Ehdr *)hdr;
18885d9f7901SDmitry Chagin 	bi = td->td_proc->p_elf_brandinfo;
1889bd390213SMikolaj Golub 
1890bd390213SMikolaj Golub 	ehdr->e_ident[EI_MAG0] = ELFMAG0;
1891bd390213SMikolaj Golub 	ehdr->e_ident[EI_MAG1] = ELFMAG1;
1892bd390213SMikolaj Golub 	ehdr->e_ident[EI_MAG2] = ELFMAG2;
1893bd390213SMikolaj Golub 	ehdr->e_ident[EI_MAG3] = ELFMAG3;
1894bd390213SMikolaj Golub 	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1895bd390213SMikolaj Golub 	ehdr->e_ident[EI_DATA] = ELF_DATA;
1896bd390213SMikolaj Golub 	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1897435754a5SEdward Tomasz Napierala 	ehdr->e_ident[EI_OSABI] = td->td_proc->p_sysent->sv_elf_core_osabi;
1898bd390213SMikolaj Golub 	ehdr->e_ident[EI_ABIVERSION] = 0;
1899bd390213SMikolaj Golub 	ehdr->e_ident[EI_PAD] = 0;
1900bd390213SMikolaj Golub 	ehdr->e_type = ET_CORE;
19015d9f7901SDmitry Chagin 	ehdr->e_machine = bi->machine;
1902bd390213SMikolaj Golub 	ehdr->e_version = EV_CURRENT;
1903bd390213SMikolaj Golub 	ehdr->e_entry = 0;
1904bd390213SMikolaj Golub 	ehdr->e_phoff = sizeof(Elf_Ehdr);
1905885f13dcSJohn Baldwin 	ehdr->e_flags = td->td_proc->p_elf_flags;
1906bd390213SMikolaj Golub 	ehdr->e_ehsize = sizeof(Elf_Ehdr);
1907bd390213SMikolaj Golub 	ehdr->e_phentsize = sizeof(Elf_Phdr);
1908bd390213SMikolaj Golub 	ehdr->e_shentsize = sizeof(Elf_Shdr);
1909bd390213SMikolaj Golub 	ehdr->e_shstrndx = SHN_UNDEF;
1910c17b0bd2SConrad Meyer 	if (numsegs + 1 < PN_XNUM) {
1911c17b0bd2SConrad Meyer 		ehdr->e_phnum = numsegs + 1;
1912c17b0bd2SConrad Meyer 		ehdr->e_shnum = 0;
1913c17b0bd2SConrad Meyer 	} else {
1914c17b0bd2SConrad Meyer 		ehdr->e_phnum = PN_XNUM;
1915c17b0bd2SConrad Meyer 		ehdr->e_shnum = 1;
1916c17b0bd2SConrad Meyer 
1917c17b0bd2SConrad Meyer 		ehdr->e_shoff = ehdr->e_phoff +
1918c17b0bd2SConrad Meyer 		    (numsegs + 1) * ehdr->e_phentsize;
1919c17b0bd2SConrad Meyer 		KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
1920c17b0bd2SConrad Meyer 		    ("e_shoff: %zu, hdrsize - shdr: %zu",
192107f825e8SConrad Meyer 		     (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));
1922c17b0bd2SConrad Meyer 
1923c17b0bd2SConrad Meyer 		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
1924c17b0bd2SConrad Meyer 		memset(shdr, 0, sizeof(*shdr));
1925c17b0bd2SConrad Meyer 		/*
1926c17b0bd2SConrad Meyer 		 * A special first section is used to hold large segment and
1927c17b0bd2SConrad Meyer 		 * section counts.  This was proposed by Sun Microsystems in
1928c17b0bd2SConrad Meyer 		 * Solaris and has been adopted by Linux; the standard ELF
1929c17b0bd2SConrad Meyer 		 * tools are already familiar with the technique.
1930c17b0bd2SConrad Meyer 		 *
1931c17b0bd2SConrad Meyer 		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
1932c17b0bd2SConrad Meyer 		 * (or 12-7 depending on the version of the document) for more
1933c17b0bd2SConrad Meyer 		 * details.
1934c17b0bd2SConrad Meyer 		 */
1935c17b0bd2SConrad Meyer 		shdr->sh_type = SHT_NULL;
1936c17b0bd2SConrad Meyer 		shdr->sh_size = ehdr->e_shnum;
1937c17b0bd2SConrad Meyer 		shdr->sh_link = ehdr->e_shstrndx;
1938c17b0bd2SConrad Meyer 		shdr->sh_info = numsegs + 1;
1939c17b0bd2SConrad Meyer 	}
1940bd390213SMikolaj Golub 
1941bd390213SMikolaj Golub 	/*
1942bd390213SMikolaj Golub 	 * Fill in the program header entries.
1943bd390213SMikolaj Golub 	 */
1944c17b0bd2SConrad Meyer 	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);
1945bd390213SMikolaj Golub 
1946bd390213SMikolaj Golub 	/* The note segment. */
1947bd390213SMikolaj Golub 	phdr->p_type = PT_NOTE;
1948bd390213SMikolaj Golub 	phdr->p_offset = hdrsize;
1949bd390213SMikolaj Golub 	phdr->p_vaddr = 0;
1950bd390213SMikolaj Golub 	phdr->p_paddr = 0;
1951bd390213SMikolaj Golub 	phdr->p_filesz = notesz;
1952bd390213SMikolaj Golub 	phdr->p_memsz = 0;
1953bd390213SMikolaj Golub 	phdr->p_flags = PF_R;
19541b8388cdSMikolaj Golub 	phdr->p_align = ELF_NOTE_ROUNDSIZE;
1955bd390213SMikolaj Golub 	phdr++;
1956bd390213SMikolaj Golub 
1957bd390213SMikolaj Golub 	/* All the writable segments from the program. */
1958bd390213SMikolaj Golub 	phc.phdr = phdr;
1959bd390213SMikolaj Golub 	phc.offset = round_page(hdrsize + notesz);
19605bc3c617SKonstantin Belousov 	each_dumpable_segment(td, cb_put_phdr, &phc, flags);
1961bd390213SMikolaj Golub }
1962bd390213SMikolaj Golub 
1963435754a5SEdward Tomasz Napierala size_t
1964435754a5SEdward Tomasz Napierala __elfN(register_note)(struct thread *td, struct note_info_list *list,
1965435754a5SEdward Tomasz Napierala     int type, outfunc_t out, void *arg)
1966bd390213SMikolaj Golub {
1967435754a5SEdward Tomasz Napierala 	const struct sysentvec *sv;
1968bd390213SMikolaj Golub 	struct note_info *ninfo;
1969bd390213SMikolaj Golub 	size_t size, notesize;
1970bd390213SMikolaj Golub 
1971435754a5SEdward Tomasz Napierala 	sv = td->td_proc->p_sysent;
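	/*
	 * Calling the output function with a NULL sbuf only queries the
	 * size of the note payload; nothing is emitted yet.
	 */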
1972bd390213SMikolaj Golub 	size = 0;
1973bd390213SMikolaj Golub 	out(arg, NULL, &size);
1974bd390213SMikolaj Golub 	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
1975bd390213SMikolaj Golub 	ninfo->type = type;
1976bd390213SMikolaj Golub 	ninfo->outfunc = out;
1977bd390213SMikolaj Golub 	ninfo->outarg = arg;
1978bd390213SMikolaj Golub 	ninfo->outsize = size;
1979bd390213SMikolaj Golub 	TAILQ_INSERT_TAIL(list, ninfo, link);
1980bd390213SMikolaj Golub 
1981bd390213SMikolaj Golub 	if (type == -1)
1982bd390213SMikolaj Golub 		return (size);
1983bd390213SMikolaj Golub 
1984bd390213SMikolaj Golub 	notesize = sizeof(Elf_Note) +		/* note header */
1985435754a5SEdward Tomasz Napierala 	    roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) +
1986180e57e5SJohn Baldwin 						/* note name */
1987180e57e5SJohn Baldwin 	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */
1988180e57e5SJohn Baldwin 
1989180e57e5SJohn Baldwin 	return (notesize);
1990180e57e5SJohn Baldwin }
1991180e57e5SJohn Baldwin 
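/*
 * Copy "len" bytes of note payload into "dst" (when provided), zero
 * filling up to the next ELF_NOTE_ROUNDSIZE boundary, and return the
 * padded length.
 */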
1992180e57e5SJohn Baldwin static size_t
1993180e57e5SJohn Baldwin append_note_data(const void *src, void *dst, size_t len)
1994180e57e5SJohn Baldwin {
1995180e57e5SJohn Baldwin 	size_t padded_len;
1996180e57e5SJohn Baldwin 
1997180e57e5SJohn Baldwin 	padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
1998180e57e5SJohn Baldwin 	if (dst != NULL) {
1999180e57e5SJohn Baldwin 		bcopy(src, dst, len);
2000180e57e5SJohn Baldwin 		bzero((char *)dst + len, padded_len - len);
2001180e57e5SJohn Baldwin 	}
2002180e57e5SJohn Baldwin 	return (padded_len);
2003180e57e5SJohn Baldwin }
2004180e57e5SJohn Baldwin 
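/*
 * Build a complete FREEBSD_ABI_VENDOR note in "dst" (when provided):
 * note header, padded vendor name and the descriptor copied from
 * "src".  A pointer to the descriptor within the buffer is optionally
 * returned through "descp"; the total note size is always returned.
 */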
2005180e57e5SJohn Baldwin size_t
2006180e57e5SJohn Baldwin __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
2007180e57e5SJohn Baldwin {
2008180e57e5SJohn Baldwin 	Elf_Note *note;
2009180e57e5SJohn Baldwin 	char *buf;
2010180e57e5SJohn Baldwin 	size_t notesize;
2011180e57e5SJohn Baldwin 
2012180e57e5SJohn Baldwin 	buf = dst;
2013180e57e5SJohn Baldwin 	if (buf != NULL) {
2014180e57e5SJohn Baldwin 		note = (Elf_Note *)buf;
2015180e57e5SJohn Baldwin 		note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
2016180e57e5SJohn Baldwin 		note->n_descsz = size;
2017180e57e5SJohn Baldwin 		note->n_type = type;
2018180e57e5SJohn Baldwin 		buf += sizeof(*note);
2019180e57e5SJohn Baldwin 		buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
2020180e57e5SJohn Baldwin 		    sizeof(FREEBSD_ABI_VENDOR));
2021180e57e5SJohn Baldwin 		append_note_data(src, buf, size);
2022180e57e5SJohn Baldwin 		if (descp != NULL)
2023180e57e5SJohn Baldwin 			*descp = buf;
2024180e57e5SJohn Baldwin 	}
2025180e57e5SJohn Baldwin 
2026180e57e5SJohn Baldwin 	notesize = sizeof(Elf_Note) +		/* note header */
2027180e57e5SJohn Baldwin 	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
2028180e57e5SJohn Baldwin 						/* note name */
20291b8388cdSMikolaj Golub 	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */
2030bd390213SMikolaj Golub 
2031bd390213SMikolaj Golub 	return (notesize);
2032bd390213SMikolaj Golub }
2033bd390213SMikolaj Golub 
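/*
 * Emit one registered note into the core dump sbuf: the note header,
 * the padded ABI vendor name and then the emitter's payload, zero
 * padded (or sanity checked) against the size predicted at
 * registration time.  A type of -1 emits the raw payload only.
 */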
2034bd390213SMikolaj Golub static void
2035435754a5SEdward Tomasz Napierala __elfN(putnote)(struct thread *td, struct note_info *ninfo, struct sbuf *sb)
2036bd390213SMikolaj Golub {
2037bd390213SMikolaj Golub 	Elf_Note note;
2038435754a5SEdward Tomasz Napierala 	const struct sysentvec *sv;
203914bdbaf2SConrad Meyer 	ssize_t old_len, sect_len;
204014bdbaf2SConrad Meyer 	size_t new_len, descsz, i;
2041bd390213SMikolaj Golub 
2042bd390213SMikolaj Golub 	if (ninfo->type == -1) {
2043bd390213SMikolaj Golub 		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
2044bd390213SMikolaj Golub 		return;
2045bd390213SMikolaj Golub 	}
2046bd390213SMikolaj Golub 
2047435754a5SEdward Tomasz Napierala 	sv = td->td_proc->p_sysent;
2048435754a5SEdward Tomasz Napierala 
2049435754a5SEdward Tomasz Napierala 	note.n_namesz = strlen(sv->sv_elf_core_abi_vendor) + 1;
2050bd390213SMikolaj Golub 	note.n_descsz = ninfo->outsize;
2051bd390213SMikolaj Golub 	note.n_type = ninfo->type;
2052bd390213SMikolaj Golub 
2053bd390213SMikolaj Golub 	sbuf_bcat(sb, &note, sizeof(note));
2054bd390213SMikolaj Golub 	sbuf_start_section(sb, &old_len);
2055435754a5SEdward Tomasz Napierala 	sbuf_bcat(sb, sv->sv_elf_core_abi_vendor,
2056435754a5SEdward Tomasz Napierala 	    strlen(sv->sv_elf_core_abi_vendor) + 1);
20571b8388cdSMikolaj Golub 	sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
2058bd390213SMikolaj Golub 	if (note.n_descsz == 0)
2059bd390213SMikolaj Golub 		return;
2060bd390213SMikolaj Golub 	sbuf_start_section(sb, &old_len);
2061bd390213SMikolaj Golub 	ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
206214bdbaf2SConrad Meyer 	sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
206314bdbaf2SConrad Meyer 	if (sect_len < 0)
206414bdbaf2SConrad Meyer 		return;
206514bdbaf2SConrad Meyer 
206614bdbaf2SConrad Meyer 	new_len = (size_t)sect_len;
206714bdbaf2SConrad Meyer 	descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
206814bdbaf2SConrad Meyer 	if (new_len < descsz) {
206914bdbaf2SConrad Meyer 		/*
207014bdbaf2SConrad Meyer 		 * It is expected that individual note emitters will correctly
207114bdbaf2SConrad Meyer 		 * predict their expected output size and fill up to that size
207214bdbaf2SConrad Meyer 		 * themselves, padding in a format-specific way if needed.
207314bdbaf2SConrad Meyer 		 * However, in case they don't, just do it here with zeros.
207414bdbaf2SConrad Meyer 		 */
207514bdbaf2SConrad Meyer 		for (i = 0; i < descsz - new_len; i++)
207614bdbaf2SConrad Meyer 			sbuf_putc(sb, 0);
207714bdbaf2SConrad Meyer 	} else if (new_len > descsz) {
207814bdbaf2SConrad Meyer 		/*
207914bdbaf2SConrad Meyer 		 * We can't always truncate sb -- we may have drained some
208014bdbaf2SConrad Meyer 		 * of it already.
208114bdbaf2SConrad Meyer 		 */
208214bdbaf2SConrad Meyer 		KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
208314bdbaf2SConrad Meyer 		    "read it (%zu > %zu).  Since it is longer than "
208414bdbaf2SConrad Meyer 		    "expected, this coredump's notes are corrupt.  THIS "
208514bdbaf2SConrad Meyer 		    "IS A BUG in the note_procstat routine for type %u.\n",
208614bdbaf2SConrad Meyer 		    __func__, (unsigned)note.n_type, new_len, descsz,
208714bdbaf2SConrad Meyer 		    (unsigned)note.n_type));
208814bdbaf2SConrad Meyer 	}
2089bd390213SMikolaj Golub }
2090bd390213SMikolaj Golub 
2091bd390213SMikolaj Golub /*
2092bd390213SMikolaj Golub  * Miscellaneous note out functions.
2093bd390213SMikolaj Golub  */
2094bd390213SMikolaj Golub 
2095841c0c7eSNathan Whitehorn #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2096841c0c7eSNathan Whitehorn #include <compat/freebsd32/freebsd32.h>
209751645e83SJohn Baldwin #include <compat/freebsd32/freebsd32_signal.h>
2098841c0c7eSNathan Whitehorn 
209962919d78SPeter Wemm typedef struct prstatus32 elf_prstatus_t;
210062919d78SPeter Wemm typedef struct prpsinfo32 elf_prpsinfo_t;
210162919d78SPeter Wemm typedef struct fpreg32 elf_prfpregset_t;
210262919d78SPeter Wemm typedef struct fpreg32 elf_fpregset_t;
210362919d78SPeter Wemm typedef struct reg32 elf_gregset_t;
21047f08176eSAttilio Rao typedef struct thrmisc32 elf_thrmisc_t;
2105f1fca82eSMikolaj Golub #define ELF_KERN_PROC_MASK	KERN_PROC_MASK32
2106f1fca82eSMikolaj Golub typedef struct kinfo_proc32 elf_kinfo_proc_t;
2107f1fca82eSMikolaj Golub typedef uint32_t elf_ps_strings_t;
210862919d78SPeter Wemm #else
210962919d78SPeter Wemm typedef prstatus_t elf_prstatus_t;
211062919d78SPeter Wemm typedef prpsinfo_t elf_prpsinfo_t;
211162919d78SPeter Wemm typedef prfpregset_t elf_prfpregset_t;
211262919d78SPeter Wemm typedef prfpregset_t elf_fpregset_t;
211362919d78SPeter Wemm typedef gregset_t elf_gregset_t;
21147f08176eSAttilio Rao typedef thrmisc_t elf_thrmisc_t;
2115f1fca82eSMikolaj Golub #define ELF_KERN_PROC_MASK	0
2116f1fca82eSMikolaj Golub typedef struct kinfo_proc elf_kinfo_proc_t;
2117f1fca82eSMikolaj Golub typedef vm_offset_t elf_ps_strings_t;
211862919d78SPeter Wemm #endif
211962919d78SPeter Wemm 
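/*
 * Emit a prpsinfo note: the command name and the argument string, with
 * embedded NUL separators converted to spaces, plus the process id.
 */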
21208c64af4fSJohn Polstra static void
2121bd390213SMikolaj Golub __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
21228c64af4fSJohn Polstra {
2123c77547d2SJohn Baldwin 	struct sbuf sbarg;
2124c77547d2SJohn Baldwin 	size_t len;
2125c77547d2SJohn Baldwin 	char *cp, *end;
2126247aba24SMarcel Moolenaar 	struct proc *p;
2127bd390213SMikolaj Golub 	elf_prpsinfo_t *psinfo;
2128c77547d2SJohn Baldwin 	int error;
21298c64af4fSJohn Polstra 
213061b4c627SEdward Tomasz Napierala 	p = arg;
2131bd390213SMikolaj Golub 	if (sb != NULL) {
2132bd390213SMikolaj Golub 		KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
2133bd390213SMikolaj Golub 		psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
21348c9b7b2cSMarcel Moolenaar 		psinfo->pr_version = PRPSINFO_VERSION;
213562919d78SPeter Wemm 		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
2136ccd3953eSJohn Baldwin 		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
2137c77547d2SJohn Baldwin 		PROC_LOCK(p);
2138c77547d2SJohn Baldwin 		if (p->p_args != NULL) {
2139c77547d2SJohn Baldwin 			len = sizeof(psinfo->pr_psargs) - 1;
2140c77547d2SJohn Baldwin 			if (len > p->p_args->ar_length)
2141c77547d2SJohn Baldwin 				len = p->p_args->ar_length;
2142c77547d2SJohn Baldwin 			memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
2143c77547d2SJohn Baldwin 			PROC_UNLOCK(p);
2144c77547d2SJohn Baldwin 			error = 0;
2145c77547d2SJohn Baldwin 		} else {
2146c77547d2SJohn Baldwin 			_PHOLD(p);
2147c77547d2SJohn Baldwin 			PROC_UNLOCK(p);
2148c77547d2SJohn Baldwin 			sbuf_new(&sbarg, psinfo->pr_psargs,
2149c77547d2SJohn Baldwin 			    sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
2150c77547d2SJohn Baldwin 			error = proc_getargv(curthread, p, &sbarg);
2151c77547d2SJohn Baldwin 			PRELE(p);
2152c77547d2SJohn Baldwin 			if (sbuf_finish(&sbarg) == 0)
2153c77547d2SJohn Baldwin 				len = sbuf_len(&sbarg) - 1;
2154c77547d2SJohn Baldwin 			else
2155c77547d2SJohn Baldwin 				len = sizeof(psinfo->pr_psargs) - 1;
2156c77547d2SJohn Baldwin 			sbuf_delete(&sbarg);
2157c77547d2SJohn Baldwin 		}
2158c77547d2SJohn Baldwin 		if (error || len == 0)
2159ccd3953eSJohn Baldwin 			strlcpy(psinfo->pr_psargs, p->p_comm,
21608c9b7b2cSMarcel Moolenaar 			    sizeof(psinfo->pr_psargs));
2161c77547d2SJohn Baldwin 		else {
2162c77547d2SJohn Baldwin 			KASSERT(len < sizeof(psinfo->pr_psargs),
2163c77547d2SJohn Baldwin 			    ("len is too long: %zu vs %zu", len,
2164c77547d2SJohn Baldwin 			    sizeof(psinfo->pr_psargs)));
2165c77547d2SJohn Baldwin 			cp = psinfo->pr_psargs;
2166c77547d2SJohn Baldwin 			end = cp + len - 1;
2167c77547d2SJohn Baldwin 			for (;;) {
2168c77547d2SJohn Baldwin 				cp = memchr(cp, '\0', end - cp);
2169c77547d2SJohn Baldwin 				if (cp == NULL)
2170c77547d2SJohn Baldwin 					break;
2171c77547d2SJohn Baldwin 				*cp = ' ';
2172c77547d2SJohn Baldwin 			}
2173c77547d2SJohn Baldwin 		}
2174ccb83afdSJohn Baldwin 		psinfo->pr_pid = p->p_pid;
2175bd390213SMikolaj Golub 		sbuf_bcat(sb, psinfo, sizeof(*psinfo));
2176bd390213SMikolaj Golub 		free(psinfo, M_TEMP);
2177bd390213SMikolaj Golub 	}
2178bd390213SMikolaj Golub 	*sizep = sizeof(*psinfo);
2179bd390213SMikolaj Golub }
2180bd390213SMikolaj Golub 
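/*
 * Emit a prstatus note for one thread: the current signal, the thread
 * id and the general purpose register set.
 */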
2181bd390213SMikolaj Golub static void
2182bd390213SMikolaj Golub __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep)
2183bd390213SMikolaj Golub {
2184bd390213SMikolaj Golub 	struct thread *td;
2185bd390213SMikolaj Golub 	elf_prstatus_t *status;
2186bd390213SMikolaj Golub 
218761b4c627SEdward Tomasz Napierala 	td = arg;
2188bd390213SMikolaj Golub 	if (sb != NULL) {
2189bd390213SMikolaj Golub 		KASSERT(*sizep == sizeof(*status), ("invalid size"));
2190bd390213SMikolaj Golub 		status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK);
21918c9b7b2cSMarcel Moolenaar 		status->pr_version = PRSTATUS_VERSION;
219262919d78SPeter Wemm 		status->pr_statussz = sizeof(elf_prstatus_t);
219362919d78SPeter Wemm 		status->pr_gregsetsz = sizeof(elf_gregset_t);
219462919d78SPeter Wemm 		status->pr_fpregsetsz = sizeof(elf_fpregset_t);
21958c9b7b2cSMarcel Moolenaar 		status->pr_osreldate = osreldate;
2196bd390213SMikolaj Golub 		status->pr_cursig = td->td_proc->p_sig;
2197bd390213SMikolaj Golub 		status->pr_pid = td->td_tid;
2198841c0c7eSNathan Whitehorn #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2199bd390213SMikolaj Golub 		fill_regs32(td, &status->pr_reg);
220062919d78SPeter Wemm #else
2201bd390213SMikolaj Golub 		fill_regs(td, &status->pr_reg);
220262919d78SPeter Wemm #endif
2203bd390213SMikolaj Golub 		sbuf_bcat(sb, status, sizeof(*status));
2204bd390213SMikolaj Golub 		free(status, M_TEMP);
22058c9b7b2cSMarcel Moolenaar 	}
2206bd390213SMikolaj Golub 	*sizep = sizeof(*status);
2207bd390213SMikolaj Golub }
2208bd390213SMikolaj Golub 
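/*
 * Emit the thread's floating point register set.
 */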
2209bd390213SMikolaj Golub static void
2210bd390213SMikolaj Golub __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep)
2211bd390213SMikolaj Golub {
2212bd390213SMikolaj Golub 	struct thread *td;
2213bd390213SMikolaj Golub 	elf_prfpregset_t *fpregset;
2214bd390213SMikolaj Golub 
221561b4c627SEdward Tomasz Napierala 	td = arg;
2216bd390213SMikolaj Golub 	if (sb != NULL) {
2217bd390213SMikolaj Golub 		KASSERT(*sizep == sizeof(*fpregset), ("invalid size"));
2218bd390213SMikolaj Golub 		fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK);
2219bd390213SMikolaj Golub #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2220bd390213SMikolaj Golub 		fill_fpregs32(td, fpregset);
2221bd390213SMikolaj Golub #else
2222bd390213SMikolaj Golub 		fill_fpregs(td, fpregset);
2223bd390213SMikolaj Golub #endif
2224bd390213SMikolaj Golub 		sbuf_bcat(sb, fpregset, sizeof(*fpregset));
2225bd390213SMikolaj Golub 		free(fpregset, M_TEMP);
2226bd390213SMikolaj Golub 	}
2227bd390213SMikolaj Golub 	*sizep = sizeof(*fpregset);
2228bd390213SMikolaj Golub }
2229bd390213SMikolaj Golub 
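/*
 * Emit a thrmisc note, currently holding just the thread name.
 */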
2230bd390213SMikolaj Golub static void
2231bd390213SMikolaj Golub __elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep)
2232bd390213SMikolaj Golub {
2233bd390213SMikolaj Golub 	struct thread *td;
2234bd390213SMikolaj Golub 	elf_thrmisc_t thrmisc;
2235bd390213SMikolaj Golub 
223661b4c627SEdward Tomasz Napierala 	td = arg;
2237bd390213SMikolaj Golub 	if (sb != NULL) {
2238bd390213SMikolaj Golub 		KASSERT(*sizep == sizeof(thrmisc), ("invalid size"));
22392e5f9189SEd Maste 		bzero(&thrmisc, sizeof(thrmisc));
2240bd390213SMikolaj Golub 		strcpy(thrmisc.pr_tname, td->td_name);
2241bd390213SMikolaj Golub 		sbuf_bcat(sb, &thrmisc, sizeof(thrmisc));
2242bd390213SMikolaj Golub 	}
2243bd390213SMikolaj Golub 	*sizep = sizeof(thrmisc);
2244bd390213SMikolaj Golub }
2245bd390213SMikolaj Golub 
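/*
 * Emit a struct ptrace_lwpinfo snapshot for the thread, including the
 * signal masks and any pending siginfo, prefixed by the structure
 * size.
 */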
224686be94fcSTycho Nightingale static void
224786be94fcSTycho Nightingale __elfN(note_ptlwpinfo)(void *arg, struct sbuf *sb, size_t *sizep)
224886be94fcSTycho Nightingale {
224986be94fcSTycho Nightingale 	struct thread *td;
225086be94fcSTycho Nightingale 	size_t size;
225186be94fcSTycho Nightingale 	int structsize;
225251645e83SJohn Baldwin #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
225351645e83SJohn Baldwin 	struct ptrace_lwpinfo32 pl;
225451645e83SJohn Baldwin #else
225586be94fcSTycho Nightingale 	struct ptrace_lwpinfo pl;
225651645e83SJohn Baldwin #endif
225786be94fcSTycho Nightingale 
225861b4c627SEdward Tomasz Napierala 	td = arg;
225951645e83SJohn Baldwin 	size = sizeof(structsize) + sizeof(pl);
226086be94fcSTycho Nightingale 	if (sb != NULL) {
226186be94fcSTycho Nightingale 		KASSERT(*sizep == size, ("invalid size"));
226251645e83SJohn Baldwin 		structsize = sizeof(pl);
226386be94fcSTycho Nightingale 		sbuf_bcat(sb, &structsize, sizeof(structsize));
226486be94fcSTycho Nightingale 		bzero(&pl, sizeof(pl));
226586be94fcSTycho Nightingale 		pl.pl_lwpid = td->td_tid;
226686be94fcSTycho Nightingale 		pl.pl_event = PL_EVENT_NONE;
226786be94fcSTycho Nightingale 		pl.pl_sigmask = td->td_sigmask;
226886be94fcSTycho Nightingale 		pl.pl_siglist = td->td_siglist;
226986be94fcSTycho Nightingale 		if (td->td_si.si_signo != 0) {
227086be94fcSTycho Nightingale 			pl.pl_event = PL_EVENT_SIGNAL;
227186be94fcSTycho Nightingale 			pl.pl_flags |= PL_FLAG_SI;
227251645e83SJohn Baldwin #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
227351645e83SJohn Baldwin 			siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo);
227451645e83SJohn Baldwin #else
227586be94fcSTycho Nightingale 			pl.pl_siginfo = td->td_si;
227651645e83SJohn Baldwin #endif
227786be94fcSTycho Nightingale 		}
227886be94fcSTycho Nightingale 		strcpy(pl.pl_tdname, td->td_name);
227986be94fcSTycho Nightingale 		/* XXX TODO: supply more information in struct ptrace_lwpinfo */
228051645e83SJohn Baldwin 		sbuf_bcat(sb, &pl, sizeof(pl));
228186be94fcSTycho Nightingale 	}
228286be94fcSTycho Nightingale 	*sizep = size;
228386be94fcSTycho Nightingale }
228486be94fcSTycho Nightingale 
22854da47b2fSMarcel Moolenaar /*
22864da47b2fSMarcel Moolenaar  * Allow for MD specific notes, as well as any MD
22874da47b2fSMarcel Moolenaar  * specific preparations for writing MI notes.
22884da47b2fSMarcel Moolenaar  */
22898c64af4fSJohn Polstra static void
2290bd390213SMikolaj Golub __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
22918c64af4fSJohn Polstra {
2292bd390213SMikolaj Golub 	struct thread *td;
2293bd390213SMikolaj Golub 	void *buf;
2294bd390213SMikolaj Golub 	size_t size;
22958c64af4fSJohn Polstra 
2296bd390213SMikolaj Golub 	td = (struct thread *)arg;
2297bd390213SMikolaj Golub 	size = *sizep;
2298bd390213SMikolaj Golub 	if (size != 0 && sb != NULL)
2299bd390213SMikolaj Golub 		buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
230083a396ceSChristian Brueffer 	else
230183a396ceSChristian Brueffer 		buf = NULL;
2302bd390213SMikolaj Golub 	size = 0;
2303bd390213SMikolaj Golub 	__elfN(dump_thread)(td, buf, &size);
230464779280SKonstantin Belousov 	KASSERT(sb == NULL || *sizep == size, ("invalid size"));
230583a396ceSChristian Brueffer 	if (size != 0 && sb != NULL)
2306bd390213SMikolaj Golub 		sbuf_bcat(sb, buf, size);
2307a1761d73SChristian Brueffer 	free(buf, M_TEMP);
2308bd390213SMikolaj Golub 	*sizep = size;
23098c64af4fSJohn Polstra }
23108c64af4fSJohn Polstra 
2311f1fca82eSMikolaj Golub #ifdef KINFO_PROC_SIZE
2312f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
2313f1fca82eSMikolaj Golub #endif
2314f1fca82eSMikolaj Golub 
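/*
 * Emit a procstat note containing a kinfo_proc entry for every thread
 * in the process, prefixed by the structure size.
 */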
2315f1fca82eSMikolaj Golub static void
2316f1fca82eSMikolaj Golub __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
2317f1fca82eSMikolaj Golub {
2318f1fca82eSMikolaj Golub 	struct proc *p;
2319f1fca82eSMikolaj Golub 	size_t size;
2320f1fca82eSMikolaj Golub 	int structsize;
2321f1fca82eSMikolaj Golub 
232261b4c627SEdward Tomasz Napierala 	p = arg;
2323f1fca82eSMikolaj Golub 	size = sizeof(structsize) + p->p_numthreads *
2324f1fca82eSMikolaj Golub 	    sizeof(elf_kinfo_proc_t);
2325f1fca82eSMikolaj Golub 
2326f1fca82eSMikolaj Golub 	if (sb != NULL) {
2327f1fca82eSMikolaj Golub 		KASSERT(*sizep == size, ("invalid size"));
2328f1fca82eSMikolaj Golub 		structsize = sizeof(elf_kinfo_proc_t);
2329f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
23304daea938SKonstantin Belousov 		sx_slock(&proctree_lock);
2331f1fca82eSMikolaj Golub 		PROC_LOCK(p);
2332f1fca82eSMikolaj Golub 		kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
23334daea938SKonstantin Belousov 		sx_sunlock(&proctree_lock);
2334f1fca82eSMikolaj Golub 	}
2335f1fca82eSMikolaj Golub 	*sizep = size;
2336f1fca82eSMikolaj Golub }
2337f1fca82eSMikolaj Golub 
2338f1fca82eSMikolaj Golub #ifdef KINFO_FILE_SIZE
2339f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
2340f1fca82eSMikolaj Golub #endif
2341f1fca82eSMikolaj Golub 
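/*
 * Emit a procstat note describing the process's open files.  The
 * sizing pass uses a counting drain to measure the output of
 * kern_proc_filedesc_out(); the writing pass zero pads up to that
 * previously computed size.
 */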
2342f1fca82eSMikolaj Golub static void
2343f1fca82eSMikolaj Golub note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
2344f1fca82eSMikolaj Golub {
2345f1fca82eSMikolaj Golub 	struct proc *p;
234614bdbaf2SConrad Meyer 	size_t size, sect_sz, i;
234714bdbaf2SConrad Meyer 	ssize_t start_len, sect_len;
234814bdbaf2SConrad Meyer 	int structsize, filedesc_flags;
234914bdbaf2SConrad Meyer 
2350bcb60d52SConrad Meyer 	if (coredump_pack_fileinfo)
235114bdbaf2SConrad Meyer 		filedesc_flags = KERN_FILEDESC_PACK_KINFO;
235214bdbaf2SConrad Meyer 	else
235314bdbaf2SConrad Meyer 		filedesc_flags = 0;
2354f1fca82eSMikolaj Golub 
235561b4c627SEdward Tomasz Napierala 	p = arg;
235614bdbaf2SConrad Meyer 	structsize = sizeof(struct kinfo_file);
2357f1fca82eSMikolaj Golub 	if (sb == NULL) {
2358f1fca82eSMikolaj Golub 		size = 0;
2359f1fca82eSMikolaj Golub 		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
23605c32e9fcSAlexander Motin 		sbuf_set_drain(sb, sbuf_count_drain, &size);
2361f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2362f1fca82eSMikolaj Golub 		PROC_LOCK(p);
236314bdbaf2SConrad Meyer 		kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
2364f1fca82eSMikolaj Golub 		sbuf_finish(sb);
2365f1fca82eSMikolaj Golub 		sbuf_delete(sb);
2366f1fca82eSMikolaj Golub 		*sizep = size;
2367f1fca82eSMikolaj Golub 	} else {
236814bdbaf2SConrad Meyer 		sbuf_start_section(sb, &start_len);
236914bdbaf2SConrad Meyer 
2370f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2371f1fca82eSMikolaj Golub 		PROC_LOCK(p);
237214bdbaf2SConrad Meyer 		kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
237314bdbaf2SConrad Meyer 		    filedesc_flags);
237414bdbaf2SConrad Meyer 
237514bdbaf2SConrad Meyer 		sect_len = sbuf_end_section(sb, start_len, 0, 0);
237614bdbaf2SConrad Meyer 		if (sect_len < 0)
237714bdbaf2SConrad Meyer 			return;
237814bdbaf2SConrad Meyer 		sect_sz = sect_len;
237914bdbaf2SConrad Meyer 
238014bdbaf2SConrad Meyer 		KASSERT(sect_sz <= *sizep,
238114bdbaf2SConrad Meyer 		    ("kern_proc_filedesc_out did not respect maxlen; "
238214bdbaf2SConrad Meyer 		     "requested %zu, got %zu", *sizep - sizeof(structsize),
238314bdbaf2SConrad Meyer 		     sect_sz - sizeof(structsize)));
238414bdbaf2SConrad Meyer 
238514bdbaf2SConrad Meyer 		for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
238614bdbaf2SConrad Meyer 			sbuf_putc(sb, 0);
2387f1fca82eSMikolaj Golub 	}
2388f1fca82eSMikolaj Golub }
2389f1fca82eSMikolaj Golub 
2390f1fca82eSMikolaj Golub #ifdef KINFO_VMENTRY_SIZE
2391f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
2392f1fca82eSMikolaj Golub #endif
2393f1fca82eSMikolaj Golub 
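/*
 * Emit a procstat note describing the process's VM map entries, as
 * produced by kern_proc_vmmap_out().
 */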
2394f1fca82eSMikolaj Golub static void
2395f1fca82eSMikolaj Golub note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
2396f1fca82eSMikolaj Golub {
2397f1fca82eSMikolaj Golub 	struct proc *p;
2398f1fca82eSMikolaj Golub 	size_t size;
2399e6b95927SConrad Meyer 	int structsize, vmmap_flags;
2400e6b95927SConrad Meyer 
2401e6b95927SConrad Meyer 	if (coredump_pack_vmmapinfo)
2402e6b95927SConrad Meyer 		vmmap_flags = KERN_VMMAP_PACK_KINFO;
2403e6b95927SConrad Meyer 	else
2404e6b95927SConrad Meyer 		vmmap_flags = 0;
2405f1fca82eSMikolaj Golub 
240661b4c627SEdward Tomasz Napierala 	p = arg;
2407e6b95927SConrad Meyer 	structsize = sizeof(struct kinfo_vmentry);
2408f1fca82eSMikolaj Golub 	if (sb == NULL) {
2409f1fca82eSMikolaj Golub 		size = 0;
2410f1fca82eSMikolaj Golub 		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
24115c32e9fcSAlexander Motin 		sbuf_set_drain(sb, sbuf_count_drain, &size);
2412f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2413f1fca82eSMikolaj Golub 		PROC_LOCK(p);
2414e6b95927SConrad Meyer 		kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
2415f1fca82eSMikolaj Golub 		sbuf_finish(sb);
2416f1fca82eSMikolaj Golub 		sbuf_delete(sb);
2417f1fca82eSMikolaj Golub 		*sizep = size;
2418f1fca82eSMikolaj Golub 	} else {
2419f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2420f1fca82eSMikolaj Golub 		PROC_LOCK(p);
2421e6b95927SConrad Meyer 		kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
2422e6b95927SConrad Meyer 		    vmmap_flags);
2423f1fca82eSMikolaj Golub 	}
2424f1fca82eSMikolaj Golub }
2425f1fca82eSMikolaj Golub 
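/*
 * Emit a procstat note with the process credential's group list.
 */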
2426f1fca82eSMikolaj Golub static void
2427f1fca82eSMikolaj Golub note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
2428f1fca82eSMikolaj Golub {
2429f1fca82eSMikolaj Golub 	struct proc *p;
2430f1fca82eSMikolaj Golub 	size_t size;
2431f1fca82eSMikolaj Golub 	int structsize;
2432f1fca82eSMikolaj Golub 
243361b4c627SEdward Tomasz Napierala 	p = arg;
2434f1fca82eSMikolaj Golub 	size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
2435f1fca82eSMikolaj Golub 	if (sb != NULL) {
2436f1fca82eSMikolaj Golub 		KASSERT(*sizep == size, ("invalid size"));
2437f1fca82eSMikolaj Golub 		structsize = sizeof(gid_t);
2438f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2439f1fca82eSMikolaj Golub 		sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
2440f1fca82eSMikolaj Golub 		    sizeof(gid_t));
2441f1fca82eSMikolaj Golub 	}
2442f1fca82eSMikolaj Golub 	*sizep = size;
2443f1fca82eSMikolaj Golub }
2444f1fca82eSMikolaj Golub 
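/*
 * Emit a procstat note with the process's file creation mask.
 */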
2445f1fca82eSMikolaj Golub static void
2446f1fca82eSMikolaj Golub note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
2447f1fca82eSMikolaj Golub {
2448f1fca82eSMikolaj Golub 	struct proc *p;
2449f1fca82eSMikolaj Golub 	size_t size;
2450f1fca82eSMikolaj Golub 	int structsize;
2451f1fca82eSMikolaj Golub 
245261b4c627SEdward Tomasz Napierala 	p = arg;
245385078b85SConrad Meyer 	size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask);
2454f1fca82eSMikolaj Golub 	if (sb != NULL) {
2455f1fca82eSMikolaj Golub 		KASSERT(*sizep == size, ("invalid size"));
245685078b85SConrad Meyer 		structsize = sizeof(p->p_pd->pd_cmask);
2457f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
245885078b85SConrad Meyer 		sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask));
2459f1fca82eSMikolaj Golub 	}
2460f1fca82eSMikolaj Golub 	*sizep = size;
2461f1fca82eSMikolaj Golub }
2462f1fca82eSMikolaj Golub 
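/*
 * Emit a procstat note with the full table of resource limits.
 */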
2463f1fca82eSMikolaj Golub static void
2464f1fca82eSMikolaj Golub note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
2465f1fca82eSMikolaj Golub {
2466f1fca82eSMikolaj Golub 	struct proc *p;
2467f1fca82eSMikolaj Golub 	struct rlimit rlim[RLIM_NLIMITS];
2468f1fca82eSMikolaj Golub 	size_t size;
2469f1fca82eSMikolaj Golub 	int structsize, i;
2470f1fca82eSMikolaj Golub 
247161b4c627SEdward Tomasz Napierala 	p = arg;
2472f1fca82eSMikolaj Golub 	size = sizeof(structsize) + sizeof(rlim);
2473f1fca82eSMikolaj Golub 	if (sb != NULL) {
2474f1fca82eSMikolaj Golub 		KASSERT(*sizep == size, ("invalid size"));
2475f1fca82eSMikolaj Golub 		structsize = sizeof(rlim);
2476f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2477f1fca82eSMikolaj Golub 		PROC_LOCK(p);
2478f1fca82eSMikolaj Golub 		for (i = 0; i < RLIM_NLIMITS; i++)
2479f6f6d240SMateusz Guzik 			lim_rlimit_proc(p, i, &rlim[i]);
2480f1fca82eSMikolaj Golub 		PROC_UNLOCK(p);
2481f1fca82eSMikolaj Golub 		sbuf_bcat(sb, rlim, sizeof(rlim));
2482f1fca82eSMikolaj Golub 	}
2483f1fca82eSMikolaj Golub 	*sizep = size;
2484f1fca82eSMikolaj Golub }
2485f1fca82eSMikolaj Golub 
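/*
 * Emit a procstat note with the osreldate recorded for the process.
 */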
2486f1fca82eSMikolaj Golub static void
2487f1fca82eSMikolaj Golub note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
2488f1fca82eSMikolaj Golub {
2489f1fca82eSMikolaj Golub 	struct proc *p;
2490f1fca82eSMikolaj Golub 	size_t size;
2491f1fca82eSMikolaj Golub 	int structsize;
2492f1fca82eSMikolaj Golub 
249361b4c627SEdward Tomasz Napierala 	p = arg;
2494f1fca82eSMikolaj Golub 	size = sizeof(structsize) + sizeof(p->p_osrel);
2495f1fca82eSMikolaj Golub 	if (sb != NULL) {
2496f1fca82eSMikolaj Golub 		KASSERT(*sizep == size, ("invalid size"));
2497f1fca82eSMikolaj Golub 		structsize = sizeof(p->p_osrel);
2498f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2499f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
2500f1fca82eSMikolaj Golub 	}
2501f1fca82eSMikolaj Golub 	*sizep = size;
2502f1fca82eSMikolaj Golub }
2503f1fca82eSMikolaj Golub 
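/*
 * Emit a procstat note with the address of the ps_strings structure in
 * the process's address space.
 */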
2504f1fca82eSMikolaj Golub static void
2505f1fca82eSMikolaj Golub __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
2506f1fca82eSMikolaj Golub {
2507f1fca82eSMikolaj Golub 	struct proc *p;
2508f1fca82eSMikolaj Golub 	elf_ps_strings_t ps_strings;
2509f1fca82eSMikolaj Golub 	size_t size;
2510f1fca82eSMikolaj Golub 	int structsize;
2511f1fca82eSMikolaj Golub 
251261b4c627SEdward Tomasz Napierala 	p = arg;
2513f1fca82eSMikolaj Golub 	size = sizeof(structsize) + sizeof(ps_strings);
2514f1fca82eSMikolaj Golub 	if (sb != NULL) {
2515f1fca82eSMikolaj Golub 		KASSERT(*sizep == size, ("invalid size"));
2516f1fca82eSMikolaj Golub 		structsize = sizeof(ps_strings);
2517f1fca82eSMikolaj Golub #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2518f1fca82eSMikolaj Golub 		ps_strings = PTROUT(p->p_sysent->sv_psstrings);
2519f1fca82eSMikolaj Golub #else
2520f1fca82eSMikolaj Golub 		ps_strings = p->p_sysent->sv_psstrings;
2521f1fca82eSMikolaj Golub #endif
2522f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2523f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
2524f1fca82eSMikolaj Golub 	}
2525f1fca82eSMikolaj Golub 	*sizep = size;
2526f1fca82eSMikolaj Golub }
2527f1fca82eSMikolaj Golub 
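/*
 * Emit a procstat note with the process's ELF auxiliary vector.
 */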
2528f1fca82eSMikolaj Golub static void
2529f1fca82eSMikolaj Golub __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
2530f1fca82eSMikolaj Golub {
2531f1fca82eSMikolaj Golub 	struct proc *p;
2532f1fca82eSMikolaj Golub 	size_t size;
2533f1fca82eSMikolaj Golub 	int structsize;
2534f1fca82eSMikolaj Golub 
253561b4c627SEdward Tomasz Napierala 	p = arg;
2536f1fca82eSMikolaj Golub 	if (sb == NULL) {
2537f1fca82eSMikolaj Golub 		size = 0;
253806250515SEdward Tomasz Napierala 		sb = sbuf_new(NULL, NULL, AT_COUNT * sizeof(Elf_Auxinfo),
253906250515SEdward Tomasz Napierala 		    SBUF_FIXEDLEN);
25405c32e9fcSAlexander Motin 		sbuf_set_drain(sb, sbuf_count_drain, &size);
2541f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2542f1fca82eSMikolaj Golub 		PHOLD(p);
2543f1fca82eSMikolaj Golub 		proc_getauxv(curthread, p, sb);
2544f1fca82eSMikolaj Golub 		PRELE(p);
2545f1fca82eSMikolaj Golub 		sbuf_finish(sb);
2546f1fca82eSMikolaj Golub 		sbuf_delete(sb);
2547f1fca82eSMikolaj Golub 		*sizep = size;
2548f1fca82eSMikolaj Golub 	} else {
2549f1fca82eSMikolaj Golub 		structsize = sizeof(Elf_Auxinfo);
2550f1fca82eSMikolaj Golub 		sbuf_bcat(sb, &structsize, sizeof(structsize));
2551f1fca82eSMikolaj Golub 		PHOLD(p);
2552f1fca82eSMikolaj Golub 		proc_getauxv(curthread, p, sb);
2553f1fca82eSMikolaj Golub 		PRELE(p);
2554f1fca82eSMikolaj Golub 	}
2555f1fca82eSMikolaj Golub }
2556f1fca82eSMikolaj Golub 
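/*
 * Walk a PT_NOTE program header and invoke "cb" on each note whose
 * header fields and vendor name match "checknote".  Notes that do not
 * fit in the already-mapped first page are read from the vnode.  The
 * walk stops at the first note the callback claims; the callback's
 * verdict is returned, or false if no note matched.
 */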
255719621645SAlex Richardson static bool
255892328a32SKonstantin Belousov __elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote,
255992328a32SKonstantin Belousov     const char *note_vendor, const Elf_Phdr *pnote,
256019621645SAlex Richardson     bool (*cb)(const Elf_Note *, void *, bool *), void *cb_arg)
256132c01de2SDmitry Chagin {
2562267c52fcSKonstantin Belousov 	const Elf_Note *note, *note0, *note_end;
256332c01de2SDmitry Chagin 	const char *note_name;
25646c775eb6SKonstantin Belousov 	char *buf;
25656c775eb6SKonstantin Belousov 	int i, error;
256619621645SAlex Richardson 	bool res;
256732c01de2SDmitry Chagin 
25686c775eb6SKonstantin Belousov 	/* We need some limit; might as well use PAGE_SIZE. */
25696c775eb6SKonstantin Belousov 	if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
257019621645SAlex Richardson 		return (false);
25716c775eb6SKonstantin Belousov 	ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
25726c775eb6SKonstantin Belousov 	if (pnote->p_offset > PAGE_SIZE ||
25736c775eb6SKonstantin Belousov 	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
25742d6b8546SKonstantin Belousov 		buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
25752d6b8546SKonstantin Belousov 		if (buf == NULL) {
2576b249ce48SMateusz Guzik 			VOP_UNLOCK(imgp->vp);
25776c775eb6SKonstantin Belousov 			buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
257878022527SKonstantin Belousov 			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
25792d6b8546SKonstantin Belousov 		}
25806c775eb6SKonstantin Belousov 		error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
25816c775eb6SKonstantin Belousov 		    pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
25826c775eb6SKonstantin Belousov 		    curthread->td_ucred, NOCRED, NULL, curthread);
25836c775eb6SKonstantin Belousov 		if (error != 0) {
25846c775eb6SKonstantin Belousov 			uprintf("i/o error PT_NOTE\n");
2585eda8fe63SKonstantin Belousov 			goto retf;
25866c775eb6SKonstantin Belousov 		}
25876c775eb6SKonstantin Belousov 		note = note0 = (const Elf_Note *)buf;
25886c775eb6SKonstantin Belousov 		note_end = (const Elf_Note *)(buf + pnote->p_filesz);
25896c775eb6SKonstantin Belousov 	} else {
25906c775eb6SKonstantin Belousov 		note = note0 = (const Elf_Note *)(imgp->image_header +
25916c775eb6SKonstantin Belousov 		    pnote->p_offset);
259232c01de2SDmitry Chagin 		note_end = (const Elf_Note *)(imgp->image_header +
259332c01de2SDmitry Chagin 		    pnote->p_offset + pnote->p_filesz);
25946c775eb6SKonstantin Belousov 		buf = NULL;
25956c775eb6SKonstantin Belousov 	}
2596267c52fcSKonstantin Belousov 	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
2597d1ae5c83SKonstantin Belousov 		if (!aligned(note, Elf32_Addr) || (const char *)note_end -
25986c775eb6SKonstantin Belousov 		    (const char *)note < sizeof(Elf_Note)) {
2599eda8fe63SKonstantin Belousov 			goto retf;
26006c775eb6SKonstantin Belousov 		}
260192328a32SKonstantin Belousov 		if (note->n_namesz != checknote->n_namesz ||
260292328a32SKonstantin Belousov 		    note->n_descsz != checknote->n_descsz ||
260392328a32SKonstantin Belousov 		    note->n_type != checknote->n_type)
260432c01de2SDmitry Chagin 			goto nextnote;
260532c01de2SDmitry Chagin 		note_name = (const char *)(note + 1);
260692328a32SKonstantin Belousov 		if (note_name + checknote->n_namesz >=
260792328a32SKonstantin Belousov 		    (const char *)note_end || strncmp(note_vendor,
260892328a32SKonstantin Belousov 		    note_name, checknote->n_namesz) != 0)
260932c01de2SDmitry Chagin 			goto nextnote;
261032c01de2SDmitry Chagin 
261192328a32SKonstantin Belousov 		if (cb(note, cb_arg, &res))
26126c775eb6SKonstantin Belousov 			goto ret;
261332c01de2SDmitry Chagin nextnote:
261432c01de2SDmitry Chagin 		note = (const Elf_Note *)((const char *)(note + 1) +
26151b8388cdSMikolaj Golub 		    roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
26161b8388cdSMikolaj Golub 		    roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
261732c01de2SDmitry Chagin 	}
2618eda8fe63SKonstantin Belousov retf:
261919621645SAlex Richardson 	res = false;
26206c775eb6SKonstantin Belousov ret:
26216c775eb6SKonstantin Belousov 	free(buf, M_TEMP);
26226c775eb6SKonstantin Belousov 	return (res);
262332c01de2SDmitry Chagin }
262432c01de2SDmitry Chagin 
262592328a32SKonstantin Belousov struct brandnote_cb_arg {
262692328a32SKonstantin Belousov 	Elf_Brandnote *brandnote;
262792328a32SKonstantin Belousov 	int32_t *osrel;
262892328a32SKonstantin Belousov };
262992328a32SKonstantin Belousov 
263019621645SAlex Richardson static bool
263119621645SAlex Richardson brandnote_cb(const Elf_Note *note, void *arg0, bool *res)
263292328a32SKonstantin Belousov {
263392328a32SKonstantin Belousov 	struct brandnote_cb_arg *arg;
263492328a32SKonstantin Belousov 
263592328a32SKonstantin Belousov 	arg = arg0;
263692328a32SKonstantin Belousov 
263792328a32SKonstantin Belousov 	/*
263892328a32SKonstantin Belousov 	 * Fetch the osreldate for the binary from the ELF OSABI-note if
263992328a32SKonstantin Belousov 	 * necessary.
264092328a32SKonstantin Belousov 	 */
264192328a32SKonstantin Belousov 	*res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
264292328a32SKonstantin Belousov 	    arg->brandnote->trans_osrel != NULL ?
264319621645SAlex Richardson 	    arg->brandnote->trans_osrel(note, arg->osrel) : true;
264492328a32SKonstantin Belousov 
264519621645SAlex Richardson 	return (true);
264692328a32SKonstantin Belousov }
264792328a32SKonstantin Belousov 
2648cefb93f2SKonstantin Belousov static Elf_Note fctl_note = {
2649cefb93f2SKonstantin Belousov 	.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
2650cefb93f2SKonstantin Belousov 	.n_descsz = sizeof(uint32_t),
2651cefb93f2SKonstantin Belousov 	.n_type = NT_FREEBSD_FEATURE_CTL,
2652cefb93f2SKonstantin Belousov };
2653cefb93f2SKonstantin Belousov 
2654cefb93f2SKonstantin Belousov struct fctl_cb_arg {
265519621645SAlex Richardson 	bool *has_fctl0;
2656cefb93f2SKonstantin Belousov 	uint32_t *fctl0;
2657cefb93f2SKonstantin Belousov };
2658cefb93f2SKonstantin Belousov 
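/*
 * Callback for parse_notes(): extract the first word of an
 * NT_FREEBSD_FEATURE_CTL note descriptor, the binary's feature control
 * flags.
 */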
265919621645SAlex Richardson static bool
266019621645SAlex Richardson note_fctl_cb(const Elf_Note *note, void *arg0, bool *res)
2661cefb93f2SKonstantin Belousov {
2662cefb93f2SKonstantin Belousov 	struct fctl_cb_arg *arg;
2663cefb93f2SKonstantin Belousov 	const Elf32_Word *desc;
2664cefb93f2SKonstantin Belousov 	uintptr_t p;
2665cefb93f2SKonstantin Belousov 
2666cefb93f2SKonstantin Belousov 	arg = arg0;
2667cefb93f2SKonstantin Belousov 	p = (uintptr_t)(note + 1);
2668cefb93f2SKonstantin Belousov 	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
2669cefb93f2SKonstantin Belousov 	desc = (const Elf32_Word *)p;
267019621645SAlex Richardson 	*arg->has_fctl0 = true;
2671cefb93f2SKonstantin Belousov 	*arg->fctl0 = desc[0];
267219621645SAlex Richardson 	*res = true;
267319621645SAlex Richardson 	return (true);
2674cefb93f2SKonstantin Belousov }
2675cefb93f2SKonstantin Belousov 
267632c01de2SDmitry Chagin /*
2677cefb93f2SKonstantin Belousov  * Try to find the appropriate ABI-note section for checknote, fetch
2678cefb93f2SKonstantin Belousov  * the osreldate and feature control flags for the binary from the ELF
2679cefb93f2SKonstantin Belousov  * OSABI-note.  Only the first page of the image is searched, the same
2680cefb93f2SKonstantin Belousov  * as for headers.
26811a9c7decSKonstantin Belousov  */
268219621645SAlex Richardson static bool
268392328a32SKonstantin Belousov __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
268419621645SAlex Richardson     int32_t *osrel, bool *has_fctl0, uint32_t *fctl0)
26851a9c7decSKonstantin Belousov {
26861a9c7decSKonstantin Belousov 	const Elf_Phdr *phdr;
26871a9c7decSKonstantin Belousov 	const Elf_Ehdr *hdr;
268892328a32SKonstantin Belousov 	struct brandnote_cb_arg b_arg;
2689cefb93f2SKonstantin Belousov 	struct fctl_cb_arg f_arg;
2690cefb93f2SKonstantin Belousov 	int i, j;
26911a9c7decSKonstantin Belousov 
26921a9c7decSKonstantin Belousov 	hdr = (const Elf_Ehdr *)imgp->image_header;
26931a9c7decSKonstantin Belousov 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
269492328a32SKonstantin Belousov 	b_arg.brandnote = brandnote;
269592328a32SKonstantin Belousov 	b_arg.osrel = osrel;
26960cad2aa2SKonstantin Belousov 	f_arg.has_fctl0 = has_fctl0;
2697cefb93f2SKonstantin Belousov 	f_arg.fctl0 = fctl0;
26981a9c7decSKonstantin Belousov 
26991a9c7decSKonstantin Belousov 	for (i = 0; i < hdr->e_phnum; i++) {
270092328a32SKonstantin Belousov 		if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp,
270192328a32SKonstantin Belousov 		    &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb,
270292328a32SKonstantin Belousov 		    &b_arg)) {
2703cefb93f2SKonstantin Belousov 			for (j = 0; j < hdr->e_phnum; j++) {
2704cefb93f2SKonstantin Belousov 				if (phdr[j].p_type == PT_NOTE &&
2705cefb93f2SKonstantin Belousov 				    __elfN(parse_notes)(imgp, &fctl_note,
2706cefb93f2SKonstantin Belousov 				    FREEBSD_ABI_VENDOR, &phdr[j],
2707cefb93f2SKonstantin Belousov 				    note_fctl_cb, &f_arg))
2708cefb93f2SKonstantin Belousov 					break;
2709cefb93f2SKonstantin Belousov 			}
271019621645SAlex Richardson 			return (true);
27111a9c7decSKonstantin Belousov 		}
271292328a32SKonstantin Belousov 	}
271319621645SAlex Richardson 	return (false);
27151a9c7decSKonstantin Belousov }
27161a9c7decSKonstantin Belousov 
27171a9c7decSKonstantin Belousov /*
2718e1743d02SSøren Schmidt  * Tell kern_execve.c about it, with a little help from the linker.
2719e1743d02SSøren Schmidt  */
2720a360a43dSJake Burkholder static struct execsw __elfN(execsw) = {
2721b7feabf9SEd Maste 	.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
2722b7feabf9SEd Maste 	.ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
2723a360a43dSJake Burkholder };
2724a360a43dSJake Burkholder EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
2725e7228204SAlfred Perlstein 
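/*
 * Translate ELF segment permission flags into VM protection bits.
 */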
2726ed167eaaSKonstantin Belousov static vm_prot_t
2727ed167eaaSKonstantin Belousov __elfN(trans_prot)(Elf_Word flags)
2728ed167eaaSKonstantin Belousov {
2729ed167eaaSKonstantin Belousov 	vm_prot_t prot;
2730ed167eaaSKonstantin Belousov 
2731ed167eaaSKonstantin Belousov 	prot = 0;
2732ed167eaaSKonstantin Belousov 	if (flags & PF_X)
2733ed167eaaSKonstantin Belousov 		prot |= VM_PROT_EXECUTE;
2734ed167eaaSKonstantin Belousov 	if (flags & PF_W)
2735ed167eaaSKonstantin Belousov 		prot |= VM_PROT_WRITE;
2736ed167eaaSKonstantin Belousov 	if (flags & PF_R)
2737ed167eaaSKonstantin Belousov 		prot |= VM_PROT_READ;
2738eb785fabSKonstantin Belousov #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
2739126b36a2SKonstantin Belousov 	if (i386_read_exec && (flags & PF_R))
2740676eda08SMarcel Moolenaar 		prot |= VM_PROT_EXECUTE;
2741676eda08SMarcel Moolenaar #endif
2742ed167eaaSKonstantin Belousov 	return (prot);
2743ed167eaaSKonstantin Belousov }
2744ed167eaaSKonstantin Belousov 
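/*
 * Translate VM protection bits back into ELF segment permission flags.
 */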
2745ed167eaaSKonstantin Belousov static Elf_Word
2746ed167eaaSKonstantin Belousov __elfN(untrans_prot)(vm_prot_t prot)
2747ed167eaaSKonstantin Belousov {
2748ed167eaaSKonstantin Belousov 	Elf_Word flags;
2749ed167eaaSKonstantin Belousov 
2750ed167eaaSKonstantin Belousov 	flags = 0;
2751ed167eaaSKonstantin Belousov 	if (prot & VM_PROT_EXECUTE)
2752ed167eaaSKonstantin Belousov 		flags |= PF_X;
2753ed167eaaSKonstantin Belousov 	if (prot & VM_PROT_READ)
2754ed167eaaSKonstantin Belousov 		flags |= PF_R;
2755ed167eaaSKonstantin Belousov 	if (prot & VM_PROT_WRITE)
2756ed167eaaSKonstantin Belousov 		flags |= PF_W;
2757ed167eaaSKonstantin Belousov 	return (flags);
2758ed167eaaSKonstantin Belousov }
2759fc83c5a7SKonstantin Belousov 
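/*
 * Randomly lower the stack base by up to the configured percentage (at
 * most 50%) of the effective stack size, keeping the base aligned to
 * sizeof(u_long).  The size of the resulting gap is returned.
 */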
2760889b56c8SDawid Gorecki vm_size_t
276131174518SJohn Baldwin __elfN(stackgap)(struct image_params *imgp, uintptr_t *stack_base)
2762fc83c5a7SKonstantin Belousov {
276331174518SJohn Baldwin 	uintptr_t range, rbase, gap;
2764fc83c5a7SKonstantin Belousov 	int pct;
2765fc83c5a7SKonstantin Belousov 
2766fc83c5a7SKonstantin Belousov 	pct = __elfN(aslr_stack_gap);
2767fc83c5a7SKonstantin Belousov 	if (pct == 0)
2768889b56c8SDawid Gorecki 		return (0);
2769fc83c5a7SKonstantin Belousov 	if (pct > 50)
2770fc83c5a7SKonstantin Belousov 		pct = 50;
2771fc83c5a7SKonstantin Belousov 	range = imgp->eff_stack_sz * pct / 100;
2772fc83c5a7SKonstantin Belousov 	arc4rand(&rbase, sizeof(rbase), 0);
2773fc83c5a7SKonstantin Belousov 	gap = rbase % range;
2774fc83c5a7SKonstantin Belousov 	gap &= ~(sizeof(u_long) - 1);
2775fc83c5a7SKonstantin Belousov 	*stack_base -= gap;
2776889b56c8SDawid Gorecki 	return (gap);
2777fc83c5a7SKonstantin Belousov }
2778