/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017 Dell EMC
 * Copyright (c) 2000-2001, 2003 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/reg.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/eventhandler.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define	ELF_NOTE_ROUNDSIZE	4
#define	OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel, uint32_t *fctl0);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry);
static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
    int32_t *osrel);
static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static bool __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0,
    uint32_t *fctl0);
static vm_prot_t __elfN(trans_prot)(Elf_Word);
static Elf_Word __elfN(untrans_prot)(vm_prot_t);
static size_t __elfN(prepare_register_notes)(struct thread *td,
    struct note_info_list *list, struct thread *target_td);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0,
    "include all and only RW pages in core dumps");

int __elfN(nxstack) =
#if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
    (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
    defined(__riscv)
	1;
#else
	0;
#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");

#if defined(__amd64__)
static int __elfN(vdso) = 1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading");
#else
static int __elfN(vdso) = 0;
#endif

#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
int i386_read_exec = 0;
SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
    "enable execution from readable segments");
#endif

static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR;
static int
sysctl_pie_base(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = __elfN(pie_base);
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if ((val & PAGE_MASK) != 0)
		return (EINVAL);
	__elfN(pie_base) = val;
	return (0);
}
SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_pie_base, "LU",
    "PIE load base without randomization");

SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");
#define	ASLR_NODE_OID	__CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)

/*
 * Enable ASLR by default for 64-bit non-PIE binaries.  32-bit architectures
 * have limited address space (which can cause issues for applications with
 * high memory use) so we leave it off there.
 */
static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
    &__elfN(aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization");

/*
 * Enable ASLR by default for 64-bit PIE binaries.
 */
static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
    &__elfN(pie_aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization for PIE binaries");

/*
 * Sbrk is deprecated and it can be assumed that in most cases it will not be
 * used anyway. This setting is valid only with ASLR enabled, and allows ASLR
 * to use the bss grow region.
 */
static int __elfN(aslr_honor_sbrk) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
    &__elfN(aslr_honor_sbrk), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");

static int __elfN(aslr_stack) = 1;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN,
    &__elfN(aslr_stack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable stack address randomization");

static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN,
    &__elfN(aslr_shared_page), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable shared page address randomization");

static int __elfN(sigfastblock) = 1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
    CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
    "enable sigfastblock for new processes");

static bool __elfN(allow_wx) = true;
SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
    CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
    "Allow pages to be mapped simultaneously writable and executable");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

#define	aligned(a, t)	(rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))

Elf_Brandnote __elfN(freebsd_brandnote) = {
	.hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.hdr.n_descsz = sizeof(int32_t),
	.hdr.n_type = NT_FREEBSD_ABI_TAG,
	.vendor = FREEBSD_ABI_VENDOR,
	.flags = BN_TRANSLATE_OSREL,
	.trans_osrel = __elfN(freebsd_trans_osrel)
};

static bool
__elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
{
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	*osrel = *(const int32_t *)(p);

	return (true);
}

static const char GNU_ABI_VENDOR[] = "GNU";
static int GNU_KFREEBSD_ABI_DESC = 3;

Elf_Brandnote __elfN(kfreebsd_brandnote) = {
	.hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
	.hdr.n_descsz = 16,	/* XXX at least 16 */
	.hdr.n_type = 1,
	.vendor = GNU_ABI_VENDOR,
	.flags = BN_TRANSLATE_OSREL,
	.trans_osrel = kfreebsd_trans_osrel
};

static bool
kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
{
	const Elf32_Word *desc;
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);

	desc = (const Elf32_Word *)p;
	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
		return (false);

	/*
	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
	 */
	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];

	return (true);
}

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS) {
		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
		    __func__, entry);
		return (-1);
	}
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

bool
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	bool rval = false;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_sysent == entry->sysvec) {
			rval = true;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel, uint32_t *fctl0)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	Elf_Brandinfo *bi, *bi_m;
	bool ret, has_fctl0;
	int i, interp_name_len;

	interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;

	/*
	 * We support four types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding w/in the ELF header, (3) path of the `interp_path'
	 * field, and (4) the ".note.ABI-tag" ELF section.
	 */

	/* Look for an ".note.ABI-tag" ELF section */
	bi_m = NULL;
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL)
			continue;
		if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
			continue;
		if (hdr->e_machine == bi->machine && (bi->flags &
		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
			has_fctl0 = false;
			*fctl0 = 0;
			*osrel = 0;
			ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
			    &has_fctl0, fctl0);
			/* Give brand a chance to veto check_note's guess */
			if (ret && bi->header_supported) {
				ret = bi->header_supported(imgp, osrel,
				    has_fctl0 ? fctl0 : NULL);
			}
			/*
			 * If the note checker claimed the binary, but the
			 * interpreter path in the image does not match the
			 * default one for the brand, try to search for other
			 * brands with the same interpreter.  Either there is
			 * a better brand with the right interpreter, or,
			 * failing this, we return the first brand which
			 * accepted our note and, optionally, header.
			 */
			if (ret && bi_m == NULL && interp != NULL &&
			    (bi->interp_path == NULL ||
			    (strlen(bi->interp_path) + 1 != interp_name_len ||
			    strncmp(interp, bi->interp_path, interp_name_len)
			    != 0))) {
				bi_m = bi;
				ret = 0;
			}
			if (ret)
				return (bi);
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    (bi->compat_3_brand != NULL &&
		    strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand) == 0))) {
			/* Looks good, but give brand a chance to veto */
			if (bi->header_supported == NULL ||
			    bi->header_supported(imgp, NULL, NULL)) {
				/*
				 * Again, prefer strictly matching
				 * interpreter path.
				 */
				if (interp_name_len == 0 &&
				    bi->interp_path == NULL)
					return (bi);
				if (bi->interp_path != NULL &&
				    strlen(bi->interp_path) + 1 ==
				    interp_name_len && strncmp(interp,
				    bi->interp_path, interp_name_len) == 0)
					return (bi);
				if (bi_m == NULL)
					bi_m = bi;
			}
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* No known brand, see if the header is recognized by any brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
		    bi->header_supported == NULL)
			continue;
		if (hdr->e_machine == bi->machine) {
			ret = bi->header_supported(imgp, NULL, NULL);
			if (ret)
				return (bi);
		}
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi == NULL || (bi->flags &
			    (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
			    != 0)
				continue;
			if (hdr->e_machine == bi->machine &&
			    bi->interp_path != NULL &&
			    /* ELF image p_filesz includes terminating zero */
			    strlen(bi->interp_path) + 1 == interp_name_len &&
			    strncmp(interp, bi->interp_path, interp_name_len)
			    == 0 && (bi->header_supported == NULL ||
			    bi->header_supported(imgp, NULL, NULL)))
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand &&
		    (bi->header_supported == NULL ||
		    bi->header_supported(imgp, NULL, NULL)))
			return (bi);
	}
	return (NULL);
}

static bool
__elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
{
	return (hdr->e_phoff <= PAGE_SIZE &&
	    (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */

	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	int error;
	vm_offset_t off;

	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
	    trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);

	/*
	 * Find the page from the underlying object.
	 */
	if (object != NULL) {
		sf = vm_imgact_map_page(object, offset);
		if (sf == NULL)
			return (KERN_FAILURE);
		off = offset - trunc_page(offset);
		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
		    end - start);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (KERN_FAILURE);
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object,
    vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, locked, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		end = trunc_page(end);
	}
	if (start >= end)
		return (KERN_SUCCESS);
	if ((offset & PAGE_MASK) != 0) {
		/*
		 * The mapping is not page aligned.  This means that we have
		 * to copy the data.
		 */
		rv = vm_map_fixed(map, NULL, 0, start, end - start,
		    prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
		if (rv != KERN_SUCCESS)
			return (rv);
		if (object == NULL)
			return (KERN_SUCCESS);
		for (; start < end; start += sz) {
			sf = vm_imgact_map_page(object, offset);
			if (sf == NULL)
				return (KERN_FAILURE);
			off = offset - trunc_page(offset);
			sz = end - start;
			if (sz > PAGE_SIZE - off)
				sz = PAGE_SIZE - off;
			error = copyout((caddr_t)sf_buf_kva(sf) + off,
			    (caddr_t)start, sz);
			vm_imgact_unmap_page(sf);
			if (error != 0)
				return (KERN_FAILURE);
			offset += sz;
		}
	} else {
		vm_object_reference(object);
		rv = vm_map_fixed(map, object, offset, start, end - start,
		    prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
		    (object != NULL ? MAP_VN_EXEC : 0));
		if (rv != KERN_SUCCESS) {
			locked = VOP_ISLOCKED(imgp->vp);
			VOP_UNLOCK(imgp->vp);
			vm_object_deallocate(object);
			vn_lock(imgp->vp, locked | LK_RETRY);
			return (rv);
		} else if (object != NULL) {
			MPASS(imgp->vp->v_object == object);
			VOP_SET_TEXT_CHECKED(imgp->vp);
		}
	}
	return (KERN_SUCCESS);
}

static int
__elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_map_t map;
	vm_object_t object;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_ooffset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	object = imgp->object;
	map = &imgp->proc->p_vmspace->vm_map;
	map_addr = trunc_page((vm_offset_t)vmaddr);
	file_addr = trunc_page(offset);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (filsz == 0)
		map_len = 0;
	else if (memsz > filsz)
		map_len = trunc_page(offset + filsz) - file_addr;
	else
		map_len = round_page(offset + filsz) - file_addr;

	if (map_len != 0) {
		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(imgp, map, object, file_addr,
		    map_addr, map_addr + map_len, prot, cow);
		if (rv != KERN_SUCCESS)
			return (EINVAL);

		/* we can stop now if we've covered it all */
		if (memsz == filsz)
			return (0);
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
	    filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
		    map_addr + map_len, prot, 0);
		if (rv != KERN_SUCCESS)
			return (EINVAL);
	}

	if (copy_len != 0) {
		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/* send the page fragment to user space */
		error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr,
		    copy_len);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (error);
	}

	/*
	 * Remove write access to the page if it was only granted by map_insert
	 * to allow copyout.
	 */
	if ((prot & VM_PROT_WRITE) == 0)
		vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
		    map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);

	return (0);
}

static int
__elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
{
	vm_prot_t prot;
	u_long base_addr;
	bool first;
	int error, i;

	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	base_addr = 0;
	first = true;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

		/* Loadable segment */
		prot = __elfN(trans_prot)(phdr[i].p_flags);
		error = __elfN(load_section)(imgp, phdr[i].p_offset,
		    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
		    phdr[i].p_memsz, phdr[i].p_filesz, prot);
		if (error != 0)
			return (error);

		/*
		 * Establish the base address if this is the first segment.
		 */
		if (first) {
			base_addr = trunc_page(phdr[i].p_vaddr + rbase);
			first = false;
		}
	}

	if (base_addrp != NULL)
		*base_addrp = base_addr;

	return (0);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vattr *attr;
	struct image_params *imgp;
	u_long rbase;
	u_long base_addr = 0;
	int error;

#ifdef CAPABILITY_MODE
	/*
	 * XXXJA: This check can go away once we are sufficiently confident
	 * that the checks in namei() are correct.
	 */
	if (IN_CAPABILITY_MODE(curthread))
		return (ECAPMODE);
#endif

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;

	NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF,
	    UIO_SYSSPACE, file);
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE_PNBUF(nd);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto fail;

	error = exec_map_first_page(imgp);
	if (error)
		goto fail;

	imgp->object = nd->ni_vp->v_object;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	if (!__elfN(phdr_in_zero_page)(hdr)) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		error = ENOEXEC;
		goto fail;
	}

	error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
	if (error != 0)
		goto fail;

	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);

	if (nd->ni_vp) {
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
		vput(nd->ni_vp);
	}
	free(tempdata, M_TEMP);

	return (error);
}

/*
 * Select a randomized valid address in the map, between minv and
 * maxv, with the specified alignment.  The [minv, maxv) range must
 * belong to the map.  Note that the function only allocates the
 * address; it is up to the caller to clamp maxv so that the final
 * allocation length fits into the map.
 *
 * The result is returned in *resp; an error code indicates that the
 * arguments did not pass sanity checks for overflow and range
 * correctness.
 */
static int
__CONCAT(rnd_, __elfN(base))(vm_map_t map, u_long minv, u_long maxv,
    u_int align, u_long *resp)
{
	u_long rbase, res;

	MPASS(vm_map_min(map) <= minv);

	if (minv >= maxv || minv + align >= maxv || maxv > vm_map_max(map)) {
		uprintf("Invalid ELF segments layout\n");
		return (ENOEXEC);
	}

	arc4rand(&rbase, sizeof(rbase), 0);
	res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
	res &= ~((u_long)align - 1);
	if (res >= maxv)
		res -= align;

	KASSERT(res >= minv,
	    ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
	    res, minv, maxv, rbase));
	KASSERT(res < maxv,
	    ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
	    res, maxv, minv, rbase));

	*resp = res;
	return (0);
}

static int
__elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long et_dyn_addr)
{
	struct vmspace *vmspace;
	const char *err_str;
	u_long text_size, data_size, total_size, text_addr, data_addr;
	u_long seg_size, seg_addr;
	int i;

	err_str = NULL;
	text_size = data_size = total_size = text_addr = data_addr = 0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

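		/* Compute the page-rounded extent of this loadable segment. */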
		seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
		seg_size = round_page(phdr[i].p_memsz +
		    phdr[i].p_vaddr + et_dyn_addr - seg_addr);

		/*
		 * Make the largest executable segment the official
		 * text segment and all others data.
		 *
		 * Note that obreak() assumes that data_addr + data_size == end
		 * of data load area, and the ELF file format expects segments
		 * to be sorted by address.  If multiple data segments exist,
		 * the last one will be used.
		 */

		if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
			text_size = seg_size;
			text_addr = seg_addr;
		} else {
			data_size = seg_size;
			data_addr = seg_addr;
		}
		total_size += seg_size;
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
		err_str = "Data segment size exceeds process limit";
	else if (text_size > maxtsiz)
		err_str = "Text segment size exceeds system limit";
	else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
		err_str = "Total segment size exceeds process limit";
	else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
		err_str = "Data segment size exceeds resource limit";
	else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
		err_str = "Total segment size exceeds resource limit";
	PROC_UNLOCK(imgp->proc);
	if (err_str != NULL) {
		uprintf("%s\n", err_str);
		return (ENOMEM);
	}

	vmspace = imgp->proc->p_vmspace;
	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	return (0);
}

static int
__elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
    char **interpp, bool *free_interpp)
{
	struct thread *td;
	char *interp;
	int error, interp_name_len;

	KASSERT(phdr->p_type == PT_INTERP,
	    ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	td = curthread;

	/* Path to interpreter */
	if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	interp_name_len = phdr->p_filesz;
	if (phdr->p_offset > PAGE_SIZE ||
	    interp_name_len > PAGE_SIZE - phdr->p_offset) {
		/*
		 * The vnode lock might be needed by the pagedaemon to
		 * clean pages owned by the vnode.  Do not allow sleep
		 * waiting for memory with the vnode locked, instead
		 * try non-sleepable allocation first, and if it
		 * fails, go to the slow path where we drop the lock
		 * and do M_WAITOK.  A text reference prevents
		 * modifications to the vnode content.
		 */
		interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
		if (interp == NULL) {
			VOP_UNLOCK(imgp->vp);
			interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}

		error = vn_rdwr(UIO_READ, imgp->vp, interp,
		    interp_name_len, phdr->p_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
		    NOCRED, NULL, td);
		if (error != 0) {
			free(interp, M_TEMP);
			uprintf("i/o error PT_INTERP %d\n", error);
			return (error);
		}
		interp[interp_name_len] = '\0';

		*interpp = interp;
		*free_interpp = true;
		return (0);
	}

	interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
	if (interp[interp_name_len - 1] != '\0') {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	*interpp = interp;
	*free_interpp = false;
	return (0);
}

static int
__elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
    const char *interp, u_long *addr, u_long *entry)
{
	char *path;
	int error;

	if (brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s",
		    brand_info->emul_path, interp);
		error = __elfN(load_file)(imgp->proc, path, addr, entry);
		free(path, M_TEMP);
		if (error == 0)
			return (0);
	}

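	/*
	 * If the brand provides a replacement interpreter path, try it
	 * next, but only when the image asked for the brand's default
	 * interpreter (or the brand defines none).
	 */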
10869274fb35SEdward Tomasz Napierala if (brand_info->interp_newpath != NULL && 10879274fb35SEdward Tomasz Napierala (brand_info->interp_path == NULL || 10889274fb35SEdward Tomasz Napierala strcmp(interp, brand_info->interp_path) == 0)) { 10899274fb35SEdward Tomasz Napierala error = __elfN(load_file)(imgp->proc, 10909274fb35SEdward Tomasz Napierala brand_info->interp_newpath, addr, entry); 10919274fb35SEdward Tomasz Napierala if (error == 0) 10929274fb35SEdward Tomasz Napierala return (0); 10939274fb35SEdward Tomasz Napierala } 10949274fb35SEdward Tomasz Napierala 10959274fb35SEdward Tomasz Napierala error = __elfN(load_file)(imgp->proc, interp, addr, entry); 10969274fb35SEdward Tomasz Napierala if (error == 0) 10979274fb35SEdward Tomasz Napierala return (0); 10989274fb35SEdward Tomasz Napierala 10999274fb35SEdward Tomasz Napierala uprintf("ELF interpreter %s not found, error %d\n", interp, error); 11009274fb35SEdward Tomasz Napierala return (error); 11019274fb35SEdward Tomasz Napierala } 11029274fb35SEdward Tomasz Napierala 1103fa50a355SKonstantin Belousov /* 1104fa50a355SKonstantin Belousov * Impossible et_dyn_addr initial value indicating that the real base 1105fa50a355SKonstantin Belousov * must be calculated later with some randomization applied. 1106fa50a355SKonstantin Belousov */ 1107fa50a355SKonstantin Belousov #define ET_DYN_ADDR_RAND 1 1108fa50a355SKonstantin Belousov 1109303b270bSEivind Eklund static int 11103ebc1248SPeter Wemm __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) 1111e1743d02SSøren Schmidt { 11126c775eb6SKonstantin Belousov struct thread *td; 11136c775eb6SKonstantin Belousov const Elf_Ehdr *hdr; 111432c01de2SDmitry Chagin const Elf_Phdr *phdr; 1115e5e6093bSAlan Cox Elf_Auxargs *elf_auxargs; 11165856e12eSJohn Dyson struct vmspace *vmspace; 1117fa50a355SKonstantin Belousov vm_map_t map; 11189274fb35SEdward Tomasz Napierala char *interp; 1119d1dbc694SJohn Polstra Elf_Brandinfo *brand_info; 11205fe3ed62SJake Burkholder struct sysentvec *sv; 112120e1174aSEdward Tomasz Napierala u_long addr, baddr, et_dyn_addr, entry, proghdr; 1122e499988fSKonstantin Belousov u_long maxalign, maxsalign, mapsz, maxv, maxv1, anon_loc; 1123cefb93f2SKonstantin Belousov uint32_t fctl0; 11246c775eb6SKonstantin Belousov int32_t osrel; 112509c78d53SEdward Tomasz Napierala bool free_interp; 11269274fb35SEdward Tomasz Napierala int error, i, n; 11276c775eb6SKonstantin Belousov 11286c775eb6SKonstantin Belousov hdr = (const Elf_Ehdr *)imgp->image_header; 1129e1743d02SSøren Schmidt 1130e1743d02SSøren Schmidt /* 1131e1743d02SSøren Schmidt * Do we have a valid ELF header ? 1132900b28f9SMaxim Sobolev * 1133900b28f9SMaxim Sobolev * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later 1134900b28f9SMaxim Sobolev * if particular brand doesn't support it. 1135e1743d02SSøren Schmidt */ 1136900b28f9SMaxim Sobolev if (__elfN(check_header)(hdr) != 0 || 1137900b28f9SMaxim Sobolev (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) 1138a7cddfedSJake Burkholder return (-1); 1139e1743d02SSøren Schmidt 1140e1743d02SSøren Schmidt /* 1141e1743d02SSøren Schmidt * From here on down, we return an errno, not -1, as we've 1142e1743d02SSøren Schmidt * detected an ELF file. 
1143e1743d02SSøren Schmidt */ 1144e1743d02SSøren Schmidt 11457de1bc13SKonstantin Belousov if (!__elfN(phdr_in_zero_page)(hdr)) { 11466b16d664SEd Maste uprintf("Program headers not in the first page\n"); 1147a7cddfedSJake Burkholder return (ENOEXEC); 1148e1743d02SSøren Schmidt } 114952c24af7SPeter Wemm phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 11506b16d664SEd Maste if (!aligned(phdr, Elf_Addr)) { 11516b16d664SEd Maste uprintf("Unaligned program headers\n"); 115293d1c728SKonstantin Belousov return (ENOEXEC); 11536b16d664SEd Maste } 11546c775eb6SKonstantin Belousov 11556c775eb6SKonstantin Belousov n = error = 0; 11567564c4adSKonstantin Belousov baddr = 0; 11576c775eb6SKonstantin Belousov osrel = 0; 1158cefb93f2SKonstantin Belousov fctl0 = 0; 11596c775eb6SKonstantin Belousov entry = proghdr = 0; 11609274fb35SEdward Tomasz Napierala interp = NULL; 116109c78d53SEdward Tomasz Napierala free_interp = false; 11626c775eb6SKonstantin Belousov td = curthread; 1163714d6d09SKonstantin Belousov 1164714d6d09SKonstantin Belousov /* 1165714d6d09SKonstantin Belousov * Somewhat arbitrary, limit accepted max alignment for the 1166714d6d09SKonstantin Belousov * loadable segment to the max supported superpage size. Too 1167714d6d09SKonstantin Belousov * large alignment requests are not useful and are indicators 1168714d6d09SKonstantin Belousov * of corrupted or outright malicious binary. 1169714d6d09SKonstantin Belousov */ 1170fa50a355SKonstantin Belousov maxalign = PAGE_SIZE; 1171714d6d09SKonstantin Belousov maxsalign = PAGE_SIZE * 1024; 1172714d6d09SKonstantin Belousov for (i = MAXPAGESIZES - 1; i > 0; i--) { 1173714d6d09SKonstantin Belousov if (pagesizes[i] > maxsalign) 1174714d6d09SKonstantin Belousov maxsalign = pagesizes[i]; 1175714d6d09SKonstantin Belousov } 1176714d6d09SKonstantin Belousov 1177fa50a355SKonstantin Belousov mapsz = 0; 11786c775eb6SKonstantin Belousov 11795fe3ed62SJake Burkholder for (i = 0; i < hdr->e_phnum; i++) { 1180291c06a1SKonstantin Belousov switch (phdr[i].p_type) { 1181291c06a1SKonstantin Belousov case PT_LOAD: 11827564c4adSKonstantin Belousov if (n == 0) 11837564c4adSKonstantin Belousov baddr = phdr[i].p_vaddr; 118436df8f54SKonstantin Belousov if (!powerof2(phdr[i].p_align) || 118536df8f54SKonstantin Belousov phdr[i].p_align > maxsalign) { 1186714d6d09SKonstantin Belousov uprintf("Invalid segment alignment\n"); 1187714d6d09SKonstantin Belousov error = ENOEXEC; 1188714d6d09SKonstantin Belousov goto ret; 1189714d6d09SKonstantin Belousov } 1190fa50a355SKonstantin Belousov if (phdr[i].p_align > maxalign) 1191fa50a355SKonstantin Belousov maxalign = phdr[i].p_align; 1192bf839416SKonstantin Belousov if (mapsz + phdr[i].p_memsz < mapsz) { 1193bf839416SKonstantin Belousov uprintf("Mapsize overflow\n"); 1194bf839416SKonstantin Belousov error = ENOEXEC; 1195bf839416SKonstantin Belousov goto ret; 1196bf839416SKonstantin Belousov } 1197fa50a355SKonstantin Belousov mapsz += phdr[i].p_memsz; 11987564c4adSKonstantin Belousov n++; 11999bcd7482SEdward Tomasz Napierala 12009bcd7482SEdward Tomasz Napierala /* 12019bcd7482SEdward Tomasz Napierala * If this segment contains the program headers, 12029bcd7482SEdward Tomasz Napierala * remember their virtual address for the AT_PHDR 12039bcd7482SEdward Tomasz Napierala * aux entry. Static binaries don't usually include 12049bcd7482SEdward Tomasz Napierala * a PT_PHDR entry. 
12059bcd7482SEdward Tomasz Napierala */ 12069bcd7482SEdward Tomasz Napierala if (phdr[i].p_offset == 0 && 120788dd7a0aSKonstantin Belousov hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <= 120888dd7a0aSKonstantin Belousov phdr[i].p_filesz) 12099bcd7482SEdward Tomasz Napierala proghdr = phdr[i].p_vaddr + hdr->e_phoff; 1210291c06a1SKonstantin Belousov break; 1211291c06a1SKonstantin Belousov case PT_INTERP: 1212e5e6093bSAlan Cox /* Path to interpreter */ 1213d3ee0a15SJonathan T. Looney if (interp != NULL) { 1214d3ee0a15SJonathan T. Looney uprintf("Multiple PT_INTERP headers\n"); 1215d3ee0a15SJonathan T. Looney error = ENOEXEC; 1216d3ee0a15SJonathan T. Looney goto ret; 1217d3ee0a15SJonathan T. Looney } 121809c78d53SEdward Tomasz Napierala error = __elfN(get_interp)(imgp, &phdr[i], &interp, 121909c78d53SEdward Tomasz Napierala &free_interp); 122009c78d53SEdward Tomasz Napierala if (error != 0) 12216c775eb6SKonstantin Belousov goto ret; 1222291c06a1SKonstantin Belousov break; 1223291c06a1SKonstantin Belousov case PT_GNU_STACK: 1224291c06a1SKonstantin Belousov if (__elfN(nxstack)) 1225291c06a1SKonstantin Belousov imgp->stack_prot = 1226291c06a1SKonstantin Belousov __elfN(trans_prot)(phdr[i].p_flags); 1227316b3843SKonstantin Belousov imgp->stack_sz = phdr[i].p_memsz; 1228291c06a1SKonstantin Belousov break; 12299bcd7482SEdward Tomasz Napierala case PT_PHDR: /* Program header table info */ 12309bcd7482SEdward Tomasz Napierala proghdr = phdr[i].p_vaddr; 12319bcd7482SEdward Tomasz Napierala break; 12323ebc1248SPeter Wemm } 12333ebc1248SPeter Wemm } 12343ebc1248SPeter Wemm 123509c78d53SEdward Tomasz Napierala brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0); 12365fe3ed62SJake Burkholder if (brand_info == NULL) { 12375fe3ed62SJake Burkholder uprintf("ELF binary type \"%u\" not known.\n", 12385fe3ed62SJake Burkholder hdr->e_ident[EI_OSABI]); 12396c775eb6SKonstantin Belousov error = ENOEXEC; 12406c775eb6SKonstantin Belousov goto ret; 12413ebc1248SPeter Wemm } 1242fa50a355SKonstantin Belousov sv = brand_info->sysvec; 124377ebe276SEd Maste et_dyn_addr = 0; 1244ab02d85fSKonstantin Belousov if (hdr->e_type == ET_DYN) { 12456b16d664SEd Maste if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { 12466b16d664SEd Maste uprintf("Cannot execute shared object\n"); 12476c775eb6SKonstantin Belousov error = ENOEXEC; 12486c775eb6SKonstantin Belousov goto ret; 12496b16d664SEd Maste } 12507564c4adSKonstantin Belousov /* 12517564c4adSKonstantin Belousov * Honour the base load address from the dso if it is 12527564c4adSKonstantin Belousov * non-zero for some reason. 
12537564c4adSKonstantin Belousov */ 1254fa50a355SKonstantin Belousov if (baddr == 0) { 1255fa50a355SKonstantin Belousov if ((sv->sv_flags & SV_ASLR) == 0 || 1256fa50a355SKonstantin Belousov (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) 125795aafd69SKonstantin Belousov et_dyn_addr = __elfN(pie_base); 1258fa50a355SKonstantin Belousov else if ((__elfN(pie_aslr_enabled) && 1259fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) || 1260fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0) 1261fa50a355SKonstantin Belousov et_dyn_addr = ET_DYN_ADDR_RAND; 1262fa50a355SKonstantin Belousov else 126395aafd69SKonstantin Belousov et_dyn_addr = __elfN(pie_base); 126477ebe276SEd Maste } 1265fa50a355SKonstantin Belousov } 12663ebc1248SPeter Wemm 126760bb3943SAlan Cox /* 126860bb3943SAlan Cox * Avoid a possible deadlock if the current address space is destroyed 126960bb3943SAlan Cox * and that address space maps the locked vnode. In the common case, 127060bb3943SAlan Cox * the locked vnode's v_usecount is decremented but remains greater 127160bb3943SAlan Cox * than zero. Consequently, the vnode lock is not needed by vrele(). 127260bb3943SAlan Cox * However, in cases where the vnode lock is external, such as nullfs, 127360bb3943SAlan Cox * v_usecount may become zero. 12741dfab802SAlan Cox * 12751dfab802SAlan Cox * The VV_TEXT flag prevents modifications to the executable while 12761dfab802SAlan Cox * the vnode is unlocked. 127760bb3943SAlan Cox */ 1278b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 127960bb3943SAlan Cox 1280fa50a355SKonstantin Belousov /* 1281fa50a355SKonstantin Belousov * Decide whether to enable randomization of user mappings. 1282fa50a355SKonstantin Belousov * First, reset user preferences for the setid binaries. 1283fa50a355SKonstantin Belousov * Then, account for the support of the randomization by the 1284fa50a355SKonstantin Belousov * ABI, by user preferences, and make special treatment for 1285fa50a355SKonstantin Belousov * PIE binaries. 1286fa50a355SKonstantin Belousov */ 1287fa50a355SKonstantin Belousov if (imgp->credential_setid) { 1288fa50a355SKonstantin Belousov PROC_LOCK(imgp->proc); 1289796a8e1aSKonstantin Belousov imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE | 1290796a8e1aSKonstantin Belousov P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC); 1291fa50a355SKonstantin Belousov PROC_UNLOCK(imgp->proc); 1292fa50a355SKonstantin Belousov } 1293fa50a355SKonstantin Belousov if ((sv->sv_flags & SV_ASLR) == 0 || 1294fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 || 1295fa50a355SKonstantin Belousov (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) { 1296fa50a355SKonstantin Belousov KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND, 1297fa50a355SKonstantin Belousov ("et_dyn_addr == RAND and !ASLR")); 1298fa50a355SKonstantin Belousov } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 || 1299fa50a355SKonstantin Belousov (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) || 1300fa50a355SKonstantin Belousov et_dyn_addr == ET_DYN_ADDR_RAND) { 1301fa50a355SKonstantin Belousov imgp->map_flags |= MAP_ASLR; 1302fa50a355SKonstantin Belousov /* 1303fa50a355SKonstantin Belousov * If user does not care about sbrk, utilize the bss 1304fa50a355SKonstantin Belousov * grow region for mappings as well. We can select 1305fa50a355SKonstantin Belousov * the base for the image anywere and still not suffer 1306fa50a355SKonstantin Belousov * from the fragmentation. 
1307fa50a355SKonstantin Belousov */ 1308fa50a355SKonstantin Belousov if (!__elfN(aslr_honor_sbrk) || 1309fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) 1310fa50a355SKonstantin Belousov imgp->map_flags |= MAP_ASLR_IGNSTART; 13111811c1e9SMark Johnston if (__elfN(aslr_stack)) 13121811c1e9SMark Johnston imgp->map_flags |= MAP_ASLR_STACK; 1313939f0b63SKornel Dulęba if (__elfN(aslr_shared_page)) 1314939f0b63SKornel Dulęba imgp->imgp_flags |= IMGP_ASLR_SHARED_PAGE; 1315fa50a355SKonstantin Belousov } 1316fa50a355SKonstantin Belousov 1317796a8e1aSKonstantin Belousov if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 && 1318796a8e1aSKonstantin Belousov (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) || 1319796a8e1aSKonstantin Belousov (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0) 13202e1c94aaSKonstantin Belousov imgp->map_flags |= MAP_WXORX; 13212e1c94aaSKonstantin Belousov 132289b57fcfSKonstantin Belousov error = exec_new_vmspace(imgp, sv); 1323fa50a355SKonstantin Belousov 132419059a13SJohn Baldwin imgp->proc->p_sysent = sv; 1325615f22b2SDmitry Chagin imgp->proc->p_elf_brandinfo = brand_info; 1326e1743d02SSøren Schmidt 13271811c1e9SMark Johnston vmspace = imgp->proc->p_vmspace; 13281811c1e9SMark Johnston map = &vmspace->vm_map; 13291811c1e9SMark Johnston maxv = sv->sv_usrstack; 13301811c1e9SMark Johnston if ((imgp->map_flags & MAP_ASLR_STACK) == 0) 13311811c1e9SMark Johnston maxv -= lim_max(td, RLIMIT_STACK); 13321811c1e9SMark Johnston if (error == 0 && mapsz >= maxv - vm_map_min(map)) { 1333bf839416SKonstantin Belousov uprintf("Excessive mapping size\n"); 1334bf839416SKonstantin Belousov error = ENOEXEC; 1335bf839416SKonstantin Belousov } 1336bf839416SKonstantin Belousov 13379cf78c1cSKonstantin Belousov if (error == 0 && et_dyn_addr == ET_DYN_ADDR_RAND) { 1338fa50a355SKonstantin Belousov KASSERT((map->flags & MAP_ASLR) != 0, 1339fa50a355SKonstantin Belousov ("ET_DYN_ADDR_RAND but !MAP_ASLR")); 13409cf78c1cSKonstantin Belousov error = __CONCAT(rnd_, __elfN(base))(map, 1341fa50a355SKonstantin Belousov vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA), 1342fa50a355SKonstantin Belousov /* reserve half of the address space to interpreter */ 134336df8f54SKonstantin Belousov maxv / 2, maxalign, &et_dyn_addr); 1344fa50a355SKonstantin Belousov } 1345fa50a355SKonstantin Belousov 134678022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 13476c775eb6SKonstantin Belousov if (error != 0) 13486c775eb6SKonstantin Belousov goto ret; 134960bb3943SAlan Cox 13509bcd7482SEdward Tomasz Napierala error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL); 1351292177e6SAlan Cox if (error != 0) 13526c775eb6SKonstantin Belousov goto ret; 1353e1743d02SSøren Schmidt 135420e1174aSEdward Tomasz Napierala error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr); 135520e1174aSEdward Tomasz Napierala if (error != 0) 135620e1174aSEdward Tomasz Napierala goto ret; 1357cac45152SMatthew Dillon 1358cac45152SMatthew Dillon /* 1359c460ac3aSPeter Wemm * We load the dynamic linker where a userland call 1360c460ac3aSPeter Wemm * to mmap(0, ...) would put it. The rationale behind this 1361c460ac3aSPeter Wemm * calculation is that it leaves room for the heap to grow to 1362c460ac3aSPeter Wemm * its maximum allowed size. 
1363c460ac3aSPeter Wemm */ 13646c775eb6SKonstantin Belousov addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, 1365292177e6SAlan Cox RLIMIT_DATA)); 1366fa50a355SKonstantin Belousov if ((map->flags & MAP_ASLR) != 0) { 1367fa50a355SKonstantin Belousov maxv1 = maxv / 2 + addr / 2; 13689cf78c1cSKonstantin Belousov error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, 136941032835SJason A. Harmening (MAXPAGESIZES > 1 && pagesizes[1] != 0) ? 1370e499988fSKonstantin Belousov pagesizes[1] : pagesizes[0], &anon_loc); 13719cf78c1cSKonstantin Belousov if (error != 0) 13729cf78c1cSKonstantin Belousov goto ret; 1373e499988fSKonstantin Belousov map->anon_loc = anon_loc; 1374fa50a355SKonstantin Belousov } else { 1375fa50a355SKonstantin Belousov map->anon_loc = addr; 1376fa50a355SKonstantin Belousov } 1377e1743d02SSøren Schmidt 13781811c1e9SMark Johnston entry = (u_long)hdr->e_entry + et_dyn_addr; 1379ea5a2b2eSSøren Schmidt imgp->entry_addr = entry; 1380ea5a2b2eSSøren Schmidt 138160bb3943SAlan Cox if (interp != NULL) { 1382b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 1383fa50a355SKonstantin Belousov if ((map->flags & MAP_ASLR) != 0) { 138441032835SJason A. Harmening /* Assume that interpreter fits into 1/4 of AS */ 1385fa50a355SKonstantin Belousov maxv1 = maxv / 2 + addr / 2; 13869cf78c1cSKonstantin Belousov error = __CONCAT(rnd_, __elfN(base))(map, addr, 13879cf78c1cSKonstantin Belousov maxv1, PAGE_SIZE, &addr); 1388fa50a355SKonstantin Belousov } 13899cf78c1cSKonstantin Belousov if (error == 0) { 13909cf78c1cSKonstantin Belousov error = __elfN(load_interp)(imgp, brand_info, interp, 13919cf78c1cSKonstantin Belousov &addr, &imgp->entry_addr); 13929cf78c1cSKonstantin Belousov } 139378022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 13949274fb35SEdward Tomasz Napierala if (error != 0) 13956c775eb6SKonstantin Belousov goto ret; 139695c807cfSRobert Watson } else 13977564c4adSKonstantin Belousov addr = et_dyn_addr; 1398ea5a2b2eSSøren Schmidt 13991811c1e9SMark Johnston error = exec_map_stack(imgp); 14001811c1e9SMark Johnston if (error != 0) 14011811c1e9SMark Johnston goto ret; 14021811c1e9SMark Johnston 1403e1743d02SSøren Schmidt /* 1404e3532331SJohn Baldwin * Construct auxargs table (used by the copyout_auxargs routine) 1405e1743d02SSøren Schmidt */ 14062d6b8546SKonstantin Belousov elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT); 14072d6b8546SKonstantin Belousov if (elf_auxargs == NULL) { 1408b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 1409a163d034SWarner Losh elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); 141078022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 14112d6b8546SKonstantin Belousov } 1412e1743d02SSøren Schmidt elf_auxargs->execfd = -1; 14139bcd7482SEdward Tomasz Napierala elf_auxargs->phdr = proghdr + et_dyn_addr; 1414e1743d02SSøren Schmidt elf_auxargs->phent = hdr->e_phentsize; 1415e1743d02SSøren Schmidt elf_auxargs->phnum = hdr->e_phnum; 1416e1743d02SSøren Schmidt elf_auxargs->pagesz = PAGE_SIZE; 1417e1743d02SSøren Schmidt elf_auxargs->base = addr; 1418e1743d02SSøren Schmidt elf_auxargs->flags = 0; 1419e1743d02SSøren Schmidt elf_auxargs->entry = entry; 1420d36eec69SWarner Losh elf_auxargs->hdr_eflags = hdr->e_flags; 1421e1743d02SSøren Schmidt 1422e1743d02SSøren Schmidt imgp->auxargs = elf_auxargs; 1423e1743d02SSøren Schmidt imgp->interpreted = 0; 1424a0ea661fSNathan Whitehorn imgp->reloc_base = addr; 142532c01de2SDmitry Chagin imgp->proc->p_osrel = osrel; 1426cefb93f2SKonstantin Belousov imgp->proc->p_fctl0 = fctl0; 
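	/*
	 * osrel and fctl0 were determined by __elfN(get_brandinfo)() above
	 * and are recorded on the process here.  p_elf_flags caches the ELF
	 * header e_flags, which __elfN(puthdr)() later copies into the
	 * e_flags field of a core dump header.
	 */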
1427885f13dcSJohn Baldwin imgp->proc->p_elf_flags = hdr->e_flags; 1428f231de47SKonstantin Belousov 14296c775eb6SKonstantin Belousov ret: 1430b4b20492SKonstantin Belousov ASSERT_VOP_LOCKED(imgp->vp, "skipped relock"); 143109c78d53SEdward Tomasz Napierala if (free_interp) 143209c78d53SEdward Tomasz Napierala free(interp, M_TEMP); 1433a7cddfedSJake Burkholder return (error); 1434e1743d02SSøren Schmidt } 1435e1743d02SSøren Schmidt 1436ebf98866SMark Johnston #define elf_suword __CONCAT(suword, __ELF_WORD_SIZE) 14373ebc1248SPeter Wemm 143803b0d68cSJohn Baldwin int 1439d8010b11SJohn Baldwin __elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base) 1440e1743d02SSøren Schmidt { 1441ecbb00a2SDoug Rabson Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; 14425f77b8a8SBrooks Davis Elf_Auxinfo *argarray, *pos; 1443361971fbSKornel Dulęba struct vmspace *vmspace; 144403b0d68cSJohn Baldwin int error; 1445e1743d02SSøren Schmidt 14465f77b8a8SBrooks Davis argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP, 14475f77b8a8SBrooks Davis M_WAITOK | M_ZERO); 1448e1743d02SSøren Schmidt 1449361971fbSKornel Dulęba vmspace = imgp->proc->p_vmspace; 1450361971fbSKornel Dulęba 145135c2a5a8SWarner Losh if (args->execfd != -1) 1452e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); 1453e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); 1454e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHENT, args->phent); 1455e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); 1456e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); 1457e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); 1458e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); 1459e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_BASE, args->base); 1460d36eec69SWarner Losh AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags); 14613ff06357SKonstantin Belousov if (imgp->execpathp != 0) 1462b24e6ac8SBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp); 1463b96bd95bSIan Lepore AUXARGS_ENTRY(pos, AT_OSRELDATE, 1464b96bd95bSIan Lepore imgp->proc->p_ucred->cr_prison->pr_osreldate); 1465ee235befSKonstantin Belousov if (imgp->canary != 0) { 1466b24e6ac8SBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary); 1467ee235befSKonstantin Belousov AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen); 1468ee235befSKonstantin Belousov } 1469ee235befSKonstantin Belousov AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus); 1470ee235befSKonstantin Belousov if (imgp->pagesizes != 0) { 1471b24e6ac8SBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes); 1472ee235befSKonstantin Belousov AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen); 1473ee235befSKonstantin Belousov } 1474361971fbSKornel Dulęba if ((imgp->sysent->sv_flags & SV_TIMEKEEP) != 0) { 1475aea81038SKonstantin Belousov AUXARGS_ENTRY(pos, AT_TIMEKEEP, 1476361971fbSKornel Dulęba vmspace->vm_shp_base + imgp->sysent->sv_timekeep_offset); 1477aea81038SKonstantin Belousov } 147826d8f3e1SKonstantin Belousov AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj 147926d8f3e1SKonstantin Belousov != NULL && imgp->stack_prot != 0 ? 
imgp->stack_prot : 148026d8f3e1SKonstantin Belousov imgp->sysent->sv_stackprot); 1481c2f37b92SJohn Baldwin if (imgp->sysent->sv_hwcap != NULL) 1482c2f37b92SJohn Baldwin AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap); 1483904d8c49SMichal Meloun if (imgp->sysent->sv_hwcap2 != NULL) 1484904d8c49SMichal Meloun AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2); 1485944cf37bSKonstantin Belousov AUXARGS_ENTRY(pos, AT_BSDFLAGS, __elfN(sigfastblock) ? 1486944cf37bSKonstantin Belousov ELF_BSDF_SIGFASTBLK : 0); 14879df1c38bSBrooks Davis AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc); 14889df1c38bSBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv); 14899df1c38bSBrooks Davis AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc); 14909df1c38bSBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv); 14919df1c38bSBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings); 1492361971fbSKornel Dulęba #ifdef RANDOM_FENESTRASX 1493361971fbSKornel Dulęba if ((imgp->sysent->sv_flags & SV_RNG_SEED_VER) != 0) { 1494361971fbSKornel Dulęba AUXARGS_ENTRY(pos, AT_FXRNG, 1495361971fbSKornel Dulęba vmspace->vm_shp_base + imgp->sysent->sv_fxrng_gen_offset); 1496361971fbSKornel Dulęba } 1497361971fbSKornel Dulęba #endif 1498361971fbSKornel Dulęba if ((imgp->sysent->sv_flags & SV_DSO_SIG) != 0 && __elfN(vdso) != 0) { 1499361971fbSKornel Dulęba AUXARGS_ENTRY(pos, AT_KPRELOAD, 1500361971fbSKornel Dulęba vmspace->vm_shp_base + imgp->sysent->sv_vdso_offset); 1501361971fbSKornel Dulęba } 1502e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_NULL, 0); 1503e1743d02SSøren Schmidt 1504e1743d02SSøren Schmidt free(imgp->auxargs, M_TEMP); 1505e1743d02SSøren Schmidt imgp->auxargs = NULL; 1506d8b2f079SBrooks Davis KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs")); 15075f77b8a8SBrooks Davis 1508d8010b11SJohn Baldwin error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT); 15095f77b8a8SBrooks Davis free(argarray, M_TEMP); 151003b0d68cSJohn Baldwin return (error); 1511e3532331SJohn Baldwin } 1512e1743d02SSøren Schmidt 1513e3532331SJohn Baldwin int 151431174518SJohn Baldwin __elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp) 1515e3532331SJohn Baldwin { 1516e3532331SJohn Baldwin Elf_Addr *base; 1517e3532331SJohn Baldwin 1518e3532331SJohn Baldwin base = (Elf_Addr *)*stack_base; 15193ebc1248SPeter Wemm base--; 1520ebf98866SMark Johnston if (elf_suword(base, imgp->args->argc) == -1) 15215f77b8a8SBrooks Davis return (EFAULT); 152231174518SJohn Baldwin *stack_base = (uintptr_t)base; 1523a7cddfedSJake Burkholder return (0); 1524e1743d02SSøren Schmidt } 1525e1743d02SSøren Schmidt 1526e1743d02SSøren Schmidt /* 15278c64af4fSJohn Polstra * Code for generating ELF core dumps. 15288c64af4fSJohn Polstra */ 15298c64af4fSJohn Polstra 15304d77a549SAlfred Perlstein typedef void (*segment_callback)(vm_map_entry_t, void *); 15310ff27d31SJohn Polstra 15320ff27d31SJohn Polstra /* Closure for cb_put_phdr(). */ 15330ff27d31SJohn Polstra struct phdr_closure { 15340ff27d31SJohn Polstra Elf_Phdr *phdr; /* Program header to fill in */ 15350ff27d31SJohn Polstra Elf_Off offset; /* Offset of segment in core file */ 15360ff27d31SJohn Polstra }; 15370ff27d31SJohn Polstra 1538bd390213SMikolaj Golub struct note_info { 1539bd390213SMikolaj Golub int type; /* Note type. */ 15406b71405bSJohn Baldwin struct regset *regset; /* Register set. */ 1541bd390213SMikolaj Golub outfunc_t outfunc; /* Output function. */ 1542bd390213SMikolaj Golub void *outarg; /* Argument for the output function. 
*/ 1543bd390213SMikolaj Golub size_t outsize; /* Output size. */ 1544bd390213SMikolaj Golub TAILQ_ENTRY(note_info) link; /* Link to the next note info. */ 1545bd390213SMikolaj Golub }; 1546bd390213SMikolaj Golub 1547bd390213SMikolaj Golub TAILQ_HEAD(note_info_list, note_info); 1548bd390213SMikolaj Golub 154978f57a9cSMark Johnston extern int compress_user_cores; 155078f57a9cSMark Johnston extern int compress_user_cores_level; 155178f57a9cSMark Johnston 15524d77a549SAlfred Perlstein static void cb_put_phdr(vm_map_entry_t, void *); 15534d77a549SAlfred Perlstein static void cb_size_segment(vm_map_entry_t, void *); 15545bc3c617SKonstantin Belousov static void each_dumpable_segment(struct thread *, segment_callback, void *, 15555bc3c617SKonstantin Belousov int); 1556aa14e9b7SMark Johnston static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t, 15575bc3c617SKonstantin Belousov struct note_info_list *, size_t, int); 1558435754a5SEdward Tomasz Napierala static void __elfN(putnote)(struct thread *td, struct note_info *, struct sbuf *); 1559bd390213SMikolaj Golub 1560bd390213SMikolaj Golub static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *); 1561bd390213SMikolaj Golub static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *); 1562f1fca82eSMikolaj Golub static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *); 1563f1fca82eSMikolaj Golub static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *); 1564f1fca82eSMikolaj Golub static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *); 1565f1fca82eSMikolaj Golub static void note_procstat_files(void *, struct sbuf *, size_t *); 1566f1fca82eSMikolaj Golub static void note_procstat_groups(void *, struct sbuf *, size_t *); 1567f1fca82eSMikolaj Golub static void note_procstat_osrel(void *, struct sbuf *, size_t *); 1568f1fca82eSMikolaj Golub static void note_procstat_rlimit(void *, struct sbuf *, size_t *); 1569f1fca82eSMikolaj Golub static void note_procstat_umask(void *, struct sbuf *, size_t *); 1570f1fca82eSMikolaj Golub static void note_procstat_vmmap(void *, struct sbuf *, size_t *); 15718c64af4fSJohn Polstra 1572aa14e9b7SMark Johnston static int 157378f57a9cSMark Johnston core_compressed_write(void *base, size_t len, off_t offset, void *arg) 1574aa14e9b7SMark Johnston { 1575aa14e9b7SMark Johnston 1576aa14e9b7SMark Johnston return (core_write((struct coredump_params *)arg, base, len, offset, 1577f31695ccSMark Johnston UIO_SYSSPACE, NULL)); 1578aa14e9b7SMark Johnston } 1579aa14e9b7SMark Johnston 15808c64af4fSJohn Polstra int 1581e7228204SAlfred Perlstein __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) 1582fca666a1SJulian Elischer { 1583247aba24SMarcel Moolenaar struct ucred *cred = td->td_ucred; 158486ffb3d1SKonstantin Belousov int compm, error = 0; 15850ff27d31SJohn Polstra struct sseg_closure seginfo; 1586bd390213SMikolaj Golub struct note_info_list notelst; 1587aa14e9b7SMark Johnston struct coredump_params params; 1588bd390213SMikolaj Golub struct note_info *ninfo; 1589aa14e9b7SMark Johnston void *hdr, *tmpbuf; 1590bd390213SMikolaj Golub size_t hdrsize, notesz, coresize; 15918c64af4fSJohn Polstra 1592e7228204SAlfred Perlstein hdr = NULL; 159302d131adSMark Johnston tmpbuf = NULL; 1594bd390213SMikolaj Golub TAILQ_INIT(¬elst); 1595e7228204SAlfred Perlstein 15960ff27d31SJohn Polstra /* Size the program segments. 
*/ 1597905d192dSEdward Tomasz Napierala __elfN(size_segments)(td, &seginfo, flags); 15980ff27d31SJohn Polstra 15990ff27d31SJohn Polstra /* 1600bd390213SMikolaj Golub * Collect info about the core file header area. 16010ff27d31SJohn Polstra */ 1602bd390213SMikolaj Golub hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); 1603c17b0bd2SConrad Meyer if (seginfo.count + 1 >= PN_XNUM) 1604c17b0bd2SConrad Meyer hdrsize += sizeof(Elf_Shdr); 1605435754a5SEdward Tomasz Napierala td->td_proc->p_sysent->sv_elf_core_prepare_notes(td, ¬elst, ¬esz); 1606bd390213SMikolaj Golub coresize = round_page(hdrsize + notesz) + seginfo.size; 16070ff27d31SJohn Polstra 160802d131adSMark Johnston /* Set up core dump parameters. */ 160902d131adSMark Johnston params.offset = 0; 161002d131adSMark Johnston params.active_cred = cred; 161102d131adSMark Johnston params.file_cred = NOCRED; 161202d131adSMark Johnston params.td = td; 161302d131adSMark Johnston params.vp = vp; 161478f57a9cSMark Johnston params.comp = NULL; 161502d131adSMark Johnston 1616afcc55f3SEdward Tomasz Napierala #ifdef RACCT 16174b5c9cf6SEdward Tomasz Napierala if (racct_enable) { 16181ba5ad42SEdward Tomasz Napierala PROC_LOCK(td->td_proc); 1619bd390213SMikolaj Golub error = racct_add(td->td_proc, RACCT_CORE, coresize); 16201ba5ad42SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 16211ba5ad42SEdward Tomasz Napierala if (error != 0) { 16221ba5ad42SEdward Tomasz Napierala error = EFAULT; 16231ba5ad42SEdward Tomasz Napierala goto done; 16241ba5ad42SEdward Tomasz Napierala } 16254b5c9cf6SEdward Tomasz Napierala } 1626afcc55f3SEdward Tomasz Napierala #endif 1627bd390213SMikolaj Golub if (coresize >= limit) { 1628fba6b1afSAlfred Perlstein error = EFAULT; 1629fba6b1afSAlfred Perlstein goto done; 1630fba6b1afSAlfred Perlstein } 16310ff27d31SJohn Polstra 1632aa14e9b7SMark Johnston /* Create a compression stream if necessary. */ 163386ffb3d1SKonstantin Belousov compm = compress_user_cores; 163486ffb3d1SKonstantin Belousov if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP && 163586ffb3d1SKonstantin Belousov compm == 0) 163686ffb3d1SKonstantin Belousov compm = COMPRESS_GZIP; 163786ffb3d1SKonstantin Belousov if (compm != 0) { 163878f57a9cSMark Johnston params.comp = compressor_init(core_compressed_write, 163986ffb3d1SKonstantin Belousov compm, CORE_BUF_SIZE, 164078f57a9cSMark Johnston compress_user_cores_level, ¶ms); 164178f57a9cSMark Johnston if (params.comp == NULL) { 1642aa14e9b7SMark Johnston error = EFAULT; 1643aa14e9b7SMark Johnston goto done; 1644aa14e9b7SMark Johnston } 1645aa14e9b7SMark Johnston tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); 1646aa14e9b7SMark Johnston } 1647aa14e9b7SMark Johnston 16480ff27d31SJohn Polstra /* 16490ff27d31SJohn Polstra * Allocate memory for building the header, fill it up, 1650bd390213SMikolaj Golub * and write it out following the notes. 16510ff27d31SJohn Polstra */ 1652a163d034SWarner Losh hdr = malloc(hdrsize, M_TEMP, M_WAITOK); 1653aa14e9b7SMark Johnston error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst, 16545bc3c617SKonstantin Belousov notesz, flags); 16550ff27d31SJohn Polstra 16560ff27d31SJohn Polstra /* Write the contents of all of the writable segments. */ 16570ff27d31SJohn Polstra if (error == 0) { 16580ff27d31SJohn Polstra Elf_Phdr *php; 16592b471bc6STim J. 
Robbins off_t offset; 16600ff27d31SJohn Polstra int i; 16610ff27d31SJohn Polstra 16620ff27d31SJohn Polstra php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; 1663bd390213SMikolaj Golub offset = round_page(hdrsize + notesz); 16640ff27d31SJohn Polstra for (i = 0; i < seginfo.count; i++) { 1665f31695ccSMark Johnston error = core_output((char *)(uintptr_t)php->p_vaddr, 1666aa14e9b7SMark Johnston php->p_filesz, offset, ¶ms, tmpbuf); 16670ff27d31SJohn Polstra if (error != 0) 16682b471bc6STim J. Robbins break; 16690ff27d31SJohn Polstra offset += php->p_filesz; 16700ff27d31SJohn Polstra php++; 16710ff27d31SJohn Polstra } 167278f57a9cSMark Johnston if (error == 0 && params.comp != NULL) 167378f57a9cSMark Johnston error = compressor_flush(params.comp); 16740ff27d31SJohn Polstra } 1675e7228204SAlfred Perlstein if (error) { 1676e7228204SAlfred Perlstein log(LOG_WARNING, 1677e7228204SAlfred Perlstein "Failed to write core file for process %s (error %d)\n", 1678e7228204SAlfred Perlstein curproc->p_comm, error); 1679e7228204SAlfred Perlstein } 1680e7228204SAlfred Perlstein 1681e7228204SAlfred Perlstein done: 1682aa14e9b7SMark Johnston free(tmpbuf, M_TEMP); 168378f57a9cSMark Johnston if (params.comp != NULL) 168478f57a9cSMark Johnston compressor_fini(params.comp); 1685bd390213SMikolaj Golub while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) { 1686bd390213SMikolaj Golub TAILQ_REMOVE(¬elst, ninfo, link); 1687bd390213SMikolaj Golub free(ninfo, M_TEMP); 1688bd390213SMikolaj Golub } 1689bd390213SMikolaj Golub if (hdr != NULL) 16900ff27d31SJohn Polstra free(hdr, M_TEMP); 16910ff27d31SJohn Polstra 1692a7cddfedSJake Burkholder return (error); 16938c64af4fSJohn Polstra } 16948c64af4fSJohn Polstra 16950ff27d31SJohn Polstra /* 16961005d8afSConrad Meyer * A callback for each_dumpable_segment() to write out the segment's 16970ff27d31SJohn Polstra * program header entry. 16980ff27d31SJohn Polstra */ 16990ff27d31SJohn Polstra static void 17005cc6d253SEd Maste cb_put_phdr(vm_map_entry_t entry, void *closure) 17010ff27d31SJohn Polstra { 17020ff27d31SJohn Polstra struct phdr_closure *phc = (struct phdr_closure *)closure; 17030ff27d31SJohn Polstra Elf_Phdr *phdr = phc->phdr; 17040ff27d31SJohn Polstra 17050ff27d31SJohn Polstra phc->offset = round_page(phc->offset); 17060ff27d31SJohn Polstra 17070ff27d31SJohn Polstra phdr->p_type = PT_LOAD; 17080ff27d31SJohn Polstra phdr->p_offset = phc->offset; 17090ff27d31SJohn Polstra phdr->p_vaddr = entry->start; 17100ff27d31SJohn Polstra phdr->p_paddr = 0; 17110ff27d31SJohn Polstra phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; 17120ff27d31SJohn Polstra phdr->p_align = PAGE_SIZE; 1713ed167eaaSKonstantin Belousov phdr->p_flags = __elfN(untrans_prot)(entry->protection); 17140ff27d31SJohn Polstra 17150ff27d31SJohn Polstra phc->offset += phdr->p_filesz; 17160ff27d31SJohn Polstra phc->phdr++; 17170ff27d31SJohn Polstra } 17180ff27d31SJohn Polstra 17190ff27d31SJohn Polstra /* 17201005d8afSConrad Meyer * A callback for each_dumpable_segment() to gather information about 17210ff27d31SJohn Polstra * the number of segments and their total size. 
17220ff27d31SJohn Polstra */ 17230ff27d31SJohn Polstra static void 1724f3325003SConrad Meyer cb_size_segment(vm_map_entry_t entry, void *closure) 17250ff27d31SJohn Polstra { 17260ff27d31SJohn Polstra struct sseg_closure *ssc = (struct sseg_closure *)closure; 17270ff27d31SJohn Polstra 17280ff27d31SJohn Polstra ssc->count++; 17290ff27d31SJohn Polstra ssc->size += entry->end - entry->start; 17300ff27d31SJohn Polstra } 17310ff27d31SJohn Polstra 1732905d192dSEdward Tomasz Napierala void 1733905d192dSEdward Tomasz Napierala __elfN(size_segments)(struct thread *td, struct sseg_closure *seginfo, 1734905d192dSEdward Tomasz Napierala int flags) 1735905d192dSEdward Tomasz Napierala { 1736905d192dSEdward Tomasz Napierala seginfo->count = 0; 1737905d192dSEdward Tomasz Napierala seginfo->size = 0; 1738905d192dSEdward Tomasz Napierala 1739905d192dSEdward Tomasz Napierala each_dumpable_segment(td, cb_size_segment, seginfo, flags); 1740905d192dSEdward Tomasz Napierala } 1741905d192dSEdward Tomasz Napierala 17420ff27d31SJohn Polstra /* 17430ff27d31SJohn Polstra * For each writable segment in the process's memory map, call the given 17440ff27d31SJohn Polstra * function with a pointer to the map entry and some arbitrary 17450ff27d31SJohn Polstra * caller-supplied data. 17460ff27d31SJohn Polstra */ 17470ff27d31SJohn Polstra static void 17485bc3c617SKonstantin Belousov each_dumpable_segment(struct thread *td, segment_callback func, void *closure, 17495bc3c617SKonstantin Belousov int flags) 17500ff27d31SJohn Polstra { 1751247aba24SMarcel Moolenaar struct proc *p = td->td_proc; 17520ff27d31SJohn Polstra vm_map_t map = &p->p_vmspace->vm_map; 17530ff27d31SJohn Polstra vm_map_entry_t entry; 1754976a87a2SAlan Cox vm_object_t backing_object, object; 1755fec41f07SMark Johnston bool ignore_entry; 17560ff27d31SJohn Polstra 1757976a87a2SAlan Cox vm_map_lock_read(map); 17582288078cSDoug Moore VM_MAP_ENTRY_FOREACH(entry, map) { 1759fa7dd9c5SMatthew Dillon /* 1760fa7dd9c5SMatthew Dillon * Don't dump inaccessible mappings, deal with legacy 1761fa7dd9c5SMatthew Dillon * coredump mode. 1762fa7dd9c5SMatthew Dillon * 1763fa7dd9c5SMatthew Dillon * Note that read-only segments related to the elf binary 1764fa7dd9c5SMatthew Dillon * are marked MAP_ENTRY_NOCOREDUMP now so we no longer 1765fa7dd9c5SMatthew Dillon * need to arbitrarily ignore such segments. 1766fa7dd9c5SMatthew Dillon */ 176786ffb3d1SKonstantin Belousov if ((flags & SVC_ALL) == 0) { 1768fa7dd9c5SMatthew Dillon if (elf_legacy_coredump) { 176986ffb3d1SKonstantin Belousov if ((entry->protection & VM_PROT_RW) != 177086ffb3d1SKonstantin Belousov VM_PROT_RW) 17710ff27d31SJohn Polstra continue; 1772fa7dd9c5SMatthew Dillon } else { 1773fa7dd9c5SMatthew Dillon if ((entry->protection & VM_PROT_ALL) == 0) 1774fa7dd9c5SMatthew Dillon continue; 1775fa7dd9c5SMatthew Dillon } 177686ffb3d1SKonstantin Belousov } 17770ff27d31SJohn Polstra 17789730a5daSPaul Saab /* 1779fa7dd9c5SMatthew Dillon * Dont include memory segment in the coredump if 1780fa7dd9c5SMatthew Dillon * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in 1781fa7dd9c5SMatthew Dillon * madvise(2). Do not dump submaps (i.e. parts of the 1782fa7dd9c5SMatthew Dillon * kernel map). 
17839730a5daSPaul Saab */ 178486ffb3d1SKonstantin Belousov if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 17859730a5daSPaul Saab continue; 178686ffb3d1SKonstantin Belousov if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 && 178786ffb3d1SKonstantin Belousov (flags & SVC_ALL) == 0) 178886ffb3d1SKonstantin Belousov continue; 1789976a87a2SAlan Cox if ((object = entry->object.vm_object) == NULL) 17900ff27d31SJohn Polstra continue; 17910ff27d31SJohn Polstra 17920ff27d31SJohn Polstra /* Ignore memory-mapped devices and such things. */ 1793bc403f03SAttilio Rao VM_OBJECT_RLOCK(object); 1794976a87a2SAlan Cox while ((backing_object = object->backing_object) != NULL) { 1795bc403f03SAttilio Rao VM_OBJECT_RLOCK(backing_object); 1796bc403f03SAttilio Rao VM_OBJECT_RUNLOCK(object); 1797976a87a2SAlan Cox object = backing_object; 1798976a87a2SAlan Cox } 1799fec41f07SMark Johnston ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0; 1800bc403f03SAttilio Rao VM_OBJECT_RUNLOCK(object); 1801976a87a2SAlan Cox if (ignore_entry) 18020ff27d31SJohn Polstra continue; 18030ff27d31SJohn Polstra 18040ff27d31SJohn Polstra (*func)(entry, closure); 18050ff27d31SJohn Polstra } 1806976a87a2SAlan Cox vm_map_unlock_read(map); 18070ff27d31SJohn Polstra } 18080ff27d31SJohn Polstra 18090ff27d31SJohn Polstra /* 18100ff27d31SJohn Polstra * Write the core file header to the file, including padding up to 18110ff27d31SJohn Polstra * the page boundary. 18120ff27d31SJohn Polstra */ 18138c64af4fSJohn Polstra static int 1814aa14e9b7SMark Johnston __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, 18155bc3c617SKonstantin Belousov size_t hdrsize, struct note_info_list *notelst, size_t notesz, 18165bc3c617SKonstantin Belousov int flags) 18178c64af4fSJohn Polstra { 1818bd390213SMikolaj Golub struct note_info *ninfo; 1819bd390213SMikolaj Golub struct sbuf *sb; 1820bd390213SMikolaj Golub int error; 18218c64af4fSJohn Polstra 18228c64af4fSJohn Polstra /* Fill in the header. */ 18230ff27d31SJohn Polstra bzero(hdr, hdrsize); 18245bc3c617SKonstantin Belousov __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags); 18258c64af4fSJohn Polstra 1826bd390213SMikolaj Golub sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); 1827aa14e9b7SMark Johnston sbuf_set_drain(sb, sbuf_drain_core_output, p); 1828bd390213SMikolaj Golub sbuf_start_section(sb, NULL); 1829bd390213SMikolaj Golub sbuf_bcat(sb, hdr, hdrsize); 1830bd390213SMikolaj Golub TAILQ_FOREACH(ninfo, notelst, link) 1831435754a5SEdward Tomasz Napierala __elfN(putnote)(p->td, ninfo, sb); 1832bd390213SMikolaj Golub /* Align up to a page boundary for the program segments. 
*/ 1833bd390213SMikolaj Golub sbuf_end_section(sb, -1, PAGE_SIZE, 0); 1834bd390213SMikolaj Golub error = sbuf_finish(sb); 1835bd390213SMikolaj Golub sbuf_delete(sb); 1836bd390213SMikolaj Golub 1837bd390213SMikolaj Golub return (error); 1838e7228204SAlfred Perlstein } 1839bd390213SMikolaj Golub 1840435754a5SEdward Tomasz Napierala void 1841bd390213SMikolaj Golub __elfN(prepare_notes)(struct thread *td, struct note_info_list *list, 1842bd390213SMikolaj Golub size_t *sizep) 1843bd390213SMikolaj Golub { 1844bd390213SMikolaj Golub struct proc *p; 1845bd390213SMikolaj Golub struct thread *thr; 1846bd390213SMikolaj Golub size_t size; 1847bd390213SMikolaj Golub 1848bd390213SMikolaj Golub p = td->td_proc; 1849bd390213SMikolaj Golub size = 0; 1850bd390213SMikolaj Golub 18516b71405bSJohn Baldwin size += __elfN(register_note)(td, list, NT_PRPSINFO, 18526b71405bSJohn Baldwin __elfN(note_prpsinfo), p); 1853bd390213SMikolaj Golub 1854bd390213SMikolaj Golub /* 1855bd390213SMikolaj Golub * To have the debugger select the right thread (LWP) as the initial 1856bd390213SMikolaj Golub * thread, we dump the state of the thread passed to us in td first. 1857bd390213SMikolaj Golub * This is the thread that causes the core dump and thus likely to 1858bd390213SMikolaj Golub * be the right thread one wants to have selected in the debugger. 1859bd390213SMikolaj Golub */ 1860bd390213SMikolaj Golub thr = td; 1861bd390213SMikolaj Golub while (thr != NULL) { 18626b71405bSJohn Baldwin size += __elfN(prepare_register_notes)(td, list, thr); 1863435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, -1, 1864bd390213SMikolaj Golub __elfN(note_threadmd), thr); 1865bd390213SMikolaj Golub 186661b4c627SEdward Tomasz Napierala thr = thr == td ? TAILQ_FIRST(&p->p_threads) : 1867bd390213SMikolaj Golub TAILQ_NEXT(thr, td_plist); 1868bd390213SMikolaj Golub if (thr == td) 1869bd390213SMikolaj Golub thr = TAILQ_NEXT(thr, td_plist); 1870dada0278SJohn Polstra } 1871dada0278SJohn Polstra 1872435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_PROC, 1873f1fca82eSMikolaj Golub __elfN(note_procstat_proc), p); 1874435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_FILES, 1875f1fca82eSMikolaj Golub note_procstat_files, p); 1876435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_VMMAP, 1877f1fca82eSMikolaj Golub note_procstat_vmmap, p); 1878435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_GROUPS, 1879f1fca82eSMikolaj Golub note_procstat_groups, p); 1880435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_UMASK, 1881f1fca82eSMikolaj Golub note_procstat_umask, p); 1882435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_RLIMIT, 1883f1fca82eSMikolaj Golub note_procstat_rlimit, p); 1884435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_OSREL, 1885f1fca82eSMikolaj Golub note_procstat_osrel, p); 1886435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_PSSTRINGS, 1887f1fca82eSMikolaj Golub __elfN(note_procstat_psstrings), p); 1888435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_AUXV, 1889f1fca82eSMikolaj Golub __elfN(note_procstat_auxv), p); 1890f1fca82eSMikolaj Golub 1891bd390213SMikolaj Golub *sizep = size; 1892bd390213SMikolaj Golub } 1893bd390213SMikolaj Golub 1894905d192dSEdward Tomasz Napierala void 1895bd390213SMikolaj Golub 
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs, 18965bc3c617SKonstantin Belousov size_t notesz, int flags) 1897bd390213SMikolaj Golub { 1898bd390213SMikolaj Golub Elf_Ehdr *ehdr; 1899bd390213SMikolaj Golub Elf_Phdr *phdr; 1900c17b0bd2SConrad Meyer Elf_Shdr *shdr; 1901bd390213SMikolaj Golub struct phdr_closure phc; 19025d9f7901SDmitry Chagin Elf_Brandinfo *bi; 1903bd390213SMikolaj Golub 1904bd390213SMikolaj Golub ehdr = (Elf_Ehdr *)hdr; 19055d9f7901SDmitry Chagin bi = td->td_proc->p_elf_brandinfo; 1906bd390213SMikolaj Golub 1907bd390213SMikolaj Golub ehdr->e_ident[EI_MAG0] = ELFMAG0; 1908bd390213SMikolaj Golub ehdr->e_ident[EI_MAG1] = ELFMAG1; 1909bd390213SMikolaj Golub ehdr->e_ident[EI_MAG2] = ELFMAG2; 1910bd390213SMikolaj Golub ehdr->e_ident[EI_MAG3] = ELFMAG3; 1911bd390213SMikolaj Golub ehdr->e_ident[EI_CLASS] = ELF_CLASS; 1912bd390213SMikolaj Golub ehdr->e_ident[EI_DATA] = ELF_DATA; 1913bd390213SMikolaj Golub ehdr->e_ident[EI_VERSION] = EV_CURRENT; 1914435754a5SEdward Tomasz Napierala ehdr->e_ident[EI_OSABI] = td->td_proc->p_sysent->sv_elf_core_osabi; 1915bd390213SMikolaj Golub ehdr->e_ident[EI_ABIVERSION] = 0; 1916bd390213SMikolaj Golub ehdr->e_ident[EI_PAD] = 0; 1917bd390213SMikolaj Golub ehdr->e_type = ET_CORE; 19185d9f7901SDmitry Chagin ehdr->e_machine = bi->machine; 1919bd390213SMikolaj Golub ehdr->e_version = EV_CURRENT; 1920bd390213SMikolaj Golub ehdr->e_entry = 0; 1921bd390213SMikolaj Golub ehdr->e_phoff = sizeof(Elf_Ehdr); 1922885f13dcSJohn Baldwin ehdr->e_flags = td->td_proc->p_elf_flags; 1923bd390213SMikolaj Golub ehdr->e_ehsize = sizeof(Elf_Ehdr); 1924bd390213SMikolaj Golub ehdr->e_phentsize = sizeof(Elf_Phdr); 1925bd390213SMikolaj Golub ehdr->e_shentsize = sizeof(Elf_Shdr); 1926bd390213SMikolaj Golub ehdr->e_shstrndx = SHN_UNDEF; 1927c17b0bd2SConrad Meyer if (numsegs + 1 < PN_XNUM) { 1928c17b0bd2SConrad Meyer ehdr->e_phnum = numsegs + 1; 1929c17b0bd2SConrad Meyer ehdr->e_shnum = 0; 1930c17b0bd2SConrad Meyer } else { 1931c17b0bd2SConrad Meyer ehdr->e_phnum = PN_XNUM; 1932c17b0bd2SConrad Meyer ehdr->e_shnum = 1; 1933c17b0bd2SConrad Meyer 1934c17b0bd2SConrad Meyer ehdr->e_shoff = ehdr->e_phoff + 1935c17b0bd2SConrad Meyer (numsegs + 1) * ehdr->e_phentsize; 1936c17b0bd2SConrad Meyer KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr), 1937c17b0bd2SConrad Meyer ("e_shoff: %zu, hdrsize - shdr: %zu", 193807f825e8SConrad Meyer (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr))); 1939c17b0bd2SConrad Meyer 1940c17b0bd2SConrad Meyer shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff); 1941c17b0bd2SConrad Meyer memset(shdr, 0, sizeof(*shdr)); 1942c17b0bd2SConrad Meyer /* 1943c17b0bd2SConrad Meyer * A special first section is used to hold large segment and 1944c17b0bd2SConrad Meyer * section counts. This was proposed by Sun Microsystems in 1945c17b0bd2SConrad Meyer * Solaris and has been adopted by Linux; the standard ELF 1946c17b0bd2SConrad Meyer * tools are already familiar with the technique. 1947c17b0bd2SConrad Meyer * 1948c17b0bd2SConrad Meyer * See table 7-7 of the Solaris "Linker and Libraries Guide" 1949c17b0bd2SConrad Meyer * (or 12-7 depending on the version of the document) for more 1950c17b0bd2SConrad Meyer * details. 
1951c17b0bd2SConrad Meyer */ 1952c17b0bd2SConrad Meyer shdr->sh_type = SHT_NULL; 1953c17b0bd2SConrad Meyer shdr->sh_size = ehdr->e_shnum; 1954c17b0bd2SConrad Meyer shdr->sh_link = ehdr->e_shstrndx; 1955c17b0bd2SConrad Meyer shdr->sh_info = numsegs + 1; 1956c17b0bd2SConrad Meyer } 1957bd390213SMikolaj Golub 1958bd390213SMikolaj Golub /* 1959bd390213SMikolaj Golub * Fill in the program header entries. 1960bd390213SMikolaj Golub */ 1961c17b0bd2SConrad Meyer phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff); 1962bd390213SMikolaj Golub 1963bd390213SMikolaj Golub /* The note segement. */ 1964bd390213SMikolaj Golub phdr->p_type = PT_NOTE; 1965bd390213SMikolaj Golub phdr->p_offset = hdrsize; 1966bd390213SMikolaj Golub phdr->p_vaddr = 0; 1967bd390213SMikolaj Golub phdr->p_paddr = 0; 1968bd390213SMikolaj Golub phdr->p_filesz = notesz; 1969bd390213SMikolaj Golub phdr->p_memsz = 0; 1970bd390213SMikolaj Golub phdr->p_flags = PF_R; 19711b8388cdSMikolaj Golub phdr->p_align = ELF_NOTE_ROUNDSIZE; 1972bd390213SMikolaj Golub phdr++; 1973bd390213SMikolaj Golub 1974bd390213SMikolaj Golub /* All the writable segments from the program. */ 1975bd390213SMikolaj Golub phc.phdr = phdr; 1976bd390213SMikolaj Golub phc.offset = round_page(hdrsize + notesz); 19775bc3c617SKonstantin Belousov each_dumpable_segment(td, cb_put_phdr, &phc, flags); 1978bd390213SMikolaj Golub } 1979bd390213SMikolaj Golub 19806b71405bSJohn Baldwin static size_t 19816b71405bSJohn Baldwin __elfN(register_regset_note)(struct thread *td, struct note_info_list *list, 19826b71405bSJohn Baldwin struct regset *regset, struct thread *target_td) 19836b71405bSJohn Baldwin { 19846b71405bSJohn Baldwin const struct sysentvec *sv; 19856b71405bSJohn Baldwin struct note_info *ninfo; 19866b71405bSJohn Baldwin size_t size, notesize; 19876b71405bSJohn Baldwin 19886b71405bSJohn Baldwin size = 0; 19896b71405bSJohn Baldwin if (!regset->get(regset, target_td, NULL, &size) || size == 0) 19906b71405bSJohn Baldwin return (0); 19916b71405bSJohn Baldwin 19926b71405bSJohn Baldwin ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK); 19936b71405bSJohn Baldwin ninfo->type = regset->note; 19946b71405bSJohn Baldwin ninfo->regset = regset; 19956b71405bSJohn Baldwin ninfo->outarg = target_td; 19966b71405bSJohn Baldwin ninfo->outsize = size; 19976b71405bSJohn Baldwin TAILQ_INSERT_TAIL(list, ninfo, link); 19986b71405bSJohn Baldwin 19996b71405bSJohn Baldwin sv = td->td_proc->p_sysent; 20006b71405bSJohn Baldwin notesize = sizeof(Elf_Note) + /* note header */ 20016b71405bSJohn Baldwin roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) + 20026b71405bSJohn Baldwin /* note name */ 20036b71405bSJohn Baldwin roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ 20046b71405bSJohn Baldwin 20056b71405bSJohn Baldwin return (notesize); 20066b71405bSJohn Baldwin } 20076b71405bSJohn Baldwin 2008435754a5SEdward Tomasz Napierala size_t 2009435754a5SEdward Tomasz Napierala __elfN(register_note)(struct thread *td, struct note_info_list *list, 2010435754a5SEdward Tomasz Napierala int type, outfunc_t out, void *arg) 2011bd390213SMikolaj Golub { 2012435754a5SEdward Tomasz Napierala const struct sysentvec *sv; 2013bd390213SMikolaj Golub struct note_info *ninfo; 2014bd390213SMikolaj Golub size_t size, notesize; 2015bd390213SMikolaj Golub 2016435754a5SEdward Tomasz Napierala sv = td->td_proc->p_sysent; 2017bd390213SMikolaj Golub size = 0; 2018bd390213SMikolaj Golub out(arg, NULL, &size); 2019bd390213SMikolaj Golub ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK); 
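	/*
	 * A type of -1 marks a "raw" note: the output function emits fully
	 * formed note data itself, so no Elf_Note header or vendor name is
	 * accounted for here, and __elfN(putnote)() passes its output
	 * through unmodified.
	 */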
2020bd390213SMikolaj Golub ninfo->type = type; 2021bd390213SMikolaj Golub ninfo->outfunc = out; 2022bd390213SMikolaj Golub ninfo->outarg = arg; 2023bd390213SMikolaj Golub ninfo->outsize = size; 2024bd390213SMikolaj Golub TAILQ_INSERT_TAIL(list, ninfo, link); 2025bd390213SMikolaj Golub 2026bd390213SMikolaj Golub if (type == -1) 2027bd390213SMikolaj Golub return (size); 2028bd390213SMikolaj Golub 2029bd390213SMikolaj Golub notesize = sizeof(Elf_Note) + /* note header */ 2030435754a5SEdward Tomasz Napierala roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) + 2031180e57e5SJohn Baldwin /* note name */ 2032180e57e5SJohn Baldwin roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ 2033180e57e5SJohn Baldwin 2034180e57e5SJohn Baldwin return (notesize); 2035180e57e5SJohn Baldwin } 2036180e57e5SJohn Baldwin 2037180e57e5SJohn Baldwin static size_t 2038180e57e5SJohn Baldwin append_note_data(const void *src, void *dst, size_t len) 2039180e57e5SJohn Baldwin { 2040180e57e5SJohn Baldwin size_t padded_len; 2041180e57e5SJohn Baldwin 2042180e57e5SJohn Baldwin padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE); 2043180e57e5SJohn Baldwin if (dst != NULL) { 2044180e57e5SJohn Baldwin bcopy(src, dst, len); 2045180e57e5SJohn Baldwin bzero((char *)dst + len, padded_len - len); 2046180e57e5SJohn Baldwin } 2047180e57e5SJohn Baldwin return (padded_len); 2048180e57e5SJohn Baldwin } 2049180e57e5SJohn Baldwin 2050180e57e5SJohn Baldwin size_t 2051180e57e5SJohn Baldwin __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp) 2052180e57e5SJohn Baldwin { 2053180e57e5SJohn Baldwin Elf_Note *note; 2054180e57e5SJohn Baldwin char *buf; 2055180e57e5SJohn Baldwin size_t notesize; 2056180e57e5SJohn Baldwin 2057180e57e5SJohn Baldwin buf = dst; 2058180e57e5SJohn Baldwin if (buf != NULL) { 2059180e57e5SJohn Baldwin note = (Elf_Note *)buf; 2060180e57e5SJohn Baldwin note->n_namesz = sizeof(FREEBSD_ABI_VENDOR); 2061180e57e5SJohn Baldwin note->n_descsz = size; 2062180e57e5SJohn Baldwin note->n_type = type; 2063180e57e5SJohn Baldwin buf += sizeof(*note); 2064180e57e5SJohn Baldwin buf += append_note_data(FREEBSD_ABI_VENDOR, buf, 2065180e57e5SJohn Baldwin sizeof(FREEBSD_ABI_VENDOR)); 2066180e57e5SJohn Baldwin append_note_data(src, buf, size); 2067180e57e5SJohn Baldwin if (descp != NULL) 2068180e57e5SJohn Baldwin *descp = buf; 2069180e57e5SJohn Baldwin } 2070180e57e5SJohn Baldwin 2071180e57e5SJohn Baldwin notesize = sizeof(Elf_Note) + /* note header */ 2072180e57e5SJohn Baldwin roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + 2073180e57e5SJohn Baldwin /* note name */ 20741b8388cdSMikolaj Golub roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ 2075bd390213SMikolaj Golub 2076bd390213SMikolaj Golub return (notesize); 2077bd390213SMikolaj Golub } 2078bd390213SMikolaj Golub 2079bd390213SMikolaj Golub static void 2080435754a5SEdward Tomasz Napierala __elfN(putnote)(struct thread *td, struct note_info *ninfo, struct sbuf *sb) 2081bd390213SMikolaj Golub { 2082bd390213SMikolaj Golub Elf_Note note; 2083435754a5SEdward Tomasz Napierala const struct sysentvec *sv; 208414bdbaf2SConrad Meyer ssize_t old_len, sect_len; 208514bdbaf2SConrad Meyer size_t new_len, descsz, i; 2086bd390213SMikolaj Golub 2087bd390213SMikolaj Golub if (ninfo->type == -1) { 2088bd390213SMikolaj Golub ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); 2089bd390213SMikolaj Golub return; 2090bd390213SMikolaj Golub } 2091bd390213SMikolaj Golub 2092435754a5SEdward Tomasz Napierala sv = td->td_proc->p_sysent; 
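	/*
	 * Regular notes are laid out as an Elf_Note header, the vendor name
	 * padded to ELF_NOTE_ROUNDSIZE bytes, then the descriptor, also
	 * padded to ELF_NOTE_ROUNDSIZE bytes.  The sbuf sections below
	 * supply that padding via sbuf_end_section().
	 */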
2093435754a5SEdward Tomasz Napierala 2094435754a5SEdward Tomasz Napierala note.n_namesz = strlen(sv->sv_elf_core_abi_vendor) + 1; 2095bd390213SMikolaj Golub note.n_descsz = ninfo->outsize; 2096bd390213SMikolaj Golub note.n_type = ninfo->type; 2097bd390213SMikolaj Golub 2098bd390213SMikolaj Golub sbuf_bcat(sb, ¬e, sizeof(note)); 2099bd390213SMikolaj Golub sbuf_start_section(sb, &old_len); 2100435754a5SEdward Tomasz Napierala sbuf_bcat(sb, sv->sv_elf_core_abi_vendor, 2101435754a5SEdward Tomasz Napierala strlen(sv->sv_elf_core_abi_vendor) + 1); 21021b8388cdSMikolaj Golub sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); 2103bd390213SMikolaj Golub if (note.n_descsz == 0) 2104bd390213SMikolaj Golub return; 2105bd390213SMikolaj Golub sbuf_start_section(sb, &old_len); 21066b71405bSJohn Baldwin if (ninfo->regset != NULL) { 21076b71405bSJohn Baldwin struct regset *regset = ninfo->regset; 21086b71405bSJohn Baldwin void *buf; 21096b71405bSJohn Baldwin 21106b71405bSJohn Baldwin buf = malloc(ninfo->outsize, M_TEMP, M_ZERO | M_WAITOK); 21116b71405bSJohn Baldwin (void)regset->get(regset, ninfo->outarg, buf, &ninfo->outsize); 21126b71405bSJohn Baldwin sbuf_bcat(sb, buf, ninfo->outsize); 21136b71405bSJohn Baldwin free(buf, M_TEMP); 21146b71405bSJohn Baldwin } else 2115bd390213SMikolaj Golub ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); 211614bdbaf2SConrad Meyer sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); 211714bdbaf2SConrad Meyer if (sect_len < 0) 211814bdbaf2SConrad Meyer return; 211914bdbaf2SConrad Meyer 212014bdbaf2SConrad Meyer new_len = (size_t)sect_len; 212114bdbaf2SConrad Meyer descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE); 212214bdbaf2SConrad Meyer if (new_len < descsz) { 212314bdbaf2SConrad Meyer /* 212414bdbaf2SConrad Meyer * It is expected that individual note emitters will correctly 212514bdbaf2SConrad Meyer * predict their expected output size and fill up to that size 212614bdbaf2SConrad Meyer * themselves, padding in a format-specific way if needed. 212714bdbaf2SConrad Meyer * However, in case they don't, just do it here with zeros. 212814bdbaf2SConrad Meyer */ 212914bdbaf2SConrad Meyer for (i = 0; i < descsz - new_len; i++) 213014bdbaf2SConrad Meyer sbuf_putc(sb, 0); 213114bdbaf2SConrad Meyer } else if (new_len > descsz) { 213214bdbaf2SConrad Meyer /* 213314bdbaf2SConrad Meyer * We can't always truncate sb -- we may have drained some 213414bdbaf2SConrad Meyer * of it already. 213514bdbaf2SConrad Meyer */ 213614bdbaf2SConrad Meyer KASSERT(new_len == descsz, ("%s: Note type %u changed as we " 213714bdbaf2SConrad Meyer "read it (%zu > %zu). Since it is longer than " 213814bdbaf2SConrad Meyer "expected, this coredump's notes are corrupt. THIS " 213914bdbaf2SConrad Meyer "IS A BUG in the note_procstat routine for type %u.\n", 214014bdbaf2SConrad Meyer __func__, (unsigned)note.n_type, new_len, descsz, 214114bdbaf2SConrad Meyer (unsigned)note.n_type)); 214214bdbaf2SConrad Meyer } 2143bd390213SMikolaj Golub } 2144bd390213SMikolaj Golub 2145bd390213SMikolaj Golub /* 2146bd390213SMikolaj Golub * Miscellaneous note out functions. 
2147bd390213SMikolaj Golub */ 2148bd390213SMikolaj Golub 2149841c0c7eSNathan Whitehorn #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2150841c0c7eSNathan Whitehorn #include <compat/freebsd32/freebsd32.h> 215151645e83SJohn Baldwin #include <compat/freebsd32/freebsd32_signal.h> 2152841c0c7eSNathan Whitehorn 215362919d78SPeter Wemm typedef struct prstatus32 elf_prstatus_t; 215462919d78SPeter Wemm typedef struct prpsinfo32 elf_prpsinfo_t; 215562919d78SPeter Wemm typedef struct fpreg32 elf_prfpregset_t; 215662919d78SPeter Wemm typedef struct fpreg32 elf_fpregset_t; 215762919d78SPeter Wemm typedef struct reg32 elf_gregset_t; 21587f08176eSAttilio Rao typedef struct thrmisc32 elf_thrmisc_t; 21590288d427SJohn Baldwin typedef struct ptrace_lwpinfo32 elf_lwpinfo_t; 2160f1fca82eSMikolaj Golub #define ELF_KERN_PROC_MASK KERN_PROC_MASK32 2161f1fca82eSMikolaj Golub typedef struct kinfo_proc32 elf_kinfo_proc_t; 2162f1fca82eSMikolaj Golub typedef uint32_t elf_ps_strings_t; 216362919d78SPeter Wemm #else 216462919d78SPeter Wemm typedef prstatus_t elf_prstatus_t; 216562919d78SPeter Wemm typedef prpsinfo_t elf_prpsinfo_t; 216662919d78SPeter Wemm typedef prfpregset_t elf_prfpregset_t; 216762919d78SPeter Wemm typedef prfpregset_t elf_fpregset_t; 216862919d78SPeter Wemm typedef gregset_t elf_gregset_t; 21697f08176eSAttilio Rao typedef thrmisc_t elf_thrmisc_t; 21700288d427SJohn Baldwin typedef struct ptrace_lwpinfo elf_lwpinfo_t; 2171f1fca82eSMikolaj Golub #define ELF_KERN_PROC_MASK 0 2172f1fca82eSMikolaj Golub typedef struct kinfo_proc elf_kinfo_proc_t; 2173f1fca82eSMikolaj Golub typedef vm_offset_t elf_ps_strings_t; 217462919d78SPeter Wemm #endif 217562919d78SPeter Wemm 21768c64af4fSJohn Polstra static void 2177bd390213SMikolaj Golub __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep) 21788c64af4fSJohn Polstra { 2179c77547d2SJohn Baldwin struct sbuf sbarg; 2180c77547d2SJohn Baldwin size_t len; 2181c77547d2SJohn Baldwin char *cp, *end; 2182247aba24SMarcel Moolenaar struct proc *p; 2183bd390213SMikolaj Golub elf_prpsinfo_t *psinfo; 2184c77547d2SJohn Baldwin int error; 21858c64af4fSJohn Polstra 218661b4c627SEdward Tomasz Napierala p = arg; 2187bd390213SMikolaj Golub if (sb != NULL) { 2188bd390213SMikolaj Golub KASSERT(*sizep == sizeof(*psinfo), ("invalid size")); 2189bd390213SMikolaj Golub psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK); 21908c9b7b2cSMarcel Moolenaar psinfo->pr_version = PRPSINFO_VERSION; 219162919d78SPeter Wemm psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); 2192ccd3953eSJohn Baldwin strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); 2193c77547d2SJohn Baldwin PROC_LOCK(p); 2194c77547d2SJohn Baldwin if (p->p_args != NULL) { 2195c77547d2SJohn Baldwin len = sizeof(psinfo->pr_psargs) - 1; 2196c77547d2SJohn Baldwin if (len > p->p_args->ar_length) 2197c77547d2SJohn Baldwin len = p->p_args->ar_length; 2198c77547d2SJohn Baldwin memcpy(psinfo->pr_psargs, p->p_args->ar_args, len); 2199c77547d2SJohn Baldwin PROC_UNLOCK(p); 2200c77547d2SJohn Baldwin error = 0; 2201c77547d2SJohn Baldwin } else { 2202c77547d2SJohn Baldwin _PHOLD(p); 2203c77547d2SJohn Baldwin PROC_UNLOCK(p); 2204c77547d2SJohn Baldwin sbuf_new(&sbarg, psinfo->pr_psargs, 2205c77547d2SJohn Baldwin sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN); 2206c77547d2SJohn Baldwin error = proc_getargv(curthread, p, &sbarg); 2207c77547d2SJohn Baldwin PRELE(p); 2208c77547d2SJohn Baldwin if (sbuf_finish(&sbarg) == 0) 2209c77547d2SJohn Baldwin len = sbuf_len(&sbarg) - 1; 2210c77547d2SJohn Baldwin else 2211c77547d2SJohn 
Baldwin len = sizeof(psinfo->pr_psargs) - 1; 2212c77547d2SJohn Baldwin sbuf_delete(&sbarg); 2213c77547d2SJohn Baldwin } 2214c77547d2SJohn Baldwin if (error || len == 0) 2215ccd3953eSJohn Baldwin strlcpy(psinfo->pr_psargs, p->p_comm, 22168c9b7b2cSMarcel Moolenaar sizeof(psinfo->pr_psargs)); 2217c77547d2SJohn Baldwin else { 2218c77547d2SJohn Baldwin KASSERT(len < sizeof(psinfo->pr_psargs), 2219c77547d2SJohn Baldwin ("len is too long: %zu vs %zu", len, 2220c77547d2SJohn Baldwin sizeof(psinfo->pr_psargs))); 2221c77547d2SJohn Baldwin cp = psinfo->pr_psargs; 2222c77547d2SJohn Baldwin end = cp + len - 1; 2223c77547d2SJohn Baldwin for (;;) { 2224c77547d2SJohn Baldwin cp = memchr(cp, '\0', end - cp); 2225c77547d2SJohn Baldwin if (cp == NULL) 2226c77547d2SJohn Baldwin break; 2227c77547d2SJohn Baldwin *cp = ' '; 2228c77547d2SJohn Baldwin } 2229c77547d2SJohn Baldwin } 2230ccb83afdSJohn Baldwin psinfo->pr_pid = p->p_pid; 2231bd390213SMikolaj Golub sbuf_bcat(sb, psinfo, sizeof(*psinfo)); 2232bd390213SMikolaj Golub free(psinfo, M_TEMP); 2233bd390213SMikolaj Golub } 2234bd390213SMikolaj Golub *sizep = sizeof(*psinfo); 2235bd390213SMikolaj Golub } 2236bd390213SMikolaj Golub 2237548a2ec4SAndrew Turner static bool 2238548a2ec4SAndrew Turner __elfN(get_prstatus)(struct regset *rs, struct thread *td, void *buf, 2239548a2ec4SAndrew Turner size_t *sizep) 2240bd390213SMikolaj Golub { 2241bd390213SMikolaj Golub elf_prstatus_t *status; 2242bd390213SMikolaj Golub 2243548a2ec4SAndrew Turner if (buf != NULL) { 2244548a2ec4SAndrew Turner KASSERT(*sizep == sizeof(*status), ("%s: invalid size", 2245548a2ec4SAndrew Turner __func__)); 2246548a2ec4SAndrew Turner status = buf; 22471babcad6SMark Johnston memset(status, 0, *sizep); 22488c9b7b2cSMarcel Moolenaar status->pr_version = PRSTATUS_VERSION; 224962919d78SPeter Wemm status->pr_statussz = sizeof(elf_prstatus_t); 225062919d78SPeter Wemm status->pr_gregsetsz = sizeof(elf_gregset_t); 225162919d78SPeter Wemm status->pr_fpregsetsz = sizeof(elf_fpregset_t); 22528c9b7b2cSMarcel Moolenaar status->pr_osreldate = osreldate; 2253bd390213SMikolaj Golub status->pr_cursig = td->td_proc->p_sig; 2254bd390213SMikolaj Golub status->pr_pid = td->td_tid; 2255841c0c7eSNathan Whitehorn #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2256bd390213SMikolaj Golub fill_regs32(td, &status->pr_reg); 225762919d78SPeter Wemm #else 2258bd390213SMikolaj Golub fill_regs(td, &status->pr_reg); 225962919d78SPeter Wemm #endif 2260548a2ec4SAndrew Turner } 2261548a2ec4SAndrew Turner *sizep = sizeof(*status); 2262548a2ec4SAndrew Turner return (true); 2263548a2ec4SAndrew Turner } 2264548a2ec4SAndrew Turner 2265548a2ec4SAndrew Turner static bool 2266548a2ec4SAndrew Turner __elfN(set_prstatus)(struct regset *rs, struct thread *td, void *buf, 2267548a2ec4SAndrew Turner size_t size) 2268548a2ec4SAndrew Turner { 2269548a2ec4SAndrew Turner elf_prstatus_t *status; 2270548a2ec4SAndrew Turner 2271548a2ec4SAndrew Turner KASSERT(size == sizeof(*status), ("%s: invalid size", __func__)); 2272548a2ec4SAndrew Turner status = buf; 2273548a2ec4SAndrew Turner #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2274548a2ec4SAndrew Turner set_regs32(td, &status->pr_reg); 2275548a2ec4SAndrew Turner #else 2276548a2ec4SAndrew Turner set_regs(td, &status->pr_reg); 2277548a2ec4SAndrew Turner #endif 2278548a2ec4SAndrew Turner return (true); 2279548a2ec4SAndrew Turner } 2280548a2ec4SAndrew Turner 2281548a2ec4SAndrew Turner static struct regset __elfN(regset_prstatus) = { 2282548a2ec4SAndrew Turner .note = NT_PRSTATUS, 
2283548a2ec4SAndrew Turner .size = sizeof(elf_prstatus_t), 2284548a2ec4SAndrew Turner .get = __elfN(get_prstatus), 2285548a2ec4SAndrew Turner .set = __elfN(set_prstatus), 2286548a2ec4SAndrew Turner }; 2287548a2ec4SAndrew Turner ELF_REGSET(__elfN(regset_prstatus)); 2288548a2ec4SAndrew Turner 2289548a2ec4SAndrew Turner static bool 2290548a2ec4SAndrew Turner __elfN(get_fpregset)(struct regset *rs, struct thread *td, void *buf, 2291548a2ec4SAndrew Turner size_t *sizep) 2292548a2ec4SAndrew Turner { 2293548a2ec4SAndrew Turner elf_prfpregset_t *fpregset; 2294548a2ec4SAndrew Turner 2295548a2ec4SAndrew Turner if (buf != NULL) { 2296548a2ec4SAndrew Turner KASSERT(*sizep == sizeof(*fpregset), ("%s: invalid size", 2297548a2ec4SAndrew Turner __func__)); 2298548a2ec4SAndrew Turner fpregset = buf; 2299548a2ec4SAndrew Turner #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2300548a2ec4SAndrew Turner fill_fpregs32(td, fpregset); 2301548a2ec4SAndrew Turner #else 2302548a2ec4SAndrew Turner fill_fpregs(td, fpregset); 2303548a2ec4SAndrew Turner #endif 2304548a2ec4SAndrew Turner } 23050b25cbc7SJohn Baldwin *sizep = sizeof(*fpregset); 2306548a2ec4SAndrew Turner return (true); 2307548a2ec4SAndrew Turner } 2308548a2ec4SAndrew Turner 2309548a2ec4SAndrew Turner static bool 2310548a2ec4SAndrew Turner __elfN(set_fpregset)(struct regset *rs, struct thread *td, void *buf, 2311548a2ec4SAndrew Turner size_t size) 2312548a2ec4SAndrew Turner { 2313548a2ec4SAndrew Turner elf_prfpregset_t *fpregset; 2314548a2ec4SAndrew Turner 2315548a2ec4SAndrew Turner fpregset = buf; 2316548a2ec4SAndrew Turner KASSERT(size == sizeof(*fpregset), ("%s: invalid size", __func__)); 2317548a2ec4SAndrew Turner #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2318548a2ec4SAndrew Turner set_fpregs32(td, fpregset); 2319548a2ec4SAndrew Turner #else 2320548a2ec4SAndrew Turner set_fpregs(td, fpregset); 2321548a2ec4SAndrew Turner #endif 2322548a2ec4SAndrew Turner return (true); 2323548a2ec4SAndrew Turner } 2324548a2ec4SAndrew Turner 2325548a2ec4SAndrew Turner static struct regset __elfN(regset_fpregset) = { 2326548a2ec4SAndrew Turner .note = NT_FPREGSET, 2327548a2ec4SAndrew Turner .size = sizeof(elf_prfpregset_t), 2328548a2ec4SAndrew Turner .get = __elfN(get_fpregset), 2329548a2ec4SAndrew Turner .set = __elfN(set_fpregset), 2330548a2ec4SAndrew Turner }; 2331548a2ec4SAndrew Turner ELF_REGSET(__elfN(regset_fpregset)); 2332548a2ec4SAndrew Turner 23330288d427SJohn Baldwin static bool 23340288d427SJohn Baldwin __elfN(get_thrmisc)(struct regset *rs, struct thread *td, void *buf, 23350288d427SJohn Baldwin size_t *sizep) 23360288d427SJohn Baldwin { 23370288d427SJohn Baldwin elf_thrmisc_t *thrmisc; 23380288d427SJohn Baldwin 23390288d427SJohn Baldwin if (buf != NULL) { 23400288d427SJohn Baldwin KASSERT(*sizep == sizeof(*thrmisc), 23410288d427SJohn Baldwin ("%s: invalid size", __func__)); 23420288d427SJohn Baldwin thrmisc = buf; 23430288d427SJohn Baldwin bzero(thrmisc, sizeof(*thrmisc)); 23440288d427SJohn Baldwin strcpy(thrmisc->pr_tname, td->td_name); 23450288d427SJohn Baldwin } 23460288d427SJohn Baldwin *sizep = sizeof(*thrmisc); 23470288d427SJohn Baldwin return (true); 23480288d427SJohn Baldwin } 23490288d427SJohn Baldwin 23500288d427SJohn Baldwin static struct regset __elfN(regset_thrmisc) = { 23510288d427SJohn Baldwin .note = NT_THRMISC, 23520288d427SJohn Baldwin .size = sizeof(elf_thrmisc_t), 23530288d427SJohn Baldwin .get = __elfN(get_thrmisc), 23540288d427SJohn Baldwin }; 23550288d427SJohn Baldwin ELF_REGSET(__elfN(regset_thrmisc)); 
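/*
 * Illustrative sketch, not part of the original sources: the register
 * set notes above all follow the same pattern.  A "struct regset"
 * names the ELF note type and fixed payload size and supplies get/set
 * callbacks; ELF_REGSET() registers it so it can be picked up by the
 * per-ABI regset table walked by __elfN(prepare_register_notes)()
 * below.  The get callback tolerates buf == NULL, in which case it
 * only reports the payload size through *sizep.  A hypothetical extra
 * note (NT_EXAMPLE and struct example_regs are invented here purely
 * for illustration) would be shaped like:
 *
 *	static bool
 *	example_get(struct regset *rs, struct thread *td, void *buf,
 *	    size_t *sizep)
 *	{
 *		if (buf != NULL) {
 *			KASSERT(*sizep == sizeof(struct example_regs),
 *			    ("%s: invalid size", __func__));
 *			bzero(buf, sizeof(struct example_regs));
 *		}
 *		*sizep = sizeof(struct example_regs);
 *		return (true);
 *	}
 *
 *	static struct regset example_regset = {
 *		.note = NT_EXAMPLE,
 *		.size = sizeof(struct example_regs),
 *		.get = example_get,
 *	};
 *	ELF_REGSET(example_regset);
 */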
23560288d427SJohn Baldwin 23570288d427SJohn Baldwin static bool 23580288d427SJohn Baldwin __elfN(get_lwpinfo)(struct regset *rs, struct thread *td, void *buf, 23590288d427SJohn Baldwin size_t *sizep) 23600288d427SJohn Baldwin { 23610288d427SJohn Baldwin elf_lwpinfo_t pl; 23620288d427SJohn Baldwin size_t size; 23630288d427SJohn Baldwin int structsize; 23640288d427SJohn Baldwin 23650288d427SJohn Baldwin size = sizeof(structsize) + sizeof(pl); 23660288d427SJohn Baldwin if (buf != NULL) { 23670288d427SJohn Baldwin KASSERT(*sizep == size, ("%s: invalid size", __func__)); 23680288d427SJohn Baldwin structsize = sizeof(pl); 23690288d427SJohn Baldwin memcpy(buf, &structsize, sizeof(structsize)); 23700288d427SJohn Baldwin bzero(&pl, sizeof(pl)); 23710288d427SJohn Baldwin pl.pl_lwpid = td->td_tid; 23720288d427SJohn Baldwin pl.pl_event = PL_EVENT_NONE; 23730288d427SJohn Baldwin pl.pl_sigmask = td->td_sigmask; 23740288d427SJohn Baldwin pl.pl_siglist = td->td_siglist; 23750288d427SJohn Baldwin if (td->td_si.si_signo != 0) { 23760288d427SJohn Baldwin pl.pl_event = PL_EVENT_SIGNAL; 23770288d427SJohn Baldwin pl.pl_flags |= PL_FLAG_SI; 23780288d427SJohn Baldwin #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 23790288d427SJohn Baldwin siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo); 23800288d427SJohn Baldwin #else 23810288d427SJohn Baldwin pl.pl_siginfo = td->td_si; 23820288d427SJohn Baldwin #endif 23830288d427SJohn Baldwin } 23840288d427SJohn Baldwin strcpy(pl.pl_tdname, td->td_name); 23850288d427SJohn Baldwin /* XXX TODO: supply more information in struct ptrace_lwpinfo*/ 23860288d427SJohn Baldwin memcpy((int *)buf + 1, &pl, sizeof(pl)); 23870288d427SJohn Baldwin } 23880288d427SJohn Baldwin *sizep = size; 23890288d427SJohn Baldwin return (true); 23900288d427SJohn Baldwin } 23910288d427SJohn Baldwin 23920288d427SJohn Baldwin static struct regset __elfN(regset_lwpinfo) = { 23930288d427SJohn Baldwin .note = NT_PTLWPINFO, 23940288d427SJohn Baldwin .size = sizeof(int) + sizeof(elf_lwpinfo_t), 23950288d427SJohn Baldwin .get = __elfN(get_lwpinfo), 23960288d427SJohn Baldwin }; 23970288d427SJohn Baldwin ELF_REGSET(__elfN(regset_lwpinfo)); 23980288d427SJohn Baldwin 23996b71405bSJohn Baldwin static size_t 24006b71405bSJohn Baldwin __elfN(prepare_register_notes)(struct thread *td, struct note_info_list *list, 24016b71405bSJohn Baldwin struct thread *target_td) 2402bd390213SMikolaj Golub { 24036b71405bSJohn Baldwin struct sysentvec *sv = td->td_proc->p_sysent; 24046b71405bSJohn Baldwin struct regset **regsetp, **regset_end, *regset; 24056b71405bSJohn Baldwin size_t size; 2406bd390213SMikolaj Golub 24076b71405bSJohn Baldwin size = 0; 24086b71405bSJohn Baldwin 24096b71405bSJohn Baldwin /* NT_PRSTATUS must be the first register set note. */ 24106b71405bSJohn Baldwin size += __elfN(register_regset_note)(td, list, &__elfN(regset_prstatus), 24116b71405bSJohn Baldwin target_td); 24126b71405bSJohn Baldwin 24136b71405bSJohn Baldwin regsetp = sv->sv_regset_begin; 24146b71405bSJohn Baldwin if (regsetp == NULL) { 24156b71405bSJohn Baldwin /* XXX: This shouldn't be true for any FreeBSD ABIs. 
*/ 24166b71405bSJohn Baldwin size += __elfN(register_regset_note)(td, list, 24176b71405bSJohn Baldwin &__elfN(regset_fpregset), target_td); 24186b71405bSJohn Baldwin return (size); 2419bd390213SMikolaj Golub } 24206b71405bSJohn Baldwin regset_end = sv->sv_regset_end; 24216b71405bSJohn Baldwin MPASS(regset_end != NULL); 24226b71405bSJohn Baldwin for (; regsetp < regset_end; regsetp++) { 24236b71405bSJohn Baldwin regset = *regsetp; 24246b71405bSJohn Baldwin if (regset->note == NT_PRSTATUS) 24256b71405bSJohn Baldwin continue; 24266b71405bSJohn Baldwin size += __elfN(register_regset_note)(td, list, regset, 24276b71405bSJohn Baldwin target_td); 24286b71405bSJohn Baldwin } 24296b71405bSJohn Baldwin return (size); 2430bd390213SMikolaj Golub } 2431bd390213SMikolaj Golub 24324da47b2fSMarcel Moolenaar /* 24334da47b2fSMarcel Moolenaar * Allow for MD specific notes, as well as any MD 24344da47b2fSMarcel Moolenaar * specific preparations for writing MI notes. 24354da47b2fSMarcel Moolenaar */ 24368c64af4fSJohn Polstra static void 2437bd390213SMikolaj Golub __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) 24388c64af4fSJohn Polstra { 2439bd390213SMikolaj Golub struct thread *td; 2440bd390213SMikolaj Golub void *buf; 2441bd390213SMikolaj Golub size_t size; 24428c64af4fSJohn Polstra 2443bd390213SMikolaj Golub td = (struct thread *)arg; 2444bd390213SMikolaj Golub size = *sizep; 2445bd390213SMikolaj Golub if (size != 0 && sb != NULL) 2446bd390213SMikolaj Golub buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); 244783a396ceSChristian Brueffer else 244883a396ceSChristian Brueffer buf = NULL; 2449bd390213SMikolaj Golub size = 0; 2450bd390213SMikolaj Golub __elfN(dump_thread)(td, buf, &size); 245164779280SKonstantin Belousov KASSERT(sb == NULL || *sizep == size, ("invalid size")); 245283a396ceSChristian Brueffer if (size != 0 && sb != NULL) 2453bd390213SMikolaj Golub sbuf_bcat(sb, buf, size); 2454a1761d73SChristian Brueffer free(buf, M_TEMP); 2455bd390213SMikolaj Golub *sizep = size; 24568c64af4fSJohn Polstra } 24578c64af4fSJohn Polstra 2458f1fca82eSMikolaj Golub #ifdef KINFO_PROC_SIZE 2459f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); 2460f1fca82eSMikolaj Golub #endif 2461f1fca82eSMikolaj Golub 2462f1fca82eSMikolaj Golub static void 2463f1fca82eSMikolaj Golub __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) 2464f1fca82eSMikolaj Golub { 2465f1fca82eSMikolaj Golub struct proc *p; 2466f1fca82eSMikolaj Golub size_t size; 2467f1fca82eSMikolaj Golub int structsize; 2468f1fca82eSMikolaj Golub 246961b4c627SEdward Tomasz Napierala p = arg; 2470f1fca82eSMikolaj Golub size = sizeof(structsize) + p->p_numthreads * 2471f1fca82eSMikolaj Golub sizeof(elf_kinfo_proc_t); 2472f1fca82eSMikolaj Golub 2473f1fca82eSMikolaj Golub if (sb != NULL) { 2474f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2475f1fca82eSMikolaj Golub structsize = sizeof(elf_kinfo_proc_t); 2476f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 24774daea938SKonstantin Belousov sx_slock(&proctree_lock); 2478f1fca82eSMikolaj Golub PROC_LOCK(p); 2479f1fca82eSMikolaj Golub kern_proc_out(p, sb, ELF_KERN_PROC_MASK); 24804daea938SKonstantin Belousov sx_sunlock(&proctree_lock); 2481f1fca82eSMikolaj Golub } 2482f1fca82eSMikolaj Golub *sizep = size; 2483f1fca82eSMikolaj Golub } 2484f1fca82eSMikolaj Golub 2485f1fca82eSMikolaj Golub #ifdef KINFO_FILE_SIZE 2486f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); 2487f1fca82eSMikolaj Golub #endif 
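/*
 * The NT_PROCSTAT_* notes emitted by the note_procstat_* routines
 * below share a common payload layout: a leading int holding the size
 * of a single element, followed by the element data itself (for the
 * files note, sizeof(struct kinfo_file) followed by the packed records
 * produced by kern_proc_filedesc_out()).  When invoked with sb == NULL
 * the routines only compute the note size and return it via *sizep;
 * variable-sized payloads are measured by draining a scratch sbuf into
 * sbuf_count_drain() rather than emitting any data.
 */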
2488f1fca82eSMikolaj Golub 2489f1fca82eSMikolaj Golub static void 2490f1fca82eSMikolaj Golub note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) 2491f1fca82eSMikolaj Golub { 2492f1fca82eSMikolaj Golub struct proc *p; 249314bdbaf2SConrad Meyer size_t size, sect_sz, i; 249414bdbaf2SConrad Meyer ssize_t start_len, sect_len; 249514bdbaf2SConrad Meyer int structsize, filedesc_flags; 249614bdbaf2SConrad Meyer 2497bcb60d52SConrad Meyer if (coredump_pack_fileinfo) 249814bdbaf2SConrad Meyer filedesc_flags = KERN_FILEDESC_PACK_KINFO; 249914bdbaf2SConrad Meyer else 250014bdbaf2SConrad Meyer filedesc_flags = 0; 2501f1fca82eSMikolaj Golub 250261b4c627SEdward Tomasz Napierala p = arg; 250314bdbaf2SConrad Meyer structsize = sizeof(struct kinfo_file); 2504f1fca82eSMikolaj Golub if (sb == NULL) { 2505f1fca82eSMikolaj Golub size = 0; 2506f1fca82eSMikolaj Golub sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); 25075c32e9fcSAlexander Motin sbuf_set_drain(sb, sbuf_count_drain, &size); 2508f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2509f1fca82eSMikolaj Golub PROC_LOCK(p); 251014bdbaf2SConrad Meyer kern_proc_filedesc_out(p, sb, -1, filedesc_flags); 2511f1fca82eSMikolaj Golub sbuf_finish(sb); 2512f1fca82eSMikolaj Golub sbuf_delete(sb); 2513f1fca82eSMikolaj Golub *sizep = size; 2514f1fca82eSMikolaj Golub } else { 251514bdbaf2SConrad Meyer sbuf_start_section(sb, &start_len); 251614bdbaf2SConrad Meyer 2517f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2518f1fca82eSMikolaj Golub PROC_LOCK(p); 251914bdbaf2SConrad Meyer kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), 252014bdbaf2SConrad Meyer filedesc_flags); 252114bdbaf2SConrad Meyer 252214bdbaf2SConrad Meyer sect_len = sbuf_end_section(sb, start_len, 0, 0); 252314bdbaf2SConrad Meyer if (sect_len < 0) 252414bdbaf2SConrad Meyer return; 252514bdbaf2SConrad Meyer sect_sz = sect_len; 252614bdbaf2SConrad Meyer 252714bdbaf2SConrad Meyer KASSERT(sect_sz <= *sizep, 252814bdbaf2SConrad Meyer ("kern_proc_filedesc_out did not respect maxlen; " 252914bdbaf2SConrad Meyer "requested %zu, got %zu", *sizep - sizeof(structsize), 253014bdbaf2SConrad Meyer sect_sz - sizeof(structsize))); 253114bdbaf2SConrad Meyer 253214bdbaf2SConrad Meyer for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) 253314bdbaf2SConrad Meyer sbuf_putc(sb, 0); 2534f1fca82eSMikolaj Golub } 2535f1fca82eSMikolaj Golub } 2536f1fca82eSMikolaj Golub 2537f1fca82eSMikolaj Golub #ifdef KINFO_VMENTRY_SIZE 2538f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); 2539f1fca82eSMikolaj Golub #endif 2540f1fca82eSMikolaj Golub 2541f1fca82eSMikolaj Golub static void 2542f1fca82eSMikolaj Golub note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) 2543f1fca82eSMikolaj Golub { 2544f1fca82eSMikolaj Golub struct proc *p; 2545f1fca82eSMikolaj Golub size_t size; 2546e6b95927SConrad Meyer int structsize, vmmap_flags; 2547e6b95927SConrad Meyer 2548e6b95927SConrad Meyer if (coredump_pack_vmmapinfo) 2549e6b95927SConrad Meyer vmmap_flags = KERN_VMMAP_PACK_KINFO; 2550e6b95927SConrad Meyer else 2551e6b95927SConrad Meyer vmmap_flags = 0; 2552f1fca82eSMikolaj Golub 255361b4c627SEdward Tomasz Napierala p = arg; 2554e6b95927SConrad Meyer structsize = sizeof(struct kinfo_vmentry); 2555f1fca82eSMikolaj Golub if (sb == NULL) { 2556f1fca82eSMikolaj Golub size = 0; 2557f1fca82eSMikolaj Golub sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); 25585c32e9fcSAlexander Motin sbuf_set_drain(sb, sbuf_count_drain, &size); 2559f1fca82eSMikolaj Golub 
sbuf_bcat(sb, &structsize, sizeof(structsize)); 2560f1fca82eSMikolaj Golub PROC_LOCK(p); 2561e6b95927SConrad Meyer kern_proc_vmmap_out(p, sb, -1, vmmap_flags); 2562f1fca82eSMikolaj Golub sbuf_finish(sb); 2563f1fca82eSMikolaj Golub sbuf_delete(sb); 2564f1fca82eSMikolaj Golub *sizep = size; 2565f1fca82eSMikolaj Golub } else { 2566f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2567f1fca82eSMikolaj Golub PROC_LOCK(p); 2568e6b95927SConrad Meyer kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize), 2569e6b95927SConrad Meyer vmmap_flags); 2570f1fca82eSMikolaj Golub } 2571f1fca82eSMikolaj Golub } 2572f1fca82eSMikolaj Golub 2573f1fca82eSMikolaj Golub static void 2574f1fca82eSMikolaj Golub note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep) 2575f1fca82eSMikolaj Golub { 2576f1fca82eSMikolaj Golub struct proc *p; 2577f1fca82eSMikolaj Golub size_t size; 2578f1fca82eSMikolaj Golub int structsize; 2579f1fca82eSMikolaj Golub 258061b4c627SEdward Tomasz Napierala p = arg; 2581f1fca82eSMikolaj Golub size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t); 2582f1fca82eSMikolaj Golub if (sb != NULL) { 2583f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2584f1fca82eSMikolaj Golub structsize = sizeof(gid_t); 2585f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2586f1fca82eSMikolaj Golub sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups * 2587f1fca82eSMikolaj Golub sizeof(gid_t)); 2588f1fca82eSMikolaj Golub } 2589f1fca82eSMikolaj Golub *sizep = size; 2590f1fca82eSMikolaj Golub } 2591f1fca82eSMikolaj Golub 2592f1fca82eSMikolaj Golub static void 2593f1fca82eSMikolaj Golub note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep) 2594f1fca82eSMikolaj Golub { 2595f1fca82eSMikolaj Golub struct proc *p; 2596f1fca82eSMikolaj Golub size_t size; 2597f1fca82eSMikolaj Golub int structsize; 2598f1fca82eSMikolaj Golub 259961b4c627SEdward Tomasz Napierala p = arg; 260085078b85SConrad Meyer size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask); 2601f1fca82eSMikolaj Golub if (sb != NULL) { 2602f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 260385078b85SConrad Meyer structsize = sizeof(p->p_pd->pd_cmask); 2604f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 260585078b85SConrad Meyer sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask)); 2606f1fca82eSMikolaj Golub } 2607f1fca82eSMikolaj Golub *sizep = size; 2608f1fca82eSMikolaj Golub } 2609f1fca82eSMikolaj Golub 2610f1fca82eSMikolaj Golub static void 2611f1fca82eSMikolaj Golub note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep) 2612f1fca82eSMikolaj Golub { 2613f1fca82eSMikolaj Golub struct proc *p; 2614f1fca82eSMikolaj Golub struct rlimit rlim[RLIM_NLIMITS]; 2615f1fca82eSMikolaj Golub size_t size; 2616f1fca82eSMikolaj Golub int structsize, i; 2617f1fca82eSMikolaj Golub 261861b4c627SEdward Tomasz Napierala p = arg; 2619f1fca82eSMikolaj Golub size = sizeof(structsize) + sizeof(rlim); 2620f1fca82eSMikolaj Golub if (sb != NULL) { 2621f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2622f1fca82eSMikolaj Golub structsize = sizeof(rlim); 2623f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2624f1fca82eSMikolaj Golub PROC_LOCK(p); 2625f1fca82eSMikolaj Golub for (i = 0; i < RLIM_NLIMITS; i++) 2626f6f6d240SMateusz Guzik lim_rlimit_proc(p, i, &rlim[i]); 2627f1fca82eSMikolaj Golub PROC_UNLOCK(p); 2628f1fca82eSMikolaj Golub sbuf_bcat(sb, rlim, sizeof(rlim)); 2629f1fca82eSMikolaj Golub } 
2630f1fca82eSMikolaj Golub *sizep = size; 2631f1fca82eSMikolaj Golub } 2632f1fca82eSMikolaj Golub 2633f1fca82eSMikolaj Golub static void 2634f1fca82eSMikolaj Golub note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep) 2635f1fca82eSMikolaj Golub { 2636f1fca82eSMikolaj Golub struct proc *p; 2637f1fca82eSMikolaj Golub size_t size; 2638f1fca82eSMikolaj Golub int structsize; 2639f1fca82eSMikolaj Golub 264061b4c627SEdward Tomasz Napierala p = arg; 2641f1fca82eSMikolaj Golub size = sizeof(structsize) + sizeof(p->p_osrel); 2642f1fca82eSMikolaj Golub if (sb != NULL) { 2643f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2644f1fca82eSMikolaj Golub structsize = sizeof(p->p_osrel); 2645f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2646f1fca82eSMikolaj Golub sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel)); 2647f1fca82eSMikolaj Golub } 2648f1fca82eSMikolaj Golub *sizep = size; 2649f1fca82eSMikolaj Golub } 2650f1fca82eSMikolaj Golub 2651f1fca82eSMikolaj Golub static void 2652f1fca82eSMikolaj Golub __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) 2653f1fca82eSMikolaj Golub { 2654f1fca82eSMikolaj Golub struct proc *p; 2655f1fca82eSMikolaj Golub elf_ps_strings_t ps_strings; 2656f1fca82eSMikolaj Golub size_t size; 2657f1fca82eSMikolaj Golub int structsize; 2658f1fca82eSMikolaj Golub 265961b4c627SEdward Tomasz Napierala p = arg; 2660f1fca82eSMikolaj Golub size = sizeof(structsize) + sizeof(ps_strings); 2661f1fca82eSMikolaj Golub if (sb != NULL) { 2662f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2663f1fca82eSMikolaj Golub structsize = sizeof(ps_strings); 2664f1fca82eSMikolaj Golub #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2665706f4a81SMark Johnston ps_strings = PTROUT(PROC_PS_STRINGS(p)); 2666f1fca82eSMikolaj Golub #else 2667706f4a81SMark Johnston ps_strings = PROC_PS_STRINGS(p); 2668f1fca82eSMikolaj Golub #endif 2669f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2670f1fca82eSMikolaj Golub sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); 2671f1fca82eSMikolaj Golub } 2672f1fca82eSMikolaj Golub *sizep = size; 2673f1fca82eSMikolaj Golub } 2674f1fca82eSMikolaj Golub 2675f1fca82eSMikolaj Golub static void 2676f1fca82eSMikolaj Golub __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep) 2677f1fca82eSMikolaj Golub { 2678f1fca82eSMikolaj Golub struct proc *p; 2679f1fca82eSMikolaj Golub size_t size; 2680f1fca82eSMikolaj Golub int structsize; 2681f1fca82eSMikolaj Golub 268261b4c627SEdward Tomasz Napierala p = arg; 2683f1fca82eSMikolaj Golub if (sb == NULL) { 2684f1fca82eSMikolaj Golub size = 0; 268506250515SEdward Tomasz Napierala sb = sbuf_new(NULL, NULL, AT_COUNT * sizeof(Elf_Auxinfo), 268606250515SEdward Tomasz Napierala SBUF_FIXEDLEN); 26875c32e9fcSAlexander Motin sbuf_set_drain(sb, sbuf_count_drain, &size); 2688f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2689f1fca82eSMikolaj Golub PHOLD(p); 2690f1fca82eSMikolaj Golub proc_getauxv(curthread, p, sb); 2691f1fca82eSMikolaj Golub PRELE(p); 2692f1fca82eSMikolaj Golub sbuf_finish(sb); 2693f1fca82eSMikolaj Golub sbuf_delete(sb); 2694f1fca82eSMikolaj Golub *sizep = size; 2695f1fca82eSMikolaj Golub } else { 2696f1fca82eSMikolaj Golub structsize = sizeof(Elf_Auxinfo); 2697f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2698f1fca82eSMikolaj Golub PHOLD(p); 2699f1fca82eSMikolaj Golub proc_getauxv(curthread, p, sb); 2700f1fca82eSMikolaj Golub PRELE(p); 2701f1fca82eSMikolaj Golub } 
2702f1fca82eSMikolaj Golub } 2703f1fca82eSMikolaj Golub 270419621645SAlex Richardson static bool 270592328a32SKonstantin Belousov __elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote, 270692328a32SKonstantin Belousov const char *note_vendor, const Elf_Phdr *pnote, 270719621645SAlex Richardson bool (*cb)(const Elf_Note *, void *, bool *), void *cb_arg) 270832c01de2SDmitry Chagin { 2709267c52fcSKonstantin Belousov const Elf_Note *note, *note0, *note_end; 271032c01de2SDmitry Chagin const char *note_name; 27116c775eb6SKonstantin Belousov char *buf; 27126c775eb6SKonstantin Belousov int i, error; 271319621645SAlex Richardson bool res; 271432c01de2SDmitry Chagin 27156c775eb6SKonstantin Belousov /* We need some limit, might as well use PAGE_SIZE. */ 27166c775eb6SKonstantin Belousov if (pnote == NULL || pnote->p_filesz > PAGE_SIZE) 271719621645SAlex Richardson return (false); 27186c775eb6SKonstantin Belousov ASSERT_VOP_LOCKED(imgp->vp, "parse_notes"); 27196c775eb6SKonstantin Belousov if (pnote->p_offset > PAGE_SIZE || 27206c775eb6SKonstantin Belousov pnote->p_filesz > PAGE_SIZE - pnote->p_offset) { 27212d6b8546SKonstantin Belousov buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT); 27222d6b8546SKonstantin Belousov if (buf == NULL) { 2723b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 27246c775eb6SKonstantin Belousov buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK); 272578022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 27262d6b8546SKonstantin Belousov } 27276c775eb6SKonstantin Belousov error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz, 27286c775eb6SKonstantin Belousov pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED, 27296c775eb6SKonstantin Belousov curthread->td_ucred, NOCRED, NULL, curthread); 27306c775eb6SKonstantin Belousov if (error != 0) { 27316c775eb6SKonstantin Belousov uprintf("i/o error PT_NOTE\n"); 2732eda8fe63SKonstantin Belousov goto retf; 27336c775eb6SKonstantin Belousov } 27346c775eb6SKonstantin Belousov note = note0 = (const Elf_Note *)buf; 27356c775eb6SKonstantin Belousov note_end = (const Elf_Note *)(buf + pnote->p_filesz); 27366c775eb6SKonstantin Belousov } else { 27376c775eb6SKonstantin Belousov note = note0 = (const Elf_Note *)(imgp->image_header + 27386c775eb6SKonstantin Belousov pnote->p_offset); 273932c01de2SDmitry Chagin note_end = (const Elf_Note *)(imgp->image_header + 274032c01de2SDmitry Chagin pnote->p_offset + pnote->p_filesz); 27416c775eb6SKonstantin Belousov buf = NULL; 27426c775eb6SKonstantin Belousov } 2743267c52fcSKonstantin Belousov for (i = 0; i < 100 && note >= note0 && note < note_end; i++) { 2744d1ae5c83SKonstantin Belousov if (!aligned(note, Elf32_Addr) || (const char *)note_end - 27456c775eb6SKonstantin Belousov (const char *)note < sizeof(Elf_Note)) { 2746eda8fe63SKonstantin Belousov goto retf; 27476c775eb6SKonstantin Belousov } 274892328a32SKonstantin Belousov if (note->n_namesz != checknote->n_namesz || 274992328a32SKonstantin Belousov note->n_descsz != checknote->n_descsz || 275092328a32SKonstantin Belousov note->n_type != checknote->n_type) 275132c01de2SDmitry Chagin goto nextnote; 275232c01de2SDmitry Chagin note_name = (const char *)(note + 1); 275392328a32SKonstantin Belousov if (note_name + checknote->n_namesz >= 275492328a32SKonstantin Belousov (const char *)note_end || strncmp(note_vendor, 275592328a32SKonstantin Belousov note_name, checknote->n_namesz) != 0) 275632c01de2SDmitry Chagin goto nextnote; 275732c01de2SDmitry Chagin 275892328a32SKonstantin Belousov if (cb(note, cb_arg, &res)) 27596c775eb6SKonstantin Belousov 
goto ret; 276032c01de2SDmitry Chagin nextnote: 276132c01de2SDmitry Chagin note = (const Elf_Note *)((const char *)(note + 1) + 27621b8388cdSMikolaj Golub roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) + 27631b8388cdSMikolaj Golub roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE)); 276432c01de2SDmitry Chagin } 2765eda8fe63SKonstantin Belousov retf: 276619621645SAlex Richardson res = false; 27676c775eb6SKonstantin Belousov ret: 27686c775eb6SKonstantin Belousov free(buf, M_TEMP); 27696c775eb6SKonstantin Belousov return (res); 277032c01de2SDmitry Chagin } 277132c01de2SDmitry Chagin 277292328a32SKonstantin Belousov struct brandnote_cb_arg { 277392328a32SKonstantin Belousov Elf_Brandnote *brandnote; 277492328a32SKonstantin Belousov int32_t *osrel; 277592328a32SKonstantin Belousov }; 277692328a32SKonstantin Belousov 277719621645SAlex Richardson static bool 277819621645SAlex Richardson brandnote_cb(const Elf_Note *note, void *arg0, bool *res) 277992328a32SKonstantin Belousov { 278092328a32SKonstantin Belousov struct brandnote_cb_arg *arg; 278192328a32SKonstantin Belousov 278292328a32SKonstantin Belousov arg = arg0; 278392328a32SKonstantin Belousov 278492328a32SKonstantin Belousov /* 278592328a32SKonstantin Belousov * Fetch the osreldate for binary from the ELF OSABI-note if 278692328a32SKonstantin Belousov * necessary. 278792328a32SKonstantin Belousov */ 278892328a32SKonstantin Belousov *res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 && 278992328a32SKonstantin Belousov arg->brandnote->trans_osrel != NULL ? 279019621645SAlex Richardson arg->brandnote->trans_osrel(note, arg->osrel) : true; 279192328a32SKonstantin Belousov 279219621645SAlex Richardson return (true); 279392328a32SKonstantin Belousov } 279492328a32SKonstantin Belousov 2795cefb93f2SKonstantin Belousov static Elf_Note fctl_note = { 2796cefb93f2SKonstantin Belousov .n_namesz = sizeof(FREEBSD_ABI_VENDOR), 2797cefb93f2SKonstantin Belousov .n_descsz = sizeof(uint32_t), 2798cefb93f2SKonstantin Belousov .n_type = NT_FREEBSD_FEATURE_CTL, 2799cefb93f2SKonstantin Belousov }; 2800cefb93f2SKonstantin Belousov 2801cefb93f2SKonstantin Belousov struct fctl_cb_arg { 280219621645SAlex Richardson bool *has_fctl0; 2803cefb93f2SKonstantin Belousov uint32_t *fctl0; 2804cefb93f2SKonstantin Belousov }; 2805cefb93f2SKonstantin Belousov 280619621645SAlex Richardson static bool 280719621645SAlex Richardson note_fctl_cb(const Elf_Note *note, void *arg0, bool *res) 2808cefb93f2SKonstantin Belousov { 2809cefb93f2SKonstantin Belousov struct fctl_cb_arg *arg; 2810cefb93f2SKonstantin Belousov const Elf32_Word *desc; 2811cefb93f2SKonstantin Belousov uintptr_t p; 2812cefb93f2SKonstantin Belousov 2813cefb93f2SKonstantin Belousov arg = arg0; 2814cefb93f2SKonstantin Belousov p = (uintptr_t)(note + 1); 2815cefb93f2SKonstantin Belousov p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); 2816cefb93f2SKonstantin Belousov desc = (const Elf32_Word *)p; 281719621645SAlex Richardson *arg->has_fctl0 = true; 2818cefb93f2SKonstantin Belousov *arg->fctl0 = desc[0]; 281919621645SAlex Richardson *res = true; 282019621645SAlex Richardson return (true); 2821cefb93f2SKonstantin Belousov } 2822cefb93f2SKonstantin Belousov 282332c01de2SDmitry Chagin /* 2824cefb93f2SKonstantin Belousov * Try to find the appropriate ABI-note section for checknote, fetch 2825cefb93f2SKonstantin Belousov * the osreldate and feature control flags for binary from the ELF 2826cefb93f2SKonstantin Belousov * OSABI-note. 
Only the first page of the image is searched, the same 2827cefb93f2SKonstantin Belousov * as for headers. 28281a9c7decSKonstantin Belousov */ 282919621645SAlex Richardson static bool 283092328a32SKonstantin Belousov __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote, 283119621645SAlex Richardson int32_t *osrel, bool *has_fctl0, uint32_t *fctl0) 28321a9c7decSKonstantin Belousov { 28331a9c7decSKonstantin Belousov const Elf_Phdr *phdr; 28341a9c7decSKonstantin Belousov const Elf_Ehdr *hdr; 283592328a32SKonstantin Belousov struct brandnote_cb_arg b_arg; 2836cefb93f2SKonstantin Belousov struct fctl_cb_arg f_arg; 2837cefb93f2SKonstantin Belousov int i, j; 28381a9c7decSKonstantin Belousov 28391a9c7decSKonstantin Belousov hdr = (const Elf_Ehdr *)imgp->image_header; 28401a9c7decSKonstantin Belousov phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 284192328a32SKonstantin Belousov b_arg.brandnote = brandnote; 284292328a32SKonstantin Belousov b_arg.osrel = osrel; 28430cad2aa2SKonstantin Belousov f_arg.has_fctl0 = has_fctl0; 2844cefb93f2SKonstantin Belousov f_arg.fctl0 = fctl0; 28451a9c7decSKonstantin Belousov 28461a9c7decSKonstantin Belousov for (i = 0; i < hdr->e_phnum; i++) { 284792328a32SKonstantin Belousov if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, 284892328a32SKonstantin Belousov &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb, 284992328a32SKonstantin Belousov &b_arg)) { 2850cefb93f2SKonstantin Belousov for (j = 0; j < hdr->e_phnum; j++) { 2851cefb93f2SKonstantin Belousov if (phdr[j].p_type == PT_NOTE && 2852cefb93f2SKonstantin Belousov __elfN(parse_notes)(imgp, &fctl_note, 2853cefb93f2SKonstantin Belousov FREEBSD_ABI_VENDOR, &phdr[j], 2854cefb93f2SKonstantin Belousov note_fctl_cb, &f_arg)) 2855cefb93f2SKonstantin Belousov break; 2856cefb93f2SKonstantin Belousov } 285719621645SAlex Richardson return (true); 28581a9c7decSKonstantin Belousov } 285992328a32SKonstantin Belousov } 286019621645SAlex Richardson return (false); 28611a9c7decSKonstantin Belousov 28621a9c7decSKonstantin Belousov } 28631a9c7decSKonstantin Belousov 28641a9c7decSKonstantin Belousov /* 2865e1743d02SSøren Schmidt * Tell kern_execve.c about it, with a little help from the linker. 
2866e1743d02SSøren Schmidt */ 2867a360a43dSJake Burkholder static struct execsw __elfN(execsw) = { 2868b7feabf9SEd Maste .ex_imgact = __CONCAT(exec_, __elfN(imgact)), 2869b7feabf9SEd Maste .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) 2870a360a43dSJake Burkholder }; 2871a360a43dSJake Burkholder EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); 2872e7228204SAlfred Perlstein 2873ed167eaaSKonstantin Belousov static vm_prot_t 2874ed167eaaSKonstantin Belousov __elfN(trans_prot)(Elf_Word flags) 2875ed167eaaSKonstantin Belousov { 2876ed167eaaSKonstantin Belousov vm_prot_t prot; 2877ed167eaaSKonstantin Belousov 2878ed167eaaSKonstantin Belousov prot = 0; 2879ed167eaaSKonstantin Belousov if (flags & PF_X) 2880ed167eaaSKonstantin Belousov prot |= VM_PROT_EXECUTE; 2881ed167eaaSKonstantin Belousov if (flags & PF_W) 2882ed167eaaSKonstantin Belousov prot |= VM_PROT_WRITE; 2883ed167eaaSKonstantin Belousov if (flags & PF_R) 2884ed167eaaSKonstantin Belousov prot |= VM_PROT_READ; 2885eb785fabSKonstantin Belousov #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) 2886126b36a2SKonstantin Belousov if (i386_read_exec && (flags & PF_R)) 2887676eda08SMarcel Moolenaar prot |= VM_PROT_EXECUTE; 2888676eda08SMarcel Moolenaar #endif 2889ed167eaaSKonstantin Belousov return (prot); 2890ed167eaaSKonstantin Belousov } 2891ed167eaaSKonstantin Belousov 2892ed167eaaSKonstantin Belousov static Elf_Word 2893ed167eaaSKonstantin Belousov __elfN(untrans_prot)(vm_prot_t prot) 2894ed167eaaSKonstantin Belousov { 2895ed167eaaSKonstantin Belousov Elf_Word flags; 2896ed167eaaSKonstantin Belousov 2897ed167eaaSKonstantin Belousov flags = 0; 2898ed167eaaSKonstantin Belousov if (prot & VM_PROT_EXECUTE) 2899ed167eaaSKonstantin Belousov flags |= PF_X; 2900ed167eaaSKonstantin Belousov if (prot & VM_PROT_READ) 2901ed167eaaSKonstantin Belousov flags |= PF_R; 2902ed167eaaSKonstantin Belousov if (prot & VM_PROT_WRITE) 2903ed167eaaSKonstantin Belousov flags |= PF_W; 2904ed167eaaSKonstantin Belousov return (flags); 2905ed167eaaSKonstantin Belousov } 2906
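/*
 * Worked example for the translation helpers above (illustrative):
 * a PT_LOAD segment flagged PF_R | PF_X is mapped by
 * __elfN(trans_prot)() to VM_PROT_READ | VM_PROT_EXECUTE, and
 * __elfN(untrans_prot)() turns that protection back into PF_R | PF_X.
 * For 32-bit ELF images on x86 with the i386_read_exec knob set, any
 * readable segment is additionally granted VM_PROT_EXECUTE, so the
 * mapping is not exactly invertible there.
 */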