1e1743d02SSøren Schmidt /*- 28a36da99SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause 38a36da99SPedro F. Giffuni * 486be94fcSTycho Nightingale * Copyright (c) 2017 Dell EMC 5455d3589SDavid E. O'Brien * Copyright (c) 2000-2001, 2003 David O'Brien 69a14aa01SUlrich Spörlein * Copyright (c) 1995-1996 Søren Schmidt 7e1743d02SSøren Schmidt * Copyright (c) 1996 Peter Wemm 8e1743d02SSøren Schmidt * All rights reserved. 9e1743d02SSøren Schmidt * 10e1743d02SSøren Schmidt * Redistribution and use in source and binary forms, with or without 11e1743d02SSøren Schmidt * modification, are permitted provided that the following conditions 12e1743d02SSøren Schmidt * are met: 13e1743d02SSøren Schmidt * 1. Redistributions of source code must retain the above copyright 14e1743d02SSøren Schmidt * notice, this list of conditions and the following disclaimer 15e1743d02SSøren Schmidt * in this position and unchanged. 16e1743d02SSøren Schmidt * 2. Redistributions in binary form must reproduce the above copyright 17e1743d02SSøren Schmidt * notice, this list of conditions and the following disclaimer in the 18e1743d02SSøren Schmidt * documentation and/or other materials provided with the distribution. 19e1743d02SSøren Schmidt * 3. The name of the author may not be used to endorse or promote products 2021dc7d4fSJens Schweikhardt * derived from this software without specific prior written permission 21e1743d02SSøren Schmidt * 22e1743d02SSøren Schmidt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23e1743d02SSøren Schmidt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24e1743d02SSøren Schmidt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25e1743d02SSøren Schmidt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26e1743d02SSøren Schmidt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27e1743d02SSøren Schmidt * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28e1743d02SSøren Schmidt * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29e1743d02SSøren Schmidt * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30e1743d02SSøren Schmidt * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31e1743d02SSøren Schmidt * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32e1743d02SSøren Schmidt */ 33e1743d02SSøren Schmidt 34677b542eSDavid E. O'Brien #include <sys/cdefs.h> 35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 36677b542eSDavid E. O'Brien 3712bc222eSJonathan Anderson #include "opt_capsicum.h" 3862919d78SPeter Wemm 39e1743d02SSøren Schmidt #include <sys/param.h> 404a144410SRobert Watson #include <sys/capsicum.h> 4178f57a9cSMark Johnston #include <sys/compressor.h> 42e1743d02SSøren Schmidt #include <sys/exec.h> 438c64af4fSJohn Polstra #include <sys/fcntl.h> 44e1743d02SSøren Schmidt #include <sys/imgact.h> 45e1743d02SSøren Schmidt #include <sys/imgact_elf.h> 46b96bd95bSIan Lepore #include <sys/jail.h> 47e1743d02SSøren Schmidt #include <sys/kernel.h> 48f34fa851SJohn Baldwin #include <sys/lock.h> 49e1743d02SSøren Schmidt #include <sys/malloc.h> 5068ff2a43SChristian S.J. 
Peron #include <sys/mount.h> 518c64af4fSJohn Polstra #include <sys/mman.h> 52a794e791SBruce Evans #include <sys/namei.h> 53a794e791SBruce Evans #include <sys/proc.h> 548c64af4fSJohn Polstra #include <sys/procfs.h> 5586be94fcSTycho Nightingale #include <sys/ptrace.h> 561ba5ad42SEdward Tomasz Napierala #include <sys/racct.h> 57b7924341SAndrew Turner #include <sys/reg.h> 588c64af4fSJohn Polstra #include <sys/resourcevar.h> 5989f6b863SAttilio Rao #include <sys/rwlock.h> 60bd390213SMikolaj Golub #include <sys/sbuf.h> 61da61b9a6SAlan Cox #include <sys/sf_buf.h> 62ee235befSKonstantin Belousov #include <sys/smp.h> 6336240ea5SDoug Rabson #include <sys/systm.h> 64e1743d02SSøren Schmidt #include <sys/signalvar.h> 658c64af4fSJohn Polstra #include <sys/stat.h> 661005a129SJohn Baldwin #include <sys/sx.h> 678c64af4fSJohn Polstra #include <sys/syscall.h> 68e1743d02SSøren Schmidt #include <sys/sysctl.h> 698c64af4fSJohn Polstra #include <sys/sysent.h> 70a794e791SBruce Evans #include <sys/vnode.h> 71e7228204SAlfred Perlstein #include <sys/syslog.h> 72e7228204SAlfred Perlstein #include <sys/eventhandler.h> 73f1fca82eSMikolaj Golub #include <sys/user.h> 74e7228204SAlfred Perlstein 75e1743d02SSøren Schmidt #include <vm/vm.h> 76e1743d02SSøren Schmidt #include <vm/vm_kern.h> 77e1743d02SSøren Schmidt #include <vm/vm_param.h> 78e1743d02SSøren Schmidt #include <vm/pmap.h> 79e1743d02SSøren Schmidt #include <vm/vm_map.h> 800ff27d31SJohn Polstra #include <vm/vm_object.h> 81e1743d02SSøren Schmidt #include <vm/vm_extern.h> 82e1743d02SSøren Schmidt 8352c24af7SPeter Wemm #include <machine/elf.h> 84e1743d02SSøren Schmidt #include <machine/md_var.h> 85e1743d02SSøren Schmidt 861b8388cdSMikolaj Golub #define ELF_NOTE_ROUNDSIZE 4 87c815a20cSDavid E. O'Brien #define OLD_EI_BRAND 8 88c815a20cSDavid E. O'Brien 893ebc1248SPeter Wemm static int __elfN(check_header)(const Elf_Ehdr *hdr); 9032c01de2SDmitry Chagin static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, 9109c78d53SEdward Tomasz Napierala const char *interp, int32_t *osrel, uint32_t *fctl0); 923ebc1248SPeter Wemm static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, 931699546dSEdward Tomasz Napierala u_long *entry); 940bbee4cdSKonstantin Belousov static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset, 951699546dSEdward Tomasz Napierala caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot); 963ebc1248SPeter Wemm static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); 97a95659f7SEd Maste static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note, 9889ffc202SBjoern A. 
Zeeb int32_t *osrel); 99a95659f7SEd Maste static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel); 10019621645SAlex Richardson static bool __elfN(check_note)(struct image_params *imgp, 10119621645SAlex Richardson Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0, 1020cad2aa2SKonstantin Belousov uint32_t *fctl0); 103ed167eaaSKonstantin Belousov static vm_prot_t __elfN(trans_prot)(Elf_Word); 104ed167eaaSKonstantin Belousov static Elf_Word __elfN(untrans_prot)(vm_prot_t); 1056b71405bSJohn Baldwin static size_t __elfN(prepare_register_notes)(struct thread *td, 1066b71405bSJohn Baldwin struct note_info_list *list, struct thread *target_td); 107e1743d02SSøren Schmidt 1087029da5cSPawel Biernacki SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), 1097029da5cSPawel Biernacki CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 110a360a43dSJake Burkholder ""); 111a360a43dSJake Burkholder 112e548a1d4SJake Burkholder int __elfN(fallback_brand) = -1; 113e548a1d4SJake Burkholder SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, 114af3b2549SHans Petter Selasky fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0, 115a360a43dSJake Burkholder __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); 116a360a43dSJake Burkholder 117551d79e1SMarcel Moolenaar static int elf_legacy_coredump = 0; 118a360a43dSJake Burkholder SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 1191cbb879dSEd Maste &elf_legacy_coredump, 0, 1201cbb879dSEd Maste "include all and only RW pages in core dumps"); 121e1743d02SSøren Schmidt 12262c625fdSKonstantin Belousov int __elfN(nxstack) = 1234d22d07aSKonstantin Belousov #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \ 1244bf4b0f1SJohn Baldwin (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \ 1254bf4b0f1SJohn Baldwin defined(__riscv) 12662c625fdSKonstantin Belousov 1; 12762c625fdSKonstantin Belousov #else 12862c625fdSKonstantin Belousov 0; 12962c625fdSKonstantin Belousov #endif 130291c06a1SKonstantin Belousov SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, 131291c06a1SKonstantin Belousov nxstack, CTLFLAG_RW, &__elfN(nxstack), 0, 132291c06a1SKonstantin Belousov __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack"); 133291c06a1SKonstantin Belousov 134eb029587SKonstantin Belousov #if defined(__amd64__) 135eb029587SKonstantin Belousov static int __elfN(vdso) = 1; 136eb029587SKonstantin Belousov SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, 137eb029587SKonstantin Belousov vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0, 138eb029587SKonstantin Belousov __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading"); 139eb029587SKonstantin Belousov #else 140eb029587SKonstantin Belousov static int __elfN(vdso) = 0; 141eb029587SKonstantin Belousov #endif 142eb029587SKonstantin Belousov 143eb785fabSKonstantin Belousov #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) 144126b36a2SKonstantin Belousov int i386_read_exec = 0; 145126b36a2SKonstantin Belousov SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, 146126b36a2SKonstantin Belousov "enable execution from readable segments"); 147126b36a2SKonstantin Belousov #endif 148126b36a2SKonstantin Belousov 14995aafd69SKonstantin Belousov static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR; 150f33533daSKonstantin Belousov static int 151f33533daSKonstantin Belousov sysctl_pie_base(SYSCTL_HANDLER_ARGS) 152f33533daSKonstantin Belousov { 153f33533daSKonstantin Belousov u_long 
val; 154f33533daSKonstantin Belousov int error; 155f33533daSKonstantin Belousov 156f33533daSKonstantin Belousov val = __elfN(pie_base); 157f33533daSKonstantin Belousov error = sysctl_handle_long(oidp, &val, 0, req); 158f33533daSKonstantin Belousov if (error != 0 || req->newptr == NULL) 159f33533daSKonstantin Belousov return (error); 160f33533daSKonstantin Belousov if ((val & PAGE_MASK) != 0) 161f33533daSKonstantin Belousov return (EINVAL); 162f33533daSKonstantin Belousov __elfN(pie_base) = val; 163f33533daSKonstantin Belousov return (0); 164f33533daSKonstantin Belousov } 165f33533daSKonstantin Belousov SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base, 166f33533daSKonstantin Belousov CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, 167f33533daSKonstantin Belousov sysctl_pie_base, "LU", 16895aafd69SKonstantin Belousov "PIE load base without randomization"); 16995aafd69SKonstantin Belousov 1707029da5cSPawel Biernacki SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr, 1717029da5cSPawel Biernacki CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 172fa50a355SKonstantin Belousov ""); 173fa50a355SKonstantin Belousov #define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr) 174fa50a355SKonstantin Belousov 175b014e0f1SMarcin Wojtas /* 176f0687f3eSEd Maste * Enable ASLR by default for 64-bit non-PIE binaries. 32-bit architectures 177f0687f3eSEd Maste * have limited address space (which can cause issues for applications with 178f0687f3eSEd Maste * high memory use) so we leave it off there. 179b014e0f1SMarcin Wojtas */ 180b014e0f1SMarcin Wojtas static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64; 181fa50a355SKonstantin Belousov SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN, 182fa50a355SKonstantin Belousov &__elfN(aslr_enabled), 0, 183fa50a355SKonstantin Belousov __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) 184fa50a355SKonstantin Belousov ": enable address map randomization"); 185fa50a355SKonstantin Belousov 186b014e0f1SMarcin Wojtas /* 187f0687f3eSEd Maste * Enable ASLR by default for 64-bit PIE binaries. 188b014e0f1SMarcin Wojtas */ 189b014e0f1SMarcin Wojtas static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64; 190fa50a355SKonstantin Belousov SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN, 191fa50a355SKonstantin Belousov &__elfN(pie_aslr_enabled), 0, 192fa50a355SKonstantin Belousov __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) 193fa50a355SKonstantin Belousov ": enable address map randomization for PIE binaries"); 194fa50a355SKonstantin Belousov 195b014e0f1SMarcin Wojtas /* 196f0687f3eSEd Maste * Sbrk is deprecated and it can be assumed that in most cases it will not be 197f0687f3eSEd Maste * used anyway. This setting is valid only with ASLR enabled, and allows ASLR 198f0687f3eSEd Maste * to use the bss grow region. 
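 * For example, on the 64-bit ABI this knob is exposed as the
 * kern.elf64.aslr.honor_sbrk sysctl; leaving it at 0 lets the
 * randomized mappings use the area that would otherwise be kept
 * free for sbrk()-driven bss growth.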
199b014e0f1SMarcin Wojtas */ 200b014e0f1SMarcin Wojtas static int __elfN(aslr_honor_sbrk) = 0; 201fa50a355SKonstantin Belousov SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW, 202fa50a355SKonstantin Belousov &__elfN(aslr_honor_sbrk), 0, 203fa50a355SKonstantin Belousov __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); 204fa50a355SKonstantin Belousov 2051811c1e9SMark Johnston static int __elfN(aslr_stack) = 1; 2061811c1e9SMark Johnston SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN, 2071811c1e9SMark Johnston &__elfN(aslr_stack), 0, 208fc83c5a7SKonstantin Belousov __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) 2091811c1e9SMark Johnston ": enable stack address randomization"); 210fc83c5a7SKonstantin Belousov 211939f0b63SKornel Dulęba static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64; 212939f0b63SKornel Dulęba SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN, 213939f0b63SKornel Dulęba &__elfN(aslr_shared_page), 0, 214939f0b63SKornel Dulęba __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) 215939f0b63SKornel Dulęba ": enable shared page address randomization"); 216939f0b63SKornel Dulęba 217944cf37bSKonstantin Belousov static int __elfN(sigfastblock) = 1; 218944cf37bSKonstantin Belousov SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock, 219944cf37bSKonstantin Belousov CTLFLAG_RWTUN, &__elfN(sigfastblock), 0, 220944cf37bSKonstantin Belousov "enable sigfastblock for new processes"); 221944cf37bSKonstantin Belousov 2222e1c94aaSKonstantin Belousov static bool __elfN(allow_wx) = true; 2232e1c94aaSKonstantin Belousov SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx, 2242e1c94aaSKonstantin Belousov CTLFLAG_RWTUN, &__elfN(allow_wx), 0, 2252e1c94aaSKonstantin Belousov "Allow pages to be mapped simultaneously writable and executable"); 2262e1c94aaSKonstantin Belousov 2273ebc1248SPeter Wemm static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; 228e1743d02SSøren Schmidt 229545517f1SEdward Tomasz Napierala #define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a)) 23093d1c728SKonstantin Belousov 23132c01de2SDmitry Chagin Elf_Brandnote __elfN(freebsd_brandnote) = { 23232c01de2SDmitry Chagin .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), 23332c01de2SDmitry Chagin .hdr.n_descsz = sizeof(int32_t), 2344c22b468SEd Maste .hdr.n_type = NT_FREEBSD_ABI_TAG, 23532c01de2SDmitry Chagin .vendor = FREEBSD_ABI_VENDOR, 23689ffc202SBjoern A. Zeeb .flags = BN_TRANSLATE_OSREL, 23789ffc202SBjoern A. Zeeb .trans_osrel = __elfN(freebsd_trans_osrel) 23832c01de2SDmitry Chagin }; 23932c01de2SDmitry Chagin 240a95659f7SEd Maste static bool 24189ffc202SBjoern A. Zeeb __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) 24289ffc202SBjoern A. Zeeb { 24389ffc202SBjoern A. Zeeb uintptr_t p; 24489ffc202SBjoern A. Zeeb 24589ffc202SBjoern A. Zeeb p = (uintptr_t)(note + 1); 2461b8388cdSMikolaj Golub p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); 24789ffc202SBjoern A. Zeeb *osrel = *(const int32_t *)(p); 24889ffc202SBjoern A. Zeeb 249a95659f7SEd Maste return (true); 25089ffc202SBjoern A. Zeeb } 25189ffc202SBjoern A. Zeeb 25289ffc202SBjoern A. Zeeb static const char GNU_ABI_VENDOR[] = "GNU"; 25389ffc202SBjoern A. Zeeb static int GNU_KFREEBSD_ABI_DESC = 3; 25489ffc202SBjoern A. Zeeb 25589ffc202SBjoern A. Zeeb Elf_Brandnote __elfN(kfreebsd_brandnote) = { 25689ffc202SBjoern A. Zeeb .hdr.n_namesz = sizeof(GNU_ABI_VENDOR), 25789ffc202SBjoern A. Zeeb .hdr.n_descsz = 16, /* XXX at least 16 */ 25889ffc202SBjoern A. 
Zeeb .hdr.n_type = 1, 25989ffc202SBjoern A. Zeeb .vendor = GNU_ABI_VENDOR, 26089ffc202SBjoern A. Zeeb .flags = BN_TRANSLATE_OSREL, 26189ffc202SBjoern A. Zeeb .trans_osrel = kfreebsd_trans_osrel 26289ffc202SBjoern A. Zeeb }; 26389ffc202SBjoern A. Zeeb 264a95659f7SEd Maste static bool 26589ffc202SBjoern A. Zeeb kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel) 26689ffc202SBjoern A. Zeeb { 26789ffc202SBjoern A. Zeeb const Elf32_Word *desc; 26889ffc202SBjoern A. Zeeb uintptr_t p; 26989ffc202SBjoern A. Zeeb 27089ffc202SBjoern A. Zeeb p = (uintptr_t)(note + 1); 2711b8388cdSMikolaj Golub p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); 27289ffc202SBjoern A. Zeeb 27389ffc202SBjoern A. Zeeb desc = (const Elf32_Word *)p; 27489ffc202SBjoern A. Zeeb if (desc[0] != GNU_KFREEBSD_ABI_DESC) 275a95659f7SEd Maste return (false); 27689ffc202SBjoern A. Zeeb 27789ffc202SBjoern A. Zeeb /* 27889ffc202SBjoern A. Zeeb * Debian GNU/kFreeBSD embed the earliest compatible kernel version 27989ffc202SBjoern A. Zeeb * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way. 28089ffc202SBjoern A. Zeeb */ 28189ffc202SBjoern A. Zeeb *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3]; 28289ffc202SBjoern A. Zeeb 283a95659f7SEd Maste return (true); 28489ffc202SBjoern A. Zeeb } 28589ffc202SBjoern A. Zeeb 286e1743d02SSøren Schmidt int 2873ebc1248SPeter Wemm __elfN(insert_brand_entry)(Elf_Brandinfo *entry) 288e1743d02SSøren Schmidt { 289e1743d02SSøren Schmidt int i; 290e1743d02SSøren Schmidt 2913ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 292ea5a2b2eSSøren Schmidt if (elf_brand_list[i] == NULL) { 293ea5a2b2eSSøren Schmidt elf_brand_list[i] = entry; 294e1743d02SSøren Schmidt break; 295e1743d02SSøren Schmidt } 296e1743d02SSøren Schmidt } 297925c8b5bSBjoern A. Zeeb if (i == MAX_BRANDS) { 298925c8b5bSBjoern A. Zeeb printf("WARNING: %s: could not insert brandinfo entry: %p\n", 299925c8b5bSBjoern A. Zeeb __func__, entry); 300a7cddfedSJake Burkholder return (-1); 301925c8b5bSBjoern A. 
Zeeb } 302a7cddfedSJake Burkholder return (0); 303e1743d02SSøren Schmidt } 304e1743d02SSøren Schmidt 305e1743d02SSøren Schmidt int 3063ebc1248SPeter Wemm __elfN(remove_brand_entry)(Elf_Brandinfo *entry) 307e1743d02SSøren Schmidt { 308e1743d02SSøren Schmidt int i; 309e1743d02SSøren Schmidt 3103ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 311ea5a2b2eSSøren Schmidt if (elf_brand_list[i] == entry) { 312ea5a2b2eSSøren Schmidt elf_brand_list[i] = NULL; 313e1743d02SSøren Schmidt break; 314e1743d02SSøren Schmidt } 315e1743d02SSøren Schmidt } 316ea5a2b2eSSøren Schmidt if (i == MAX_BRANDS) 317a7cddfedSJake Burkholder return (-1); 318a7cddfedSJake Burkholder return (0); 319e1743d02SSøren Schmidt } 320e1743d02SSøren Schmidt 3214082b189SAlex Richardson bool 3223ebc1248SPeter Wemm __elfN(brand_inuse)(Elf_Brandinfo *entry) 323096977faSMark Newton { 324096977faSMark Newton struct proc *p; 3254082b189SAlex Richardson bool rval = false; 326096977faSMark Newton 3271005a129SJohn Baldwin sx_slock(&allproc_lock); 3284f506694SXin LI FOREACH_PROC_IN_SYSTEM(p) { 329553629ebSJake Burkholder if (p->p_sysent == entry->sysvec) { 3304082b189SAlex Richardson rval = true; 331553629ebSJake Burkholder break; 332096977faSMark Newton } 333553629ebSJake Burkholder } 3341005a129SJohn Baldwin sx_sunlock(&allproc_lock); 335096977faSMark Newton 336553629ebSJake Burkholder return (rval); 337096977faSMark Newton } 338096977faSMark Newton 3395fe3ed62SJake Burkholder static Elf_Brandinfo * 34032c01de2SDmitry Chagin __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, 34109c78d53SEdward Tomasz Napierala int32_t *osrel, uint32_t *fctl0) 3425fe3ed62SJake Burkholder { 34332c01de2SDmitry Chagin const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; 344af582aaeSKonstantin Belousov Elf_Brandinfo *bi, *bi_m; 34519621645SAlex Richardson bool ret, has_fctl0; 34609c78d53SEdward Tomasz Napierala int i, interp_name_len; 34709c78d53SEdward Tomasz Napierala 348be7808dcSKonstantin Belousov interp_name_len = interp != NULL ? strlen(interp) + 1 : 0; 3495fe3ed62SJake Burkholder 3505fe3ed62SJake Burkholder /* 35132c01de2SDmitry Chagin * We support four types of branding -- (1) the ELF EI_OSABI field 3525fe3ed62SJake Burkholder * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string 35332c01de2SDmitry Chagin * branding w/in the ELF header, (3) path of the `interp_path' 35432c01de2SDmitry Chagin * field, and (4) the ".note.ABI-tag" ELF section. 3555fe3ed62SJake Burkholder */ 3565fe3ed62SJake Burkholder 35732c01de2SDmitry Chagin /* Look for an ".note.ABI-tag" ELF section */ 358af582aaeSKonstantin Belousov bi_m = NULL; 35932c01de2SDmitry Chagin for (i = 0; i < MAX_BRANDS; i++) { 36032c01de2SDmitry Chagin bi = elf_brand_list[i]; 361ecc2fda8SBjoern A. Zeeb if (bi == NULL) 362ecc2fda8SBjoern A. Zeeb continue; 3632274ab3dSKonstantin Belousov if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0) 3641438fe3cSKonstantin Belousov continue; 365ecc2fda8SBjoern A. Zeeb if (hdr->e_machine == bi->machine && (bi->flags & 366ecc2fda8SBjoern A. 
Zeeb (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { 3670cad2aa2SKonstantin Belousov has_fctl0 = false; 3680cad2aa2SKonstantin Belousov *fctl0 = 0; 3690cad2aa2SKonstantin Belousov *osrel = 0; 370cefb93f2SKonstantin Belousov ret = __elfN(check_note)(imgp, bi->brand_note, osrel, 3710cad2aa2SKonstantin Belousov &has_fctl0, fctl0); 372f19d421aSNathan Whitehorn /* Give brand a chance to veto check_note's guess */ 3730cad2aa2SKonstantin Belousov if (ret && bi->header_supported) { 3740cad2aa2SKonstantin Belousov ret = bi->header_supported(imgp, osrel, 3750cad2aa2SKonstantin Belousov has_fctl0 ? fctl0 : NULL); 3760cad2aa2SKonstantin Belousov } 377af582aaeSKonstantin Belousov /* 378af582aaeSKonstantin Belousov * If note checker claimed the binary, but the 379af582aaeSKonstantin Belousov * interpreter path in the image does not 380af582aaeSKonstantin Belousov * match default one for the brand, try to 381af582aaeSKonstantin Belousov * search for other brands with the same 382af582aaeSKonstantin Belousov * interpreter. Either there is better brand 383af582aaeSKonstantin Belousov * with the right interpreter, or, failing 384af582aaeSKonstantin Belousov * this, we return first brand which accepted 385af582aaeSKonstantin Belousov * our note and, optionally, header. 386af582aaeSKonstantin Belousov */ 3873aeacc55SKonstantin Belousov if (ret && bi_m == NULL && interp != NULL && 3883aeacc55SKonstantin Belousov (bi->interp_path == NULL || 3893aeacc55SKonstantin Belousov (strlen(bi->interp_path) + 1 != interp_name_len || 3903aeacc55SKonstantin Belousov strncmp(interp, bi->interp_path, interp_name_len) 3913aeacc55SKonstantin Belousov != 0))) { 392af582aaeSKonstantin Belousov bi_m = bi; 393af582aaeSKonstantin Belousov ret = 0; 394af582aaeSKonstantin Belousov } 39532c01de2SDmitry Chagin if (ret) 39632c01de2SDmitry Chagin return (bi); 39732c01de2SDmitry Chagin } 39832c01de2SDmitry Chagin } 399af582aaeSKonstantin Belousov if (bi_m != NULL) 400af582aaeSKonstantin Belousov return (bi_m); 40132c01de2SDmitry Chagin 4025fe3ed62SJake Burkholder /* If the executable has a brand, search for it in the brand list. */ 4035fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 4045fe3ed62SJake Burkholder bi = elf_brand_list[i]; 4051438fe3cSKonstantin Belousov if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || 4062274ab3dSKonstantin Belousov (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) 407ecc2fda8SBjoern A. Zeeb continue; 408ecc2fda8SBjoern A. Zeeb if (hdr->e_machine == bi->machine && 4095fe3ed62SJake Burkholder (hdr->e_ident[EI_OSABI] == bi->brand || 4100fe98320SEd Schouten (bi->compat_3_brand != NULL && 4113d560b4bSKonstantin Belousov strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND], 4120fe98320SEd Schouten bi->compat_3_brand) == 0))) { 413686d2f31SNathan Whitehorn /* Looks good, but give brand a chance to veto */ 414d722231bSJohn Baldwin if (bi->header_supported == NULL || 4150cad2aa2SKonstantin Belousov bi->header_supported(imgp, NULL, NULL)) { 41615a9aedfSKonstantin Belousov /* 41715a9aedfSKonstantin Belousov * Again, prefer strictly matching 41815a9aedfSKonstantin Belousov * interpreter path. 
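 * For example, a binary whose EI_OSABI byte selects this brand but
 * whose PT_INTERP string differs from bi->interp_path is only
 * remembered in bi_m as a fallback; a brand that also matches the
 * interpreter exactly is returned immediately below.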
41915a9aedfSKonstantin Belousov */ 4207aab7a80SKonstantin Belousov if (interp_name_len == 0 && 4217aab7a80SKonstantin Belousov bi->interp_path == NULL) 4227aab7a80SKonstantin Belousov return (bi); 4237aab7a80SKonstantin Belousov if (bi->interp_path != NULL && 4247aab7a80SKonstantin Belousov strlen(bi->interp_path) + 1 == 42515a9aedfSKonstantin Belousov interp_name_len && strncmp(interp, 42615a9aedfSKonstantin Belousov bi->interp_path, interp_name_len) == 0) 4275fe3ed62SJake Burkholder return (bi); 42815a9aedfSKonstantin Belousov if (bi_m == NULL) 42915a9aedfSKonstantin Belousov bi_m = bi; 4305fe3ed62SJake Burkholder } 431686d2f31SNathan Whitehorn } 43215a9aedfSKonstantin Belousov } 43315a9aedfSKonstantin Belousov if (bi_m != NULL) 43415a9aedfSKonstantin Belousov return (bi_m); 4355fe3ed62SJake Burkholder 436817dc004SWarner Losh /* No known brand, see if the header is recognized by any brand */ 437817dc004SWarner Losh for (i = 0; i < MAX_BRANDS; i++) { 438817dc004SWarner Losh bi = elf_brand_list[i]; 439817dc004SWarner Losh if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY || 440817dc004SWarner Losh bi->header_supported == NULL) 441817dc004SWarner Losh continue; 442817dc004SWarner Losh if (hdr->e_machine == bi->machine) { 4430cad2aa2SKonstantin Belousov ret = bi->header_supported(imgp, NULL, NULL); 444817dc004SWarner Losh if (ret) 445817dc004SWarner Losh return (bi); 446817dc004SWarner Losh } 447817dc004SWarner Losh } 448817dc004SWarner Losh 4495fe3ed62SJake Burkholder /* Lacking a known brand, search for a recognized interpreter. */ 4505fe3ed62SJake Burkholder if (interp != NULL) { 4515fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 4525fe3ed62SJake Burkholder bi = elf_brand_list[i]; 4532274ab3dSKonstantin Belousov if (bi == NULL || (bi->flags & 4542274ab3dSKonstantin Belousov (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC)) 4552274ab3dSKonstantin Belousov != 0) 456ecc2fda8SBjoern A. Zeeb continue; 457ecc2fda8SBjoern A. Zeeb if (hdr->e_machine == bi->machine && 4583aeacc55SKonstantin Belousov bi->interp_path != NULL && 459d1ae5c83SKonstantin Belousov /* ELF image p_filesz includes terminating zero */ 460d1ae5c83SKonstantin Belousov strlen(bi->interp_path) + 1 == interp_name_len && 461d1ae5c83SKonstantin Belousov strncmp(interp, bi->interp_path, interp_name_len) 462d722231bSJohn Baldwin == 0 && (bi->header_supported == NULL || 4630cad2aa2SKonstantin Belousov bi->header_supported(imgp, NULL, NULL))) 4645fe3ed62SJake Burkholder return (bi); 4655fe3ed62SJake Burkholder } 4665fe3ed62SJake Burkholder } 4675fe3ed62SJake Burkholder 4685fe3ed62SJake Burkholder /* Lacking a recognized interpreter, try the default brand */ 4695fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 4705fe3ed62SJake Burkholder bi = elf_brand_list[i]; 4711438fe3cSKonstantin Belousov if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || 4722274ab3dSKonstantin Belousov (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) 473ecc2fda8SBjoern A. Zeeb continue; 474ecc2fda8SBjoern A. 
Zeeb if (hdr->e_machine == bi->machine && 475d722231bSJohn Baldwin __elfN(fallback_brand) == bi->brand && 476d722231bSJohn Baldwin (bi->header_supported == NULL || 4770cad2aa2SKonstantin Belousov bi->header_supported(imgp, NULL, NULL))) 4785fe3ed62SJake Burkholder return (bi); 4795fe3ed62SJake Burkholder } 4805fe3ed62SJake Burkholder return (NULL); 4815fe3ed62SJake Burkholder } 4825fe3ed62SJake Burkholder 4837de1bc13SKonstantin Belousov static bool 4847de1bc13SKonstantin Belousov __elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr) 4857de1bc13SKonstantin Belousov { 4867de1bc13SKonstantin Belousov return (hdr->e_phoff <= PAGE_SIZE && 4877de1bc13SKonstantin Belousov (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff); 4887de1bc13SKonstantin Belousov } 4897de1bc13SKonstantin Belousov 490e1743d02SSøren Schmidt static int 4913ebc1248SPeter Wemm __elfN(check_header)(const Elf_Ehdr *hdr) 492e1743d02SSøren Schmidt { 493d0ca7c29SPeter Wemm Elf_Brandinfo *bi; 4943ebc1248SPeter Wemm int i; 4953ebc1248SPeter Wemm 49652c24af7SPeter Wemm if (!IS_ELF(*hdr) || 49752c24af7SPeter Wemm hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || 49852c24af7SPeter Wemm hdr->e_ident[EI_DATA] != ELF_TARG_DATA || 4993dc19c46SJacques Vidrine hdr->e_ident[EI_VERSION] != EV_CURRENT || 5003dc19c46SJacques Vidrine hdr->e_phentsize != sizeof(Elf_Phdr) || 5013dc19c46SJacques Vidrine hdr->e_version != ELF_TARG_VER) 502a7cddfedSJake Burkholder return (ENOEXEC); 503e1743d02SSøren Schmidt 5043ebc1248SPeter Wemm /* 5053ebc1248SPeter Wemm * Make sure we have at least one brand for this machine. 5063ebc1248SPeter Wemm */ 5073ebc1248SPeter Wemm 5083ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 509d0ca7c29SPeter Wemm bi = elf_brand_list[i]; 510d0ca7c29SPeter Wemm if (bi != NULL && bi->machine == hdr->e_machine) 5113ebc1248SPeter Wemm break; 5123ebc1248SPeter Wemm } 5133ebc1248SPeter Wemm if (i == MAX_BRANDS) 514a7cddfedSJake Burkholder return (ENOEXEC); 515e1743d02SSøren Schmidt 516a7cddfedSJake Burkholder return (0); 517e1743d02SSøren Schmidt } 518e1743d02SSøren Schmidt 519e1743d02SSøren Schmidt static int 5203ebc1248SPeter Wemm __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 521ff6f03c7SAlan Cox vm_offset_t start, vm_offset_t end, vm_prot_t prot) 5223ebc1248SPeter Wemm { 523da61b9a6SAlan Cox struct sf_buf *sf; 524da61b9a6SAlan Cox int error; 5253ebc1248SPeter Wemm vm_offset_t off; 5263ebc1248SPeter Wemm 5273ebc1248SPeter Wemm /* 5283ebc1248SPeter Wemm * Create the page if it doesn't exist yet. Ignore errors. 5293ebc1248SPeter Wemm */ 530aaadc41fSKonstantin Belousov vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) - 531aaadc41fSKonstantin Belousov trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL); 5323ebc1248SPeter Wemm 5333ebc1248SPeter Wemm /* 5343ebc1248SPeter Wemm * Find the page from the underlying object. 
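 * The partial page cannot be mapped straight from the vnode's VM
 * object, so it is brought in through an sf_buf kernel mapping and
 * its bytes are copied into the anonymous mapping created above
 * with copyout().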
5353ebc1248SPeter Wemm */ 53628e8da65SAlan Cox if (object != NULL) { 537da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset); 538da61b9a6SAlan Cox if (sf == NULL) 539da61b9a6SAlan Cox return (KERN_FAILURE); 5403ebc1248SPeter Wemm off = offset - trunc_page(offset); 541da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, 542ca0387efSJake Burkholder end - start); 543be996836SAttilio Rao vm_imgact_unmap_page(sf); 544fe0a8a39SKonstantin Belousov if (error != 0) 545a7cddfedSJake Burkholder return (KERN_FAILURE); 5463ebc1248SPeter Wemm } 5473ebc1248SPeter Wemm 548a7cddfedSJake Burkholder return (KERN_SUCCESS); 5493ebc1248SPeter Wemm } 5503ebc1248SPeter Wemm 5513ebc1248SPeter Wemm static int 552e3d8f8feSKonstantin Belousov __elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object, 553e3d8f8feSKonstantin Belousov vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, 554e3d8f8feSKonstantin Belousov int cow) 5553ebc1248SPeter Wemm { 556da61b9a6SAlan Cox struct sf_buf *sf; 557da61b9a6SAlan Cox vm_offset_t off; 558a063facbSMarcel Moolenaar vm_size_t sz; 559e3d8f8feSKonstantin Belousov int error, locked, rv; 5603ebc1248SPeter Wemm 5613ebc1248SPeter Wemm if (start != trunc_page(start)) { 56281f223caSJake Burkholder rv = __elfN(map_partial)(map, object, offset, start, 563ff6f03c7SAlan Cox round_page(start), prot); 56428e8da65SAlan Cox if (rv != KERN_SUCCESS) 565a7cddfedSJake Burkholder return (rv); 5663ebc1248SPeter Wemm offset += round_page(start) - start; 5673ebc1248SPeter Wemm start = round_page(start); 5683ebc1248SPeter Wemm } 5693ebc1248SPeter Wemm if (end != round_page(end)) { 57081f223caSJake Burkholder rv = __elfN(map_partial)(map, object, offset + 571ff6f03c7SAlan Cox trunc_page(end) - start, trunc_page(end), end, prot); 57228e8da65SAlan Cox if (rv != KERN_SUCCESS) 573a7cddfedSJake Burkholder return (rv); 5743ebc1248SPeter Wemm end = trunc_page(end); 5753ebc1248SPeter Wemm } 576e383e820SAlan Cox if (start >= end) 577e383e820SAlan Cox return (KERN_SUCCESS); 578e383e820SAlan Cox if ((offset & PAGE_MASK) != 0) { 5793ebc1248SPeter Wemm /* 580e383e820SAlan Cox * The mapping is not page aligned. This means that we have 581e383e820SAlan Cox * to copy the data. 
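 * For example, with PAGE_SIZE 4096 a segment at file offset 0x1234
 * has (offset & PAGE_MASK) == 0x234, so the vnode object cannot be
 * mapped directly; an anonymous mapping is created instead and
 * filled one sf_buf-mapped page at a time.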
5823ebc1248SPeter Wemm */ 583aaadc41fSKonstantin Belousov rv = vm_map_fixed(map, NULL, 0, start, end - start, 584aaadc41fSKonstantin Belousov prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL); 5855420f76bSKonstantin Belousov if (rv != KERN_SUCCESS) 586a7cddfedSJake Burkholder return (rv); 587da61b9a6SAlan Cox if (object == NULL) 588da61b9a6SAlan Cox return (KERN_SUCCESS); 589da61b9a6SAlan Cox for (; start < end; start += sz) { 590da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset); 591da61b9a6SAlan Cox if (sf == NULL) 592da61b9a6SAlan Cox return (KERN_FAILURE); 5933ebc1248SPeter Wemm off = offset - trunc_page(offset); 5943ebc1248SPeter Wemm sz = end - start; 595da61b9a6SAlan Cox if (sz > PAGE_SIZE - off) 596da61b9a6SAlan Cox sz = PAGE_SIZE - off; 597da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, 5983ebc1248SPeter Wemm (caddr_t)start, sz); 599be996836SAttilio Rao vm_imgact_unmap_page(sf); 6005420f76bSKonstantin Belousov if (error != 0) 601a7cddfedSJake Burkholder return (KERN_FAILURE); 602da61b9a6SAlan Cox offset += sz; 6033ebc1248SPeter Wemm } 6043ebc1248SPeter Wemm } else { 605e5e6093bSAlan Cox vm_object_reference(object); 606e383e820SAlan Cox rv = vm_map_fixed(map, object, offset, start, end - start, 60778022527SKonstantin Belousov prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL | 60878022527SKonstantin Belousov (object != NULL ? MAP_VN_EXEC : 0)); 609e3d8f8feSKonstantin Belousov if (rv != KERN_SUCCESS) { 610e3d8f8feSKonstantin Belousov locked = VOP_ISLOCKED(imgp->vp); 611b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 612e5e6093bSAlan Cox vm_object_deallocate(object); 613e3d8f8feSKonstantin Belousov vn_lock(imgp->vp, locked | LK_RETRY); 614a7cddfedSJake Burkholder return (rv); 61578022527SKonstantin Belousov } else if (object != NULL) { 61678022527SKonstantin Belousov MPASS(imgp->vp->v_object == object); 61778022527SKonstantin Belousov VOP_SET_TEXT_CHECKED(imgp->vp); 6183ebc1248SPeter Wemm } 6193ebc1248SPeter Wemm } 620e383e820SAlan Cox return (KERN_SUCCESS); 621e383e820SAlan Cox } 6223ebc1248SPeter Wemm 6233ebc1248SPeter Wemm static int 6240bbee4cdSKonstantin Belousov __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset, 6251699546dSEdward Tomasz Napierala caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot) 626e1743d02SSøren Schmidt { 627da61b9a6SAlan Cox struct sf_buf *sf; 628e1743d02SSøren Schmidt size_t map_len; 629292177e6SAlan Cox vm_map_t map; 630292177e6SAlan Cox vm_object_t object; 631e020a35fSMark Johnston vm_offset_t map_addr; 632fa7dd9c5SMatthew Dillon int error, rv, cow; 633e1743d02SSøren Schmidt size_t copy_len; 6340bbee4cdSKonstantin Belousov vm_ooffset_t file_addr; 63552c24af7SPeter Wemm 63625ead034SBrian Feldman /* 63725ead034SBrian Feldman * It's necessary to fail if the filsz + offset taken from the 63825ead034SBrian Feldman * header is greater than the actual file pager object's size. 63925ead034SBrian Feldman * If we were to allow this, then the vm_map_find() below would 64025ead034SBrian Feldman * walk right off the end of the file object and into the ether. 64125ead034SBrian Feldman * 64225ead034SBrian Feldman * While I'm here, might as well check for something else that 64325ead034SBrian Feldman * is invalid: filsz cannot be greater than memsz. 
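 * For example, a PT_LOAD entry with p_offset 0x2000 and p_filesz
 * 0x5000 in a file that is only 0x4000 bytes long fails the first
 * check and is rejected with ENOEXEC instead of being mapped past
 * the end of the backing object.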
64425ead034SBrian Feldman */ 6459bcf2f2dSKonstantin Belousov if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) || 6469bcf2f2dSKonstantin Belousov filsz > memsz) { 64725ead034SBrian Feldman uprintf("elf_load_section: truncated ELF file\n"); 64825ead034SBrian Feldman return (ENOEXEC); 64925ead034SBrian Feldman } 65025ead034SBrian Feldman 651292177e6SAlan Cox object = imgp->object; 652292177e6SAlan Cox map = &imgp->proc->p_vmspace->vm_map; 653545517f1SEdward Tomasz Napierala map_addr = trunc_page((vm_offset_t)vmaddr); 654545517f1SEdward Tomasz Napierala file_addr = trunc_page(offset); 655e1743d02SSøren Schmidt 656e1743d02SSøren Schmidt /* 65752c24af7SPeter Wemm * We have two choices. We can either clear the data in the last page 65852c24af7SPeter Wemm * of an oversized mapping, or we can start the anon mapping a page 65952c24af7SPeter Wemm * early and copy the initialized data into that first page. We 66028e8da65SAlan Cox * choose the second. 66152c24af7SPeter Wemm */ 6629bcf2f2dSKonstantin Belousov if (filsz == 0) 6639bcf2f2dSKonstantin Belousov map_len = 0; 6649bcf2f2dSKonstantin Belousov else if (memsz > filsz) 665545517f1SEdward Tomasz Napierala map_len = trunc_page(offset + filsz) - file_addr; 66652c24af7SPeter Wemm else 667545517f1SEdward Tomasz Napierala map_len = round_page(offset + filsz) - file_addr; 66852c24af7SPeter Wemm 66952c24af7SPeter Wemm if (map_len != 0) { 670fa7dd9c5SMatthew Dillon /* cow flags: don't dump readonly sections in core */ 671fa7dd9c5SMatthew Dillon cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | 672fa7dd9c5SMatthew Dillon (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); 673fa7dd9c5SMatthew Dillon 67478022527SKonstantin Belousov rv = __elfN(map_insert)(imgp, map, object, file_addr, 67578022527SKonstantin Belousov map_addr, map_addr + map_len, prot, cow); 676e5e6093bSAlan Cox if (rv != KERN_SUCCESS) 677a7cddfedSJake Burkholder return (EINVAL); 67852c24af7SPeter Wemm 67952c24af7SPeter Wemm /* we can stop now if we've covered it all */ 680973d67c4SKonstantin Belousov if (memsz == filsz) 681a7cddfedSJake Burkholder return (0); 68252c24af7SPeter Wemm } 68352c24af7SPeter Wemm 68452c24af7SPeter Wemm /* 68552c24af7SPeter Wemm * We have to get the remaining bit of the file into the first part 68652c24af7SPeter Wemm * of the oversized map segment. This is normally because the .data 68752c24af7SPeter Wemm * segment in the file is extended to provide bss. It's a neat idea 68852c24af7SPeter Wemm * to try and save a page, but it's a pain in the behind to implement. 689e1743d02SSøren Schmidt */ 690545517f1SEdward Tomasz Napierala copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset + 691545517f1SEdward Tomasz Napierala filsz); 692545517f1SEdward Tomasz Napierala map_addr = trunc_page((vm_offset_t)vmaddr + filsz); 693545517f1SEdward Tomasz Napierala map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr; 694e1743d02SSøren Schmidt 69552c24af7SPeter Wemm /* This had damn well better be true! 
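 * (memsz >= filsz was checked above and map_addr was truncated to a
 * page boundary, so round_page(vmaddr + memsz) cannot fall below
 * map_addr; map_len covers the anonymous pages that back the bss
 * part of the segment.)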
*/ 6968191d577SPeter Wemm if (map_len != 0) { 697e3d8f8feSKonstantin Belousov rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr, 698c547cbb4SAlan Cox map_addr + map_len, prot, 0); 699973d67c4SKonstantin Belousov if (rv != KERN_SUCCESS) 700a7cddfedSJake Burkholder return (EINVAL); 7018191d577SPeter Wemm } 702e1743d02SSøren Schmidt 70352c24af7SPeter Wemm if (copy_len != 0) { 704da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset + filsz); 705da61b9a6SAlan Cox if (sf == NULL) 706da61b9a6SAlan Cox return (EIO); 707e1743d02SSøren Schmidt 70852c24af7SPeter Wemm /* send the page fragment to user space */ 709e020a35fSMark Johnston error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr, 710e020a35fSMark Johnston copy_len); 711be996836SAttilio Rao vm_imgact_unmap_page(sf); 712973d67c4SKonstantin Belousov if (error != 0) 71352c24af7SPeter Wemm return (error); 71452c24af7SPeter Wemm } 715e1743d02SSøren Schmidt 716e1743d02SSøren Schmidt /* 717c547cbb4SAlan Cox * Remove write access to the page if it was only granted by map_insert 718c547cbb4SAlan Cox * to allow copyout. 719e1743d02SSøren Schmidt */ 720c547cbb4SAlan Cox if ((prot & VM_PROT_WRITE) == 0) 721292177e6SAlan Cox vm_map_protect(map, trunc_page(map_addr), round_page(map_addr + 7220659df6fSKonstantin Belousov map_len), prot, 0, VM_MAP_PROTECT_SET_PROT); 7238191d577SPeter Wemm 724ff6f03c7SAlan Cox return (0); 725e1743d02SSøren Schmidt } 726e1743d02SSøren Schmidt 7279bcd7482SEdward Tomasz Napierala static int 7289bcd7482SEdward Tomasz Napierala __elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr, 7299bcd7482SEdward Tomasz Napierala const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp) 7309bcd7482SEdward Tomasz Napierala { 7319bcd7482SEdward Tomasz Napierala vm_prot_t prot; 7329bcd7482SEdward Tomasz Napierala u_long base_addr; 7339bcd7482SEdward Tomasz Napierala bool first; 7349bcd7482SEdward Tomasz Napierala int error, i; 7359bcd7482SEdward Tomasz Napierala 736b65ca345SEdward Tomasz Napierala ASSERT_VOP_LOCKED(imgp->vp, __func__); 737b65ca345SEdward Tomasz Napierala 7389bcd7482SEdward Tomasz Napierala base_addr = 0; 7399bcd7482SEdward Tomasz Napierala first = true; 7409bcd7482SEdward Tomasz Napierala 7419bcd7482SEdward Tomasz Napierala for (i = 0; i < hdr->e_phnum; i++) { 7429bcd7482SEdward Tomasz Napierala if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) 7439bcd7482SEdward Tomasz Napierala continue; 7449bcd7482SEdward Tomasz Napierala 7459bcd7482SEdward Tomasz Napierala /* Loadable segment */ 7469bcd7482SEdward Tomasz Napierala prot = __elfN(trans_prot)(phdr[i].p_flags); 7479bcd7482SEdward Tomasz Napierala error = __elfN(load_section)(imgp, phdr[i].p_offset, 7489bcd7482SEdward Tomasz Napierala (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, 7499bcd7482SEdward Tomasz Napierala phdr[i].p_memsz, phdr[i].p_filesz, prot); 7509bcd7482SEdward Tomasz Napierala if (error != 0) 7519bcd7482SEdward Tomasz Napierala return (error); 7529bcd7482SEdward Tomasz Napierala 7539bcd7482SEdward Tomasz Napierala /* 7549bcd7482SEdward Tomasz Napierala * Establish the base address if this is the first segment. 
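 * For a shared object or interpreter loaded through
 * __elfN(load_file), this value is what gets reported back through
 * the caller's addr parameter.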
7559bcd7482SEdward Tomasz Napierala */ 7569bcd7482SEdward Tomasz Napierala if (first) { 7579bcd7482SEdward Tomasz Napierala base_addr = trunc_page(phdr[i].p_vaddr + rbase); 7589bcd7482SEdward Tomasz Napierala first = false; 7599bcd7482SEdward Tomasz Napierala } 7609bcd7482SEdward Tomasz Napierala } 7619bcd7482SEdward Tomasz Napierala 7629bcd7482SEdward Tomasz Napierala if (base_addrp != NULL) 7639bcd7482SEdward Tomasz Napierala *base_addrp = base_addr; 7649bcd7482SEdward Tomasz Napierala 7659bcd7482SEdward Tomasz Napierala return (0); 7669bcd7482SEdward Tomasz Napierala } 7679bcd7482SEdward Tomasz Napierala 768c33fe779SJohn Polstra /* 769c33fe779SJohn Polstra * Load the file "file" into memory. It may be either a shared object 770c33fe779SJohn Polstra * or an executable. 771c33fe779SJohn Polstra * 772c33fe779SJohn Polstra * The "addr" reference parameter is in/out. On entry, it specifies 773c33fe779SJohn Polstra * the address where a shared object should be loaded. If the file is 774c33fe779SJohn Polstra * an executable, this value is ignored. On exit, "addr" specifies 775c33fe779SJohn Polstra * where the file was actually loaded. 776c33fe779SJohn Polstra * 777c33fe779SJohn Polstra * The "entry" reference parameter is out only. On exit, it specifies 778c33fe779SJohn Polstra * the entry point for the loaded file. 779c33fe779SJohn Polstra */ 780e1743d02SSøren Schmidt static int 7813ebc1248SPeter Wemm __elfN(load_file)(struct proc *p, const char *file, u_long *addr, 7821699546dSEdward Tomasz Napierala u_long *entry) 783e1743d02SSøren Schmidt { 784911c2be0SMark Peek struct { 785911c2be0SMark Peek struct nameidata nd; 786911c2be0SMark Peek struct vattr attr; 787911c2be0SMark Peek struct image_params image_params; 788911c2be0SMark Peek } *tempdata; 789d254af07SMatthew Dillon const Elf_Ehdr *hdr = NULL; 790d254af07SMatthew Dillon const Elf_Phdr *phdr = NULL; 791911c2be0SMark Peek struct nameidata *nd; 792911c2be0SMark Peek struct vattr *attr; 793911c2be0SMark Peek struct image_params *imgp; 79478022527SKonstantin Belousov u_long rbase; 795c33fe779SJohn Polstra u_long base_addr = 0; 7969bcd7482SEdward Tomasz Napierala int error; 797e1743d02SSøren Schmidt 79812bc222eSJonathan Anderson #ifdef CAPABILITY_MODE 79912bc222eSJonathan Anderson /* 80012bc222eSJonathan Anderson * XXXJA: This check can go away once we are sufficiently confident 80112bc222eSJonathan Anderson * that the checks in namei() are correct. 
80212bc222eSJonathan Anderson */ 80312bc222eSJonathan Anderson if (IN_CAPABILITY_MODE(curthread)) 80412bc222eSJonathan Anderson return (ECAPMODE); 80512bc222eSJonathan Anderson #endif 80612bc222eSJonathan Anderson 8071073d17eSKonstantin Belousov tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO); 808911c2be0SMark Peek nd = &tempdata->nd; 809911c2be0SMark Peek attr = &tempdata->attr; 810911c2be0SMark Peek imgp = &tempdata->image_params; 811911c2be0SMark Peek 812c8a79999SPeter Wemm /* 813c8a79999SPeter Wemm * Initialize part of the common data 814c8a79999SPeter Wemm */ 815c8a79999SPeter Wemm imgp->proc = p; 816911c2be0SMark Peek imgp->attr = attr; 817c8a79999SPeter Wemm 818f422bc30SJohn Baldwin NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF, 8197e1d3eefSMateusz Guzik UIO_SYSSPACE, file); 820911c2be0SMark Peek if ((error = namei(nd)) != 0) { 821911c2be0SMark Peek nd->ni_vp = NULL; 822e1743d02SSøren Schmidt goto fail; 823e1743d02SSøren Schmidt } 824bb92cd7bSMateusz Guzik NDFREE_PNBUF(nd); 825911c2be0SMark Peek imgp->vp = nd->ni_vp; 826c8a79999SPeter Wemm 827e1743d02SSøren Schmidt /* 828e1743d02SSøren Schmidt * Check permissions, modes, uid, etc on the file, and "open" it. 829e1743d02SSøren Schmidt */ 830c8a79999SPeter Wemm error = exec_check_permissions(imgp); 831373d1a3fSAlan Cox if (error) 832c8a79999SPeter Wemm goto fail; 833e1743d02SSøren Schmidt 834c8a79999SPeter Wemm error = exec_map_first_page(imgp); 835373d1a3fSAlan Cox if (error) 836373d1a3fSAlan Cox goto fail; 837373d1a3fSAlan Cox 8388516dd18SPoul-Henning Kamp imgp->object = nd->ni_vp->v_object; 839e1743d02SSøren Schmidt 840d254af07SMatthew Dillon hdr = (const Elf_Ehdr *)imgp->image_header; 8413ebc1248SPeter Wemm if ((error = __elfN(check_header)(hdr)) != 0) 842e1743d02SSøren Schmidt goto fail; 843c33fe779SJohn Polstra if (hdr->e_type == ET_DYN) 844c33fe779SJohn Polstra rbase = *addr; 845c33fe779SJohn Polstra else if (hdr->e_type == ET_EXEC) 846c33fe779SJohn Polstra rbase = 0; 847c33fe779SJohn Polstra else { 848c33fe779SJohn Polstra error = ENOEXEC; 849c33fe779SJohn Polstra goto fail; 850c33fe779SJohn Polstra } 851e1743d02SSøren Schmidt 852c8a79999SPeter Wemm /* Only support headers that fit within first page for now */ 8537de1bc13SKonstantin Belousov if (!__elfN(phdr_in_zero_page)(hdr)) { 854c8a79999SPeter Wemm error = ENOEXEC; 855e1743d02SSøren Schmidt goto fail; 856c8a79999SPeter Wemm } 857c8a79999SPeter Wemm 858d254af07SMatthew Dillon phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 85993d1c728SKonstantin Belousov if (!aligned(phdr, Elf_Addr)) { 86093d1c728SKonstantin Belousov error = ENOEXEC; 86193d1c728SKonstantin Belousov goto fail; 86293d1c728SKonstantin Belousov } 863e1743d02SSøren Schmidt 8649bcd7482SEdward Tomasz Napierala error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr); 865292177e6SAlan Cox if (error != 0) 866e1743d02SSøren Schmidt goto fail; 8679bcd7482SEdward Tomasz Napierala 868c33fe779SJohn Polstra *addr = base_addr; 869c33fe779SJohn Polstra *entry = (unsigned long)hdr->e_entry + rbase; 870e1743d02SSøren Schmidt 871e1743d02SSøren Schmidt fail: 872c8a79999SPeter Wemm if (imgp->firstpage) 873c8a79999SPeter Wemm exec_unmap_first_page(imgp); 8740b2ed1aeSJeff Roberson 87578022527SKonstantin Belousov if (nd->ni_vp) { 87678022527SKonstantin Belousov if (imgp->textset) 87778022527SKonstantin Belousov VOP_UNSET_TEXT_CHECKED(nd->ni_vp); 878373d1a3fSAlan Cox vput(nd->ni_vp); 87978022527SKonstantin Belousov } 880911c2be0SMark Peek free(tempdata, M_TEMP); 881e1743d02SSøren 
Schmidt 882a7cddfedSJake Burkholder return (error); 883e1743d02SSøren Schmidt } 884e1743d02SSøren Schmidt 885a4007ae1SKonstantin Belousov /* 886a4007ae1SKonstantin Belousov * Select randomized valid address in the map map, between minv and 887a4007ae1SKonstantin Belousov * maxv, with specified alignment. The [minv, maxv) range must belong 888a4007ae1SKonstantin Belousov * to the map. Note that function only allocates the address, it is 889a4007ae1SKonstantin Belousov * up to caller to clamp maxv in a way that the final allocation 890a4007ae1SKonstantin Belousov * length fit into the map. 891a4007ae1SKonstantin Belousov * 892a4007ae1SKonstantin Belousov * Result is returned in *resp, error code indicates that arguments 893a4007ae1SKonstantin Belousov * did not pass sanity checks for overflow and range correctness. 894a4007ae1SKonstantin Belousov */ 8959cf78c1cSKonstantin Belousov static int 8969cf78c1cSKonstantin Belousov __CONCAT(rnd_, __elfN(base))(vm_map_t map, u_long minv, u_long maxv, 8979cf78c1cSKonstantin Belousov u_int align, u_long *resp) 898fa50a355SKonstantin Belousov { 899fa50a355SKonstantin Belousov u_long rbase, res; 900fa50a355SKonstantin Belousov 901fa50a355SKonstantin Belousov MPASS(vm_map_min(map) <= minv); 9029cf78c1cSKonstantin Belousov 9039cf78c1cSKonstantin Belousov if (minv >= maxv || minv + align >= maxv || maxv > vm_map_max(map)) { 9049cf78c1cSKonstantin Belousov uprintf("Invalid ELF segments layout\n"); 9059cf78c1cSKonstantin Belousov return (ENOEXEC); 9069cf78c1cSKonstantin Belousov } 9079cf78c1cSKonstantin Belousov 908fa50a355SKonstantin Belousov arc4rand(&rbase, sizeof(rbase), 0); 909fa50a355SKonstantin Belousov res = roundup(minv, (u_long)align) + rbase % (maxv - minv); 910fa50a355SKonstantin Belousov res &= ~((u_long)align - 1); 911fa50a355SKonstantin Belousov if (res >= maxv) 912fa50a355SKonstantin Belousov res -= align; 9139cf78c1cSKonstantin Belousov 914fa50a355SKonstantin Belousov KASSERT(res >= minv, 915fa50a355SKonstantin Belousov ("res %#lx < minv %#lx, maxv %#lx rbase %#lx", 916fa50a355SKonstantin Belousov res, minv, maxv, rbase)); 917fa50a355SKonstantin Belousov KASSERT(res < maxv, 918fa50a355SKonstantin Belousov ("res %#lx > maxv %#lx, minv %#lx rbase %#lx", 919fa50a355SKonstantin Belousov res, maxv, minv, rbase)); 9209cf78c1cSKonstantin Belousov 9219cf78c1cSKonstantin Belousov *resp = res; 9229cf78c1cSKonstantin Belousov return (0); 923fa50a355SKonstantin Belousov } 924fa50a355SKonstantin Belousov 92520e1174aSEdward Tomasz Napierala static int 92620e1174aSEdward Tomasz Napierala __elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr, 92720e1174aSEdward Tomasz Napierala const Elf_Phdr *phdr, u_long et_dyn_addr) 92820e1174aSEdward Tomasz Napierala { 92920e1174aSEdward Tomasz Napierala struct vmspace *vmspace; 93020e1174aSEdward Tomasz Napierala const char *err_str; 93120e1174aSEdward Tomasz Napierala u_long text_size, data_size, total_size, text_addr, data_addr; 93220e1174aSEdward Tomasz Napierala u_long seg_size, seg_addr; 93320e1174aSEdward Tomasz Napierala int i; 93420e1174aSEdward Tomasz Napierala 93520e1174aSEdward Tomasz Napierala err_str = NULL; 93620e1174aSEdward Tomasz Napierala text_size = data_size = total_size = text_addr = data_addr = 0; 93720e1174aSEdward Tomasz Napierala 93820e1174aSEdward Tomasz Napierala for (i = 0; i < hdr->e_phnum; i++) { 93920e1174aSEdward Tomasz Napierala if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) 94020e1174aSEdward Tomasz Napierala continue; 94120e1174aSEdward Tomasz Napierala 
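		/*
		 * Illustrative numbers (PAGE_SIZE 4096, et_dyn_addr 0): a
		 * PT_LOAD entry with p_vaddr 0x201c40 and p_memsz 0x6000
		 * gives seg_addr = 0x201000 and seg_size =
		 * round_page(0x6c40) = 0x7000, the whole-page footprint
		 * of the segment.
		 */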
94220e1174aSEdward Tomasz Napierala seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr); 94320e1174aSEdward Tomasz Napierala seg_size = round_page(phdr[i].p_memsz + 94420e1174aSEdward Tomasz Napierala phdr[i].p_vaddr + et_dyn_addr - seg_addr); 94520e1174aSEdward Tomasz Napierala 94620e1174aSEdward Tomasz Napierala /* 94720e1174aSEdward Tomasz Napierala * Make the largest executable segment the official 94820e1174aSEdward Tomasz Napierala * text segment and all others data. 94920e1174aSEdward Tomasz Napierala * 95020e1174aSEdward Tomasz Napierala * Note that obreak() assumes that data_addr + data_size == end 95120e1174aSEdward Tomasz Napierala * of data load area, and the ELF file format expects segments 95220e1174aSEdward Tomasz Napierala * to be sorted by address. If multiple data segments exist, 95320e1174aSEdward Tomasz Napierala * the last one will be used. 95420e1174aSEdward Tomasz Napierala */ 95520e1174aSEdward Tomasz Napierala 95620e1174aSEdward Tomasz Napierala if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) { 95720e1174aSEdward Tomasz Napierala text_size = seg_size; 95820e1174aSEdward Tomasz Napierala text_addr = seg_addr; 95920e1174aSEdward Tomasz Napierala } else { 96020e1174aSEdward Tomasz Napierala data_size = seg_size; 96120e1174aSEdward Tomasz Napierala data_addr = seg_addr; 96220e1174aSEdward Tomasz Napierala } 96320e1174aSEdward Tomasz Napierala total_size += seg_size; 96420e1174aSEdward Tomasz Napierala } 96520e1174aSEdward Tomasz Napierala 96620e1174aSEdward Tomasz Napierala if (data_addr == 0 && data_size == 0) { 96720e1174aSEdward Tomasz Napierala data_addr = text_addr; 96820e1174aSEdward Tomasz Napierala data_size = text_size; 96920e1174aSEdward Tomasz Napierala } 97020e1174aSEdward Tomasz Napierala 97120e1174aSEdward Tomasz Napierala /* 97220e1174aSEdward Tomasz Napierala * Check limits. It should be safe to check the 97320e1174aSEdward Tomasz Napierala * limits after loading the segments since we do 97420e1174aSEdward Tomasz Napierala * not actually fault in all the segments pages. 
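 * For example, a data segment larger than RLIMIT_DATA, or a total
 * mapping size above RLIMIT_VMEM, makes the exec fail here with
 * ENOMEM and a one-line diagnostic rather than faulting later while
 * the process runs.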
97520e1174aSEdward Tomasz Napierala */ 97620e1174aSEdward Tomasz Napierala PROC_LOCK(imgp->proc); 97720e1174aSEdward Tomasz Napierala if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA)) 97820e1174aSEdward Tomasz Napierala err_str = "Data segment size exceeds process limit"; 97920e1174aSEdward Tomasz Napierala else if (text_size > maxtsiz) 98020e1174aSEdward Tomasz Napierala err_str = "Text segment size exceeds system limit"; 98120e1174aSEdward Tomasz Napierala else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM)) 98220e1174aSEdward Tomasz Napierala err_str = "Total segment size exceeds process limit"; 98320e1174aSEdward Tomasz Napierala else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0) 98420e1174aSEdward Tomasz Napierala err_str = "Data segment size exceeds resource limit"; 98520e1174aSEdward Tomasz Napierala else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0) 98620e1174aSEdward Tomasz Napierala err_str = "Total segment size exceeds resource limit"; 98720e1174aSEdward Tomasz Napierala PROC_UNLOCK(imgp->proc); 98820e1174aSEdward Tomasz Napierala if (err_str != NULL) { 98920e1174aSEdward Tomasz Napierala uprintf("%s\n", err_str); 99020e1174aSEdward Tomasz Napierala return (ENOMEM); 99120e1174aSEdward Tomasz Napierala } 99220e1174aSEdward Tomasz Napierala 99320e1174aSEdward Tomasz Napierala vmspace = imgp->proc->p_vmspace; 99420e1174aSEdward Tomasz Napierala vmspace->vm_tsize = text_size >> PAGE_SHIFT; 99520e1174aSEdward Tomasz Napierala vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; 99620e1174aSEdward Tomasz Napierala vmspace->vm_dsize = data_size >> PAGE_SHIFT; 99720e1174aSEdward Tomasz Napierala vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; 99820e1174aSEdward Tomasz Napierala 99920e1174aSEdward Tomasz Napierala return (0); 100020e1174aSEdward Tomasz Napierala } 100120e1174aSEdward Tomasz Napierala 100209c78d53SEdward Tomasz Napierala static int 100309c78d53SEdward Tomasz Napierala __elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr, 100409c78d53SEdward Tomasz Napierala char **interpp, bool *free_interpp) 100509c78d53SEdward Tomasz Napierala { 100609c78d53SEdward Tomasz Napierala struct thread *td; 100709c78d53SEdward Tomasz Napierala char *interp; 100809c78d53SEdward Tomasz Napierala int error, interp_name_len; 100909c78d53SEdward Tomasz Napierala 101009c78d53SEdward Tomasz Napierala KASSERT(phdr->p_type == PT_INTERP, 101109c78d53SEdward Tomasz Napierala ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type)); 1012b65ca345SEdward Tomasz Napierala ASSERT_VOP_LOCKED(imgp->vp, __func__); 101309c78d53SEdward Tomasz Napierala 101409c78d53SEdward Tomasz Napierala td = curthread; 101509c78d53SEdward Tomasz Napierala 101609c78d53SEdward Tomasz Napierala /* Path to interpreter */ 101709c78d53SEdward Tomasz Napierala if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) { 101809c78d53SEdward Tomasz Napierala uprintf("Invalid PT_INTERP\n"); 101909c78d53SEdward Tomasz Napierala return (ENOEXEC); 102009c78d53SEdward Tomasz Napierala } 102109c78d53SEdward Tomasz Napierala 102209c78d53SEdward Tomasz Napierala interp_name_len = phdr->p_filesz; 102309c78d53SEdward Tomasz Napierala if (phdr->p_offset > PAGE_SIZE || 102409c78d53SEdward Tomasz Napierala interp_name_len > PAGE_SIZE - phdr->p_offset) { 10250ddfdc60SKonstantin Belousov /* 1026f1f81d3bSKonstantin Belousov * The vnode lock might be needed by the pagedaemon to 10270ddfdc60SKonstantin Belousov * clean pages owned by the vnode. 
Do not allow sleep 10280ddfdc60SKonstantin Belousov * waiting for memory with the vnode locked, instead 10290ddfdc60SKonstantin Belousov * try non-sleepable allocation first, and if it 10300ddfdc60SKonstantin Belousov * fails, go to the slow path were we drop the lock 1031f1f81d3bSKonstantin Belousov * and do M_WAITOK. A text reference prevents 1032f1f81d3bSKonstantin Belousov * modifications to the vnode content. 10330ddfdc60SKonstantin Belousov */ 10342d6b8546SKonstantin Belousov interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT); 10352d6b8546SKonstantin Belousov if (interp == NULL) { 1036b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 103709c78d53SEdward Tomasz Napierala interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK); 103878022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 10392d6b8546SKonstantin Belousov } 10400ddfdc60SKonstantin Belousov 104109c78d53SEdward Tomasz Napierala error = vn_rdwr(UIO_READ, imgp->vp, interp, 104209c78d53SEdward Tomasz Napierala interp_name_len, phdr->p_offset, 104309c78d53SEdward Tomasz Napierala UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, 104409c78d53SEdward Tomasz Napierala NOCRED, NULL, td); 104509c78d53SEdward Tomasz Napierala if (error != 0) { 104609c78d53SEdward Tomasz Napierala free(interp, M_TEMP); 104709c78d53SEdward Tomasz Napierala uprintf("i/o error PT_INTERP %d\n", error); 104809c78d53SEdward Tomasz Napierala return (error); 104909c78d53SEdward Tomasz Napierala } 105009c78d53SEdward Tomasz Napierala interp[interp_name_len] = '\0'; 105109c78d53SEdward Tomasz Napierala 105209c78d53SEdward Tomasz Napierala *interpp = interp; 105309c78d53SEdward Tomasz Napierala *free_interpp = true; 105409c78d53SEdward Tomasz Napierala return (0); 105509c78d53SEdward Tomasz Napierala } 105609c78d53SEdward Tomasz Napierala 105709c78d53SEdward Tomasz Napierala interp = __DECONST(char *, imgp->image_header) + phdr->p_offset; 105809c78d53SEdward Tomasz Napierala if (interp[interp_name_len - 1] != '\0') { 105909c78d53SEdward Tomasz Napierala uprintf("Invalid PT_INTERP\n"); 106009c78d53SEdward Tomasz Napierala return (ENOEXEC); 106109c78d53SEdward Tomasz Napierala } 106209c78d53SEdward Tomasz Napierala 106309c78d53SEdward Tomasz Napierala *interpp = interp; 106409c78d53SEdward Tomasz Napierala *free_interpp = false; 106509c78d53SEdward Tomasz Napierala return (0); 106609c78d53SEdward Tomasz Napierala } 106709c78d53SEdward Tomasz Napierala 10689274fb35SEdward Tomasz Napierala static int 10699274fb35SEdward Tomasz Napierala __elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info, 10709274fb35SEdward Tomasz Napierala const char *interp, u_long *addr, u_long *entry) 10719274fb35SEdward Tomasz Napierala { 10729274fb35SEdward Tomasz Napierala char *path; 10739274fb35SEdward Tomasz Napierala int error; 10749274fb35SEdward Tomasz Napierala 10759274fb35SEdward Tomasz Napierala if (brand_info->emul_path != NULL && 10769274fb35SEdward Tomasz Napierala brand_info->emul_path[0] != '\0') { 10779274fb35SEdward Tomasz Napierala path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 10789274fb35SEdward Tomasz Napierala snprintf(path, MAXPATHLEN, "%s%s", 10799274fb35SEdward Tomasz Napierala brand_info->emul_path, interp); 10809274fb35SEdward Tomasz Napierala error = __elfN(load_file)(imgp->proc, path, addr, entry); 10819274fb35SEdward Tomasz Napierala free(path, M_TEMP); 10829274fb35SEdward Tomasz Napierala if (error == 0) 10839274fb35SEdward Tomasz Napierala return (0); 10849274fb35SEdward Tomasz Napierala } 10859274fb35SEdward Tomasz Napierala 
10869274fb35SEdward Tomasz Napierala if (brand_info->interp_newpath != NULL && 10879274fb35SEdward Tomasz Napierala (brand_info->interp_path == NULL || 10889274fb35SEdward Tomasz Napierala strcmp(interp, brand_info->interp_path) == 0)) { 10899274fb35SEdward Tomasz Napierala error = __elfN(load_file)(imgp->proc, 10909274fb35SEdward Tomasz Napierala brand_info->interp_newpath, addr, entry); 10919274fb35SEdward Tomasz Napierala if (error == 0) 10929274fb35SEdward Tomasz Napierala return (0); 10939274fb35SEdward Tomasz Napierala } 10949274fb35SEdward Tomasz Napierala 10959274fb35SEdward Tomasz Napierala error = __elfN(load_file)(imgp->proc, interp, addr, entry); 10969274fb35SEdward Tomasz Napierala if (error == 0) 10979274fb35SEdward Tomasz Napierala return (0); 10989274fb35SEdward Tomasz Napierala 10999274fb35SEdward Tomasz Napierala uprintf("ELF interpreter %s not found, error %d\n", interp, error); 11009274fb35SEdward Tomasz Napierala return (error); 11019274fb35SEdward Tomasz Napierala } 11029274fb35SEdward Tomasz Napierala 1103fa50a355SKonstantin Belousov /* 1104fa50a355SKonstantin Belousov * Impossible et_dyn_addr initial value indicating that the real base 1105fa50a355SKonstantin Belousov * must be calculated later with some randomization applied. 1106fa50a355SKonstantin Belousov */ 1107fa50a355SKonstantin Belousov #define ET_DYN_ADDR_RAND 1 1108fa50a355SKonstantin Belousov 1109303b270bSEivind Eklund static int 11103ebc1248SPeter Wemm __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) 1111e1743d02SSøren Schmidt { 11126c775eb6SKonstantin Belousov struct thread *td; 11136c775eb6SKonstantin Belousov const Elf_Ehdr *hdr; 111432c01de2SDmitry Chagin const Elf_Phdr *phdr; 1115e5e6093bSAlan Cox Elf_Auxargs *elf_auxargs; 11165856e12eSJohn Dyson struct vmspace *vmspace; 1117fa50a355SKonstantin Belousov vm_map_t map; 11189274fb35SEdward Tomasz Napierala char *interp; 1119d1dbc694SJohn Polstra Elf_Brandinfo *brand_info; 11205fe3ed62SJake Burkholder struct sysentvec *sv; 112120e1174aSEdward Tomasz Napierala u_long addr, baddr, et_dyn_addr, entry, proghdr; 1122e499988fSKonstantin Belousov u_long maxalign, maxsalign, mapsz, maxv, maxv1, anon_loc; 1123cefb93f2SKonstantin Belousov uint32_t fctl0; 11246c775eb6SKonstantin Belousov int32_t osrel; 112509c78d53SEdward Tomasz Napierala bool free_interp; 11269274fb35SEdward Tomasz Napierala int error, i, n; 11276c775eb6SKonstantin Belousov 11286c775eb6SKonstantin Belousov hdr = (const Elf_Ehdr *)imgp->image_header; 1129e1743d02SSøren Schmidt 1130e1743d02SSøren Schmidt /* 1131e1743d02SSøren Schmidt * Do we have a valid ELF header ? 1132900b28f9SMaxim Sobolev * 1133900b28f9SMaxim Sobolev * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later 1134900b28f9SMaxim Sobolev * if particular brand doesn't support it. 1135e1743d02SSøren Schmidt */ 1136900b28f9SMaxim Sobolev if (__elfN(check_header)(hdr) != 0 || 1137900b28f9SMaxim Sobolev (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) 1138a7cddfedSJake Burkholder return (-1); 1139e1743d02SSøren Schmidt 1140e1743d02SSøren Schmidt /* 1141e1743d02SSøren Schmidt * From here on down, we return an errno, not -1, as we've 1142e1743d02SSøren Schmidt * detected an ELF file. 
1143e1743d02SSøren Schmidt */ 1144e1743d02SSøren Schmidt 11457de1bc13SKonstantin Belousov if (!__elfN(phdr_in_zero_page)(hdr)) { 11466b16d664SEd Maste uprintf("Program headers not in the first page\n"); 1147a7cddfedSJake Burkholder return (ENOEXEC); 1148e1743d02SSøren Schmidt } 114952c24af7SPeter Wemm phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 11506b16d664SEd Maste if (!aligned(phdr, Elf_Addr)) { 11516b16d664SEd Maste uprintf("Unaligned program headers\n"); 115293d1c728SKonstantin Belousov return (ENOEXEC); 11536b16d664SEd Maste } 11546c775eb6SKonstantin Belousov 11556c775eb6SKonstantin Belousov n = error = 0; 11567564c4adSKonstantin Belousov baddr = 0; 11576c775eb6SKonstantin Belousov osrel = 0; 1158cefb93f2SKonstantin Belousov fctl0 = 0; 11596c775eb6SKonstantin Belousov entry = proghdr = 0; 11609274fb35SEdward Tomasz Napierala interp = NULL; 116109c78d53SEdward Tomasz Napierala free_interp = false; 11626c775eb6SKonstantin Belousov td = curthread; 1163714d6d09SKonstantin Belousov 1164714d6d09SKonstantin Belousov /* 1165714d6d09SKonstantin Belousov * Somewhat arbitrary, limit accepted max alignment for the 1166714d6d09SKonstantin Belousov * loadable segment to the max supported superpage size. Too 1167714d6d09SKonstantin Belousov * large alignment requests are not useful and are indicators 1168714d6d09SKonstantin Belousov * of corrupted or outright malicious binary. 1169714d6d09SKonstantin Belousov */ 1170fa50a355SKonstantin Belousov maxalign = PAGE_SIZE; 1171714d6d09SKonstantin Belousov maxsalign = PAGE_SIZE * 1024; 1172714d6d09SKonstantin Belousov for (i = MAXPAGESIZES - 1; i > 0; i--) { 1173714d6d09SKonstantin Belousov if (pagesizes[i] > maxsalign) 1174714d6d09SKonstantin Belousov maxsalign = pagesizes[i]; 1175714d6d09SKonstantin Belousov } 1176714d6d09SKonstantin Belousov 1177fa50a355SKonstantin Belousov mapsz = 0; 11786c775eb6SKonstantin Belousov 11795fe3ed62SJake Burkholder for (i = 0; i < hdr->e_phnum; i++) { 1180291c06a1SKonstantin Belousov switch (phdr[i].p_type) { 1181291c06a1SKonstantin Belousov case PT_LOAD: 11827564c4adSKonstantin Belousov if (n == 0) 11837564c4adSKonstantin Belousov baddr = phdr[i].p_vaddr; 118436df8f54SKonstantin Belousov if (!powerof2(phdr[i].p_align) || 118536df8f54SKonstantin Belousov phdr[i].p_align > maxsalign) { 1186714d6d09SKonstantin Belousov uprintf("Invalid segment alignment\n"); 1187714d6d09SKonstantin Belousov error = ENOEXEC; 1188714d6d09SKonstantin Belousov goto ret; 1189714d6d09SKonstantin Belousov } 1190fa50a355SKonstantin Belousov if (phdr[i].p_align > maxalign) 1191fa50a355SKonstantin Belousov maxalign = phdr[i].p_align; 1192bf839416SKonstantin Belousov if (mapsz + phdr[i].p_memsz < mapsz) { 1193bf839416SKonstantin Belousov uprintf("Mapsize overflow\n"); 1194bf839416SKonstantin Belousov error = ENOEXEC; 1195bf839416SKonstantin Belousov goto ret; 1196bf839416SKonstantin Belousov } 1197fa50a355SKonstantin Belousov mapsz += phdr[i].p_memsz; 11987564c4adSKonstantin Belousov n++; 11999bcd7482SEdward Tomasz Napierala 12009bcd7482SEdward Tomasz Napierala /* 12019bcd7482SEdward Tomasz Napierala * If this segment contains the program headers, 12029bcd7482SEdward Tomasz Napierala * remember their virtual address for the AT_PHDR 12039bcd7482SEdward Tomasz Napierala * aux entry. Static binaries don't usually include 12049bcd7482SEdward Tomasz Napierala * a PT_PHDR entry. 
12059bcd7482SEdward Tomasz Napierala */ 12069bcd7482SEdward Tomasz Napierala if (phdr[i].p_offset == 0 && 120788dd7a0aSKonstantin Belousov hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <= 120888dd7a0aSKonstantin Belousov phdr[i].p_filesz) 12099bcd7482SEdward Tomasz Napierala proghdr = phdr[i].p_vaddr + hdr->e_phoff; 1210291c06a1SKonstantin Belousov break; 1211291c06a1SKonstantin Belousov case PT_INTERP: 1212e5e6093bSAlan Cox /* Path to interpreter */ 1213d3ee0a15SJonathan T. Looney if (interp != NULL) { 1214d3ee0a15SJonathan T. Looney uprintf("Multiple PT_INTERP headers\n"); 1215d3ee0a15SJonathan T. Looney error = ENOEXEC; 1216d3ee0a15SJonathan T. Looney goto ret; 1217d3ee0a15SJonathan T. Looney } 121809c78d53SEdward Tomasz Napierala error = __elfN(get_interp)(imgp, &phdr[i], &interp, 121909c78d53SEdward Tomasz Napierala &free_interp); 122009c78d53SEdward Tomasz Napierala if (error != 0) 12216c775eb6SKonstantin Belousov goto ret; 1222291c06a1SKonstantin Belousov break; 1223291c06a1SKonstantin Belousov case PT_GNU_STACK: 1224*fbafa98aSEd Maste if (__elfN(nxstack)) { 1225291c06a1SKonstantin Belousov imgp->stack_prot = 1226291c06a1SKonstantin Belousov __elfN(trans_prot)(phdr[i].p_flags); 1227*fbafa98aSEd Maste if ((imgp->stack_prot & VM_PROT_RW) != 1228*fbafa98aSEd Maste VM_PROT_RW) { 1229*fbafa98aSEd Maste uprintf("Invalid PT_GNU_STACK\n"); 1230*fbafa98aSEd Maste error = ENOEXEC; 1231*fbafa98aSEd Maste goto ret; 1232*fbafa98aSEd Maste } 1233*fbafa98aSEd Maste } 1234316b3843SKonstantin Belousov imgp->stack_sz = phdr[i].p_memsz; 1235291c06a1SKonstantin Belousov break; 12369bcd7482SEdward Tomasz Napierala case PT_PHDR: /* Program header table info */ 12379bcd7482SEdward Tomasz Napierala proghdr = phdr[i].p_vaddr; 12389bcd7482SEdward Tomasz Napierala break; 12393ebc1248SPeter Wemm } 12403ebc1248SPeter Wemm } 12413ebc1248SPeter Wemm 124209c78d53SEdward Tomasz Napierala brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0); 12435fe3ed62SJake Burkholder if (brand_info == NULL) { 12445fe3ed62SJake Burkholder uprintf("ELF binary type \"%u\" not known.\n", 12455fe3ed62SJake Burkholder hdr->e_ident[EI_OSABI]); 12466c775eb6SKonstantin Belousov error = ENOEXEC; 12476c775eb6SKonstantin Belousov goto ret; 12483ebc1248SPeter Wemm } 1249fa50a355SKonstantin Belousov sv = brand_info->sysvec; 125077ebe276SEd Maste et_dyn_addr = 0; 1251ab02d85fSKonstantin Belousov if (hdr->e_type == ET_DYN) { 12526b16d664SEd Maste if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { 12536b16d664SEd Maste uprintf("Cannot execute shared object\n"); 12546c775eb6SKonstantin Belousov error = ENOEXEC; 12556c775eb6SKonstantin Belousov goto ret; 12566b16d664SEd Maste } 12577564c4adSKonstantin Belousov /* 12587564c4adSKonstantin Belousov * Honour the base load address from the dso if it is 12597564c4adSKonstantin Belousov * non-zero for some reason. 
12607564c4adSKonstantin Belousov */
1261fa50a355SKonstantin Belousov if (baddr == 0) {
1262fa50a355SKonstantin Belousov if ((sv->sv_flags & SV_ASLR) == 0 ||
1263fa50a355SKonstantin Belousov (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
126495aafd69SKonstantin Belousov et_dyn_addr = __elfN(pie_base);
1265fa50a355SKonstantin Belousov else if ((__elfN(pie_aslr_enabled) &&
1266fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
1267fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
1268fa50a355SKonstantin Belousov et_dyn_addr = ET_DYN_ADDR_RAND;
1269fa50a355SKonstantin Belousov else
127095aafd69SKonstantin Belousov et_dyn_addr = __elfN(pie_base);
127177ebe276SEd Maste }
1272fa50a355SKonstantin Belousov }
12733ebc1248SPeter Wemm
127460bb3943SAlan Cox /*
127560bb3943SAlan Cox * Avoid a possible deadlock if the current address space is destroyed
127660bb3943SAlan Cox * and that address space maps the locked vnode. In the common case,
127760bb3943SAlan Cox * the locked vnode's v_usecount is decremented but remains greater
127860bb3943SAlan Cox * than zero. Consequently, the vnode lock is not needed by vrele().
127960bb3943SAlan Cox * However, in cases where the vnode lock is external, such as nullfs,
128060bb3943SAlan Cox * v_usecount may become zero.
12811dfab802SAlan Cox *
12821dfab802SAlan Cox * The VV_TEXT flag prevents modifications to the executable while
12831dfab802SAlan Cox * the vnode is unlocked.
128460bb3943SAlan Cox */
1285b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp);
128660bb3943SAlan Cox
1287fa50a355SKonstantin Belousov /*
1288fa50a355SKonstantin Belousov * Decide whether to enable randomization of user mappings.
1289fa50a355SKonstantin Belousov * First, reset user preferences for the setid binaries.
1290fa50a355SKonstantin Belousov * Then, account for the support of the randomization by the
1291fa50a355SKonstantin Belousov * ABI, by user preferences, and give special treatment to
1292fa50a355SKonstantin Belousov * PIE binaries.
1293fa50a355SKonstantin Belousov */
1294fa50a355SKonstantin Belousov if (imgp->credential_setid) {
1295fa50a355SKonstantin Belousov PROC_LOCK(imgp->proc);
1296796a8e1aSKonstantin Belousov imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE |
1297796a8e1aSKonstantin Belousov P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC);
1298fa50a355SKonstantin Belousov PROC_UNLOCK(imgp->proc);
1299fa50a355SKonstantin Belousov }
1300fa50a355SKonstantin Belousov if ((sv->sv_flags & SV_ASLR) == 0 ||
1301fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
1302fa50a355SKonstantin Belousov (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
1303fa50a355SKonstantin Belousov KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
1304fa50a355SKonstantin Belousov ("et_dyn_addr == RAND and !ASLR"));
1305fa50a355SKonstantin Belousov } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
1306fa50a355SKonstantin Belousov (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
1307fa50a355SKonstantin Belousov et_dyn_addr == ET_DYN_ADDR_RAND) {
1308fa50a355SKonstantin Belousov imgp->map_flags |= MAP_ASLR;
1309fa50a355SKonstantin Belousov /*
1310fa50a355SKonstantin Belousov * If the user does not care about sbrk, utilize the bss
1311fa50a355SKonstantin Belousov * grow region for mappings as well. We can select
1312fa50a355SKonstantin Belousov * the base for the image anywhere and still not suffer
1313fa50a355SKonstantin Belousov * from fragmentation.
1314fa50a355SKonstantin Belousov */ 1315fa50a355SKonstantin Belousov if (!__elfN(aslr_honor_sbrk) || 1316fa50a355SKonstantin Belousov (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) 1317fa50a355SKonstantin Belousov imgp->map_flags |= MAP_ASLR_IGNSTART; 13181811c1e9SMark Johnston if (__elfN(aslr_stack)) 13191811c1e9SMark Johnston imgp->map_flags |= MAP_ASLR_STACK; 1320939f0b63SKornel Dulęba if (__elfN(aslr_shared_page)) 1321939f0b63SKornel Dulęba imgp->imgp_flags |= IMGP_ASLR_SHARED_PAGE; 1322fa50a355SKonstantin Belousov } 1323fa50a355SKonstantin Belousov 1324796a8e1aSKonstantin Belousov if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 && 1325796a8e1aSKonstantin Belousov (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) || 1326796a8e1aSKonstantin Belousov (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0) 13272e1c94aaSKonstantin Belousov imgp->map_flags |= MAP_WXORX; 13282e1c94aaSKonstantin Belousov 132989b57fcfSKonstantin Belousov error = exec_new_vmspace(imgp, sv); 1330fa50a355SKonstantin Belousov 133119059a13SJohn Baldwin imgp->proc->p_sysent = sv; 1332615f22b2SDmitry Chagin imgp->proc->p_elf_brandinfo = brand_info; 1333e1743d02SSøren Schmidt 13341811c1e9SMark Johnston vmspace = imgp->proc->p_vmspace; 13351811c1e9SMark Johnston map = &vmspace->vm_map; 13361811c1e9SMark Johnston maxv = sv->sv_usrstack; 13371811c1e9SMark Johnston if ((imgp->map_flags & MAP_ASLR_STACK) == 0) 13381811c1e9SMark Johnston maxv -= lim_max(td, RLIMIT_STACK); 13391811c1e9SMark Johnston if (error == 0 && mapsz >= maxv - vm_map_min(map)) { 1340bf839416SKonstantin Belousov uprintf("Excessive mapping size\n"); 1341bf839416SKonstantin Belousov error = ENOEXEC; 1342bf839416SKonstantin Belousov } 1343bf839416SKonstantin Belousov 13449cf78c1cSKonstantin Belousov if (error == 0 && et_dyn_addr == ET_DYN_ADDR_RAND) { 1345fa50a355SKonstantin Belousov KASSERT((map->flags & MAP_ASLR) != 0, 1346fa50a355SKonstantin Belousov ("ET_DYN_ADDR_RAND but !MAP_ASLR")); 13479cf78c1cSKonstantin Belousov error = __CONCAT(rnd_, __elfN(base))(map, 1348fa50a355SKonstantin Belousov vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA), 1349fa50a355SKonstantin Belousov /* reserve half of the address space to interpreter */ 135036df8f54SKonstantin Belousov maxv / 2, maxalign, &et_dyn_addr); 1351fa50a355SKonstantin Belousov } 1352fa50a355SKonstantin Belousov 135378022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 13546c775eb6SKonstantin Belousov if (error != 0) 13556c775eb6SKonstantin Belousov goto ret; 135660bb3943SAlan Cox 13579bcd7482SEdward Tomasz Napierala error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL); 1358292177e6SAlan Cox if (error != 0) 13596c775eb6SKonstantin Belousov goto ret; 1360e1743d02SSøren Schmidt 136120e1174aSEdward Tomasz Napierala error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr); 136220e1174aSEdward Tomasz Napierala if (error != 0) 136320e1174aSEdward Tomasz Napierala goto ret; 1364cac45152SMatthew Dillon 1365cac45152SMatthew Dillon /* 1366c460ac3aSPeter Wemm * We load the dynamic linker where a userland call 1367c460ac3aSPeter Wemm * to mmap(0, ...) would put it. The rationale behind this 1368c460ac3aSPeter Wemm * calculation is that it leaves room for the heap to grow to 1369c460ac3aSPeter Wemm * its maximum allowed size. 
1370c460ac3aSPeter Wemm */ 13716c775eb6SKonstantin Belousov addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, 1372292177e6SAlan Cox RLIMIT_DATA)); 1373fa50a355SKonstantin Belousov if ((map->flags & MAP_ASLR) != 0) { 1374fa50a355SKonstantin Belousov maxv1 = maxv / 2 + addr / 2; 13759cf78c1cSKonstantin Belousov error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, 137641032835SJason A. Harmening (MAXPAGESIZES > 1 && pagesizes[1] != 0) ? 1377e499988fSKonstantin Belousov pagesizes[1] : pagesizes[0], &anon_loc); 13789cf78c1cSKonstantin Belousov if (error != 0) 13799cf78c1cSKonstantin Belousov goto ret; 1380e499988fSKonstantin Belousov map->anon_loc = anon_loc; 1381fa50a355SKonstantin Belousov } else { 1382fa50a355SKonstantin Belousov map->anon_loc = addr; 1383fa50a355SKonstantin Belousov } 1384e1743d02SSøren Schmidt 13851811c1e9SMark Johnston entry = (u_long)hdr->e_entry + et_dyn_addr; 1386ea5a2b2eSSøren Schmidt imgp->entry_addr = entry; 1387ea5a2b2eSSøren Schmidt 138860bb3943SAlan Cox if (interp != NULL) { 1389b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 1390fa50a355SKonstantin Belousov if ((map->flags & MAP_ASLR) != 0) { 139141032835SJason A. Harmening /* Assume that interpreter fits into 1/4 of AS */ 1392fa50a355SKonstantin Belousov maxv1 = maxv / 2 + addr / 2; 13939cf78c1cSKonstantin Belousov error = __CONCAT(rnd_, __elfN(base))(map, addr, 13949cf78c1cSKonstantin Belousov maxv1, PAGE_SIZE, &addr); 1395fa50a355SKonstantin Belousov } 13969cf78c1cSKonstantin Belousov if (error == 0) { 13979cf78c1cSKonstantin Belousov error = __elfN(load_interp)(imgp, brand_info, interp, 13989cf78c1cSKonstantin Belousov &addr, &imgp->entry_addr); 13999cf78c1cSKonstantin Belousov } 140078022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 14019274fb35SEdward Tomasz Napierala if (error != 0) 14026c775eb6SKonstantin Belousov goto ret; 140395c807cfSRobert Watson } else 14047564c4adSKonstantin Belousov addr = et_dyn_addr; 1405ea5a2b2eSSøren Schmidt 14061811c1e9SMark Johnston error = exec_map_stack(imgp); 14071811c1e9SMark Johnston if (error != 0) 14081811c1e9SMark Johnston goto ret; 14091811c1e9SMark Johnston 1410e1743d02SSøren Schmidt /* 1411e3532331SJohn Baldwin * Construct auxargs table (used by the copyout_auxargs routine) 1412e1743d02SSøren Schmidt */ 14132d6b8546SKonstantin Belousov elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT); 14142d6b8546SKonstantin Belousov if (elf_auxargs == NULL) { 1415b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 1416a163d034SWarner Losh elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); 141778022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 14182d6b8546SKonstantin Belousov } 1419e1743d02SSøren Schmidt elf_auxargs->execfd = -1; 14209bcd7482SEdward Tomasz Napierala elf_auxargs->phdr = proghdr + et_dyn_addr; 1421e1743d02SSøren Schmidt elf_auxargs->phent = hdr->e_phentsize; 1422e1743d02SSøren Schmidt elf_auxargs->phnum = hdr->e_phnum; 1423e1743d02SSøren Schmidt elf_auxargs->pagesz = PAGE_SIZE; 1424e1743d02SSøren Schmidt elf_auxargs->base = addr; 1425e1743d02SSøren Schmidt elf_auxargs->flags = 0; 1426e1743d02SSøren Schmidt elf_auxargs->entry = entry; 1427d36eec69SWarner Losh elf_auxargs->hdr_eflags = hdr->e_flags; 1428e1743d02SSøren Schmidt 1429e1743d02SSøren Schmidt imgp->auxargs = elf_auxargs; 1430e1743d02SSøren Schmidt imgp->interpreted = 0; 1431a0ea661fSNathan Whitehorn imgp->reloc_base = addr; 143232c01de2SDmitry Chagin imgp->proc->p_osrel = osrel; 1433cefb93f2SKonstantin Belousov imgp->proc->p_fctl0 = fctl0; 
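	/*
	 * Cache the ABI metadata gathered above on the process: p_osrel and
	 * p_fctl0 were filled in by __elfN(get_brandinfo)(), and p_elf_flags
	 * below preserves the ELF header e_flags (reused, for example, when
	 * writing core dump headers).
	 */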
1434885f13dcSJohn Baldwin imgp->proc->p_elf_flags = hdr->e_flags; 1435f231de47SKonstantin Belousov 14366c775eb6SKonstantin Belousov ret: 1437b4b20492SKonstantin Belousov ASSERT_VOP_LOCKED(imgp->vp, "skipped relock"); 143809c78d53SEdward Tomasz Napierala if (free_interp) 143909c78d53SEdward Tomasz Napierala free(interp, M_TEMP); 1440a7cddfedSJake Burkholder return (error); 1441e1743d02SSøren Schmidt } 1442e1743d02SSøren Schmidt 1443ebf98866SMark Johnston #define elf_suword __CONCAT(suword, __ELF_WORD_SIZE) 14443ebc1248SPeter Wemm 144503b0d68cSJohn Baldwin int 1446d8010b11SJohn Baldwin __elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base) 1447e1743d02SSøren Schmidt { 1448ecbb00a2SDoug Rabson Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; 14495f77b8a8SBrooks Davis Elf_Auxinfo *argarray, *pos; 1450361971fbSKornel Dulęba struct vmspace *vmspace; 145103b0d68cSJohn Baldwin int error; 1452e1743d02SSøren Schmidt 14535f77b8a8SBrooks Davis argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP, 14545f77b8a8SBrooks Davis M_WAITOK | M_ZERO); 1455e1743d02SSøren Schmidt 1456361971fbSKornel Dulęba vmspace = imgp->proc->p_vmspace; 1457361971fbSKornel Dulęba 145835c2a5a8SWarner Losh if (args->execfd != -1) 1459e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); 1460e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); 1461e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHENT, args->phent); 1462e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); 1463e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); 1464e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); 1465e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); 1466e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_BASE, args->base); 1467d36eec69SWarner Losh AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags); 14683ff06357SKonstantin Belousov if (imgp->execpathp != 0) 1469b24e6ac8SBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp); 1470b96bd95bSIan Lepore AUXARGS_ENTRY(pos, AT_OSRELDATE, 1471b96bd95bSIan Lepore imgp->proc->p_ucred->cr_prison->pr_osreldate); 1472ee235befSKonstantin Belousov if (imgp->canary != 0) { 1473b24e6ac8SBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary); 1474ee235befSKonstantin Belousov AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen); 1475ee235befSKonstantin Belousov } 1476ee235befSKonstantin Belousov AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus); 1477ee235befSKonstantin Belousov if (imgp->pagesizes != 0) { 1478b24e6ac8SBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes); 1479ee235befSKonstantin Belousov AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen); 1480ee235befSKonstantin Belousov } 1481361971fbSKornel Dulęba if ((imgp->sysent->sv_flags & SV_TIMEKEEP) != 0) { 1482aea81038SKonstantin Belousov AUXARGS_ENTRY(pos, AT_TIMEKEEP, 1483361971fbSKornel Dulęba vmspace->vm_shp_base + imgp->sysent->sv_timekeep_offset); 1484aea81038SKonstantin Belousov } 148526d8f3e1SKonstantin Belousov AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj 148626d8f3e1SKonstantin Belousov != NULL && imgp->stack_prot != 0 ? 
imgp->stack_prot : 148726d8f3e1SKonstantin Belousov imgp->sysent->sv_stackprot); 1488c2f37b92SJohn Baldwin if (imgp->sysent->sv_hwcap != NULL) 1489c2f37b92SJohn Baldwin AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap); 1490904d8c49SMichal Meloun if (imgp->sysent->sv_hwcap2 != NULL) 1491904d8c49SMichal Meloun AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2); 1492944cf37bSKonstantin Belousov AUXARGS_ENTRY(pos, AT_BSDFLAGS, __elfN(sigfastblock) ? 1493944cf37bSKonstantin Belousov ELF_BSDF_SIGFASTBLK : 0); 14949df1c38bSBrooks Davis AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc); 14959df1c38bSBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv); 14969df1c38bSBrooks Davis AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc); 14979df1c38bSBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv); 14989df1c38bSBrooks Davis AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings); 1499361971fbSKornel Dulęba #ifdef RANDOM_FENESTRASX 1500361971fbSKornel Dulęba if ((imgp->sysent->sv_flags & SV_RNG_SEED_VER) != 0) { 1501361971fbSKornel Dulęba AUXARGS_ENTRY(pos, AT_FXRNG, 1502361971fbSKornel Dulęba vmspace->vm_shp_base + imgp->sysent->sv_fxrng_gen_offset); 1503361971fbSKornel Dulęba } 1504361971fbSKornel Dulęba #endif 1505361971fbSKornel Dulęba if ((imgp->sysent->sv_flags & SV_DSO_SIG) != 0 && __elfN(vdso) != 0) { 1506361971fbSKornel Dulęba AUXARGS_ENTRY(pos, AT_KPRELOAD, 1507361971fbSKornel Dulęba vmspace->vm_shp_base + imgp->sysent->sv_vdso_offset); 1508361971fbSKornel Dulęba } 1509e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_NULL, 0); 1510e1743d02SSøren Schmidt 1511e1743d02SSøren Schmidt free(imgp->auxargs, M_TEMP); 1512e1743d02SSøren Schmidt imgp->auxargs = NULL; 1513d8b2f079SBrooks Davis KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs")); 15145f77b8a8SBrooks Davis 1515d8010b11SJohn Baldwin error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT); 15165f77b8a8SBrooks Davis free(argarray, M_TEMP); 151703b0d68cSJohn Baldwin return (error); 1518e3532331SJohn Baldwin } 1519e1743d02SSøren Schmidt 1520e3532331SJohn Baldwin int 152131174518SJohn Baldwin __elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp) 1522e3532331SJohn Baldwin { 1523e3532331SJohn Baldwin Elf_Addr *base; 1524e3532331SJohn Baldwin 1525e3532331SJohn Baldwin base = (Elf_Addr *)*stack_base; 15263ebc1248SPeter Wemm base--; 1527ebf98866SMark Johnston if (elf_suword(base, imgp->args->argc) == -1) 15285f77b8a8SBrooks Davis return (EFAULT); 152931174518SJohn Baldwin *stack_base = (uintptr_t)base; 1530a7cddfedSJake Burkholder return (0); 1531e1743d02SSøren Schmidt } 1532e1743d02SSøren Schmidt 1533e1743d02SSøren Schmidt /* 15348c64af4fSJohn Polstra * Code for generating ELF core dumps. 15358c64af4fSJohn Polstra */ 15368c64af4fSJohn Polstra 15374d77a549SAlfred Perlstein typedef void (*segment_callback)(vm_map_entry_t, void *); 15380ff27d31SJohn Polstra 15390ff27d31SJohn Polstra /* Closure for cb_put_phdr(). */ 15400ff27d31SJohn Polstra struct phdr_closure { 15410ff27d31SJohn Polstra Elf_Phdr *phdr; /* Program header to fill in */ 15420ff27d31SJohn Polstra Elf_Off offset; /* Offset of segment in core file */ 15430ff27d31SJohn Polstra }; 15440ff27d31SJohn Polstra 1545bd390213SMikolaj Golub struct note_info { 1546bd390213SMikolaj Golub int type; /* Note type. */ 15476b71405bSJohn Baldwin struct regset *regset; /* Register set. */ 1548bd390213SMikolaj Golub outfunc_t outfunc; /* Output function. */ 1549bd390213SMikolaj Golub void *outarg; /* Argument for the output function. 
*/ 1550bd390213SMikolaj Golub size_t outsize; /* Output size. */ 1551bd390213SMikolaj Golub TAILQ_ENTRY(note_info) link; /* Link to the next note info. */ 1552bd390213SMikolaj Golub }; 1553bd390213SMikolaj Golub 1554bd390213SMikolaj Golub TAILQ_HEAD(note_info_list, note_info); 1555bd390213SMikolaj Golub 155678f57a9cSMark Johnston extern int compress_user_cores; 155778f57a9cSMark Johnston extern int compress_user_cores_level; 155878f57a9cSMark Johnston 15594d77a549SAlfred Perlstein static void cb_put_phdr(vm_map_entry_t, void *); 15604d77a549SAlfred Perlstein static void cb_size_segment(vm_map_entry_t, void *); 15615bc3c617SKonstantin Belousov static void each_dumpable_segment(struct thread *, segment_callback, void *, 15625bc3c617SKonstantin Belousov int); 1563aa14e9b7SMark Johnston static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t, 15645bc3c617SKonstantin Belousov struct note_info_list *, size_t, int); 1565435754a5SEdward Tomasz Napierala static void __elfN(putnote)(struct thread *td, struct note_info *, struct sbuf *); 1566bd390213SMikolaj Golub 1567bd390213SMikolaj Golub static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *); 1568bd390213SMikolaj Golub static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *); 1569f1fca82eSMikolaj Golub static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *); 1570f1fca82eSMikolaj Golub static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *); 1571f1fca82eSMikolaj Golub static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *); 1572f1fca82eSMikolaj Golub static void note_procstat_files(void *, struct sbuf *, size_t *); 1573f1fca82eSMikolaj Golub static void note_procstat_groups(void *, struct sbuf *, size_t *); 1574f1fca82eSMikolaj Golub static void note_procstat_osrel(void *, struct sbuf *, size_t *); 1575f1fca82eSMikolaj Golub static void note_procstat_rlimit(void *, struct sbuf *, size_t *); 1576f1fca82eSMikolaj Golub static void note_procstat_umask(void *, struct sbuf *, size_t *); 1577f1fca82eSMikolaj Golub static void note_procstat_vmmap(void *, struct sbuf *, size_t *); 15788c64af4fSJohn Polstra 1579aa14e9b7SMark Johnston static int 158078f57a9cSMark Johnston core_compressed_write(void *base, size_t len, off_t offset, void *arg) 1581aa14e9b7SMark Johnston { 1582aa14e9b7SMark Johnston 1583aa14e9b7SMark Johnston return (core_write((struct coredump_params *)arg, base, len, offset, 1584f31695ccSMark Johnston UIO_SYSSPACE, NULL)); 1585aa14e9b7SMark Johnston } 1586aa14e9b7SMark Johnston 15878c64af4fSJohn Polstra int 1588e7228204SAlfred Perlstein __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) 1589fca666a1SJulian Elischer { 1590247aba24SMarcel Moolenaar struct ucred *cred = td->td_ucred; 159186ffb3d1SKonstantin Belousov int compm, error = 0; 15920ff27d31SJohn Polstra struct sseg_closure seginfo; 1593bd390213SMikolaj Golub struct note_info_list notelst; 1594aa14e9b7SMark Johnston struct coredump_params params; 1595bd390213SMikolaj Golub struct note_info *ninfo; 1596aa14e9b7SMark Johnston void *hdr, *tmpbuf; 1597bd390213SMikolaj Golub size_t hdrsize, notesz, coresize; 15988c64af4fSJohn Polstra 1599e7228204SAlfred Perlstein hdr = NULL; 160002d131adSMark Johnston tmpbuf = NULL; 1601bd390213SMikolaj Golub TAILQ_INIT(¬elst); 1602e7228204SAlfred Perlstein 16030ff27d31SJohn Polstra /* Size the program segments. 
*/ 1604905d192dSEdward Tomasz Napierala __elfN(size_segments)(td, &seginfo, flags); 16050ff27d31SJohn Polstra 16060ff27d31SJohn Polstra /* 1607bd390213SMikolaj Golub * Collect info about the core file header area. 16080ff27d31SJohn Polstra */ 1609bd390213SMikolaj Golub hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); 1610c17b0bd2SConrad Meyer if (seginfo.count + 1 >= PN_XNUM) 1611c17b0bd2SConrad Meyer hdrsize += sizeof(Elf_Shdr); 1612435754a5SEdward Tomasz Napierala td->td_proc->p_sysent->sv_elf_core_prepare_notes(td, ¬elst, ¬esz); 1613bd390213SMikolaj Golub coresize = round_page(hdrsize + notesz) + seginfo.size; 16140ff27d31SJohn Polstra 161502d131adSMark Johnston /* Set up core dump parameters. */ 161602d131adSMark Johnston params.offset = 0; 161702d131adSMark Johnston params.active_cred = cred; 161802d131adSMark Johnston params.file_cred = NOCRED; 161902d131adSMark Johnston params.td = td; 162002d131adSMark Johnston params.vp = vp; 162178f57a9cSMark Johnston params.comp = NULL; 162202d131adSMark Johnston 1623afcc55f3SEdward Tomasz Napierala #ifdef RACCT 16244b5c9cf6SEdward Tomasz Napierala if (racct_enable) { 16251ba5ad42SEdward Tomasz Napierala PROC_LOCK(td->td_proc); 1626bd390213SMikolaj Golub error = racct_add(td->td_proc, RACCT_CORE, coresize); 16271ba5ad42SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 16281ba5ad42SEdward Tomasz Napierala if (error != 0) { 16291ba5ad42SEdward Tomasz Napierala error = EFAULT; 16301ba5ad42SEdward Tomasz Napierala goto done; 16311ba5ad42SEdward Tomasz Napierala } 16324b5c9cf6SEdward Tomasz Napierala } 1633afcc55f3SEdward Tomasz Napierala #endif 1634bd390213SMikolaj Golub if (coresize >= limit) { 1635fba6b1afSAlfred Perlstein error = EFAULT; 1636fba6b1afSAlfred Perlstein goto done; 1637fba6b1afSAlfred Perlstein } 16380ff27d31SJohn Polstra 1639aa14e9b7SMark Johnston /* Create a compression stream if necessary. */ 164086ffb3d1SKonstantin Belousov compm = compress_user_cores; 164186ffb3d1SKonstantin Belousov if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP && 164286ffb3d1SKonstantin Belousov compm == 0) 164386ffb3d1SKonstantin Belousov compm = COMPRESS_GZIP; 164486ffb3d1SKonstantin Belousov if (compm != 0) { 164578f57a9cSMark Johnston params.comp = compressor_init(core_compressed_write, 164686ffb3d1SKonstantin Belousov compm, CORE_BUF_SIZE, 164778f57a9cSMark Johnston compress_user_cores_level, ¶ms); 164878f57a9cSMark Johnston if (params.comp == NULL) { 1649aa14e9b7SMark Johnston error = EFAULT; 1650aa14e9b7SMark Johnston goto done; 1651aa14e9b7SMark Johnston } 1652aa14e9b7SMark Johnston tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); 1653aa14e9b7SMark Johnston } 1654aa14e9b7SMark Johnston 16550ff27d31SJohn Polstra /* 16560ff27d31SJohn Polstra * Allocate memory for building the header, fill it up, 1657bd390213SMikolaj Golub * and write it out following the notes. 16580ff27d31SJohn Polstra */ 1659a163d034SWarner Losh hdr = malloc(hdrsize, M_TEMP, M_WAITOK); 1660aa14e9b7SMark Johnston error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst, 16615bc3c617SKonstantin Belousov notesz, flags); 16620ff27d31SJohn Polstra 16630ff27d31SJohn Polstra /* Write the contents of all of the writable segments. */ 16640ff27d31SJohn Polstra if (error == 0) { 16650ff27d31SJohn Polstra Elf_Phdr *php; 16662b471bc6STim J. 
Robbins off_t offset; 16670ff27d31SJohn Polstra int i; 16680ff27d31SJohn Polstra 16690ff27d31SJohn Polstra php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; 1670bd390213SMikolaj Golub offset = round_page(hdrsize + notesz); 16710ff27d31SJohn Polstra for (i = 0; i < seginfo.count; i++) { 1672f31695ccSMark Johnston error = core_output((char *)(uintptr_t)php->p_vaddr, 1673aa14e9b7SMark Johnston php->p_filesz, offset, ¶ms, tmpbuf); 16740ff27d31SJohn Polstra if (error != 0) 16752b471bc6STim J. Robbins break; 16760ff27d31SJohn Polstra offset += php->p_filesz; 16770ff27d31SJohn Polstra php++; 16780ff27d31SJohn Polstra } 167978f57a9cSMark Johnston if (error == 0 && params.comp != NULL) 168078f57a9cSMark Johnston error = compressor_flush(params.comp); 16810ff27d31SJohn Polstra } 1682e7228204SAlfred Perlstein if (error) { 1683e7228204SAlfred Perlstein log(LOG_WARNING, 1684e7228204SAlfred Perlstein "Failed to write core file for process %s (error %d)\n", 1685e7228204SAlfred Perlstein curproc->p_comm, error); 1686e7228204SAlfred Perlstein } 1687e7228204SAlfred Perlstein 1688e7228204SAlfred Perlstein done: 1689aa14e9b7SMark Johnston free(tmpbuf, M_TEMP); 169078f57a9cSMark Johnston if (params.comp != NULL) 169178f57a9cSMark Johnston compressor_fini(params.comp); 1692bd390213SMikolaj Golub while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) { 1693bd390213SMikolaj Golub TAILQ_REMOVE(¬elst, ninfo, link); 1694bd390213SMikolaj Golub free(ninfo, M_TEMP); 1695bd390213SMikolaj Golub } 1696bd390213SMikolaj Golub if (hdr != NULL) 16970ff27d31SJohn Polstra free(hdr, M_TEMP); 16980ff27d31SJohn Polstra 1699a7cddfedSJake Burkholder return (error); 17008c64af4fSJohn Polstra } 17018c64af4fSJohn Polstra 17020ff27d31SJohn Polstra /* 17031005d8afSConrad Meyer * A callback for each_dumpable_segment() to write out the segment's 17040ff27d31SJohn Polstra * program header entry. 17050ff27d31SJohn Polstra */ 17060ff27d31SJohn Polstra static void 17075cc6d253SEd Maste cb_put_phdr(vm_map_entry_t entry, void *closure) 17080ff27d31SJohn Polstra { 17090ff27d31SJohn Polstra struct phdr_closure *phc = (struct phdr_closure *)closure; 17100ff27d31SJohn Polstra Elf_Phdr *phdr = phc->phdr; 17110ff27d31SJohn Polstra 17120ff27d31SJohn Polstra phc->offset = round_page(phc->offset); 17130ff27d31SJohn Polstra 17140ff27d31SJohn Polstra phdr->p_type = PT_LOAD; 17150ff27d31SJohn Polstra phdr->p_offset = phc->offset; 17160ff27d31SJohn Polstra phdr->p_vaddr = entry->start; 17170ff27d31SJohn Polstra phdr->p_paddr = 0; 17180ff27d31SJohn Polstra phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; 17190ff27d31SJohn Polstra phdr->p_align = PAGE_SIZE; 1720ed167eaaSKonstantin Belousov phdr->p_flags = __elfN(untrans_prot)(entry->protection); 17210ff27d31SJohn Polstra 17220ff27d31SJohn Polstra phc->offset += phdr->p_filesz; 17230ff27d31SJohn Polstra phc->phdr++; 17240ff27d31SJohn Polstra } 17250ff27d31SJohn Polstra 17260ff27d31SJohn Polstra /* 17271005d8afSConrad Meyer * A callback for each_dumpable_segment() to gather information about 17280ff27d31SJohn Polstra * the number of segments and their total size. 
17290ff27d31SJohn Polstra */
17300ff27d31SJohn Polstra static void
1731f3325003SConrad Meyer cb_size_segment(vm_map_entry_t entry, void *closure)
17320ff27d31SJohn Polstra {
17330ff27d31SJohn Polstra struct sseg_closure *ssc = (struct sseg_closure *)closure;
17340ff27d31SJohn Polstra
17350ff27d31SJohn Polstra ssc->count++;
17360ff27d31SJohn Polstra ssc->size += entry->end - entry->start;
17370ff27d31SJohn Polstra }
17380ff27d31SJohn Polstra
1739905d192dSEdward Tomasz Napierala void
1740905d192dSEdward Tomasz Napierala __elfN(size_segments)(struct thread *td, struct sseg_closure *seginfo,
1741905d192dSEdward Tomasz Napierala int flags)
1742905d192dSEdward Tomasz Napierala {
1743905d192dSEdward Tomasz Napierala seginfo->count = 0;
1744905d192dSEdward Tomasz Napierala seginfo->size = 0;
1745905d192dSEdward Tomasz Napierala
1746905d192dSEdward Tomasz Napierala each_dumpable_segment(td, cb_size_segment, seginfo, flags);
1747905d192dSEdward Tomasz Napierala }
1748905d192dSEdward Tomasz Napierala
17490ff27d31SJohn Polstra /*
17500ff27d31SJohn Polstra * For each segment in the process's memory map that is eligible for
17510ff27d31SJohn Polstra * dumping, call the given function with a pointer to the map entry
17520ff27d31SJohn Polstra * and some arbitrary caller-supplied data.
17530ff27d31SJohn Polstra */
17540ff27d31SJohn Polstra static void
17555bc3c617SKonstantin Belousov each_dumpable_segment(struct thread *td, segment_callback func, void *closure,
17565bc3c617SKonstantin Belousov int flags)
17570ff27d31SJohn Polstra {
1758247aba24SMarcel Moolenaar struct proc *p = td->td_proc;
17590ff27d31SJohn Polstra vm_map_t map = &p->p_vmspace->vm_map;
17600ff27d31SJohn Polstra vm_map_entry_t entry;
1761976a87a2SAlan Cox vm_object_t backing_object, object;
1762fec41f07SMark Johnston bool ignore_entry;
17630ff27d31SJohn Polstra
1764976a87a2SAlan Cox vm_map_lock_read(map);
17652288078cSDoug Moore VM_MAP_ENTRY_FOREACH(entry, map) {
1766fa7dd9c5SMatthew Dillon /*
1767fa7dd9c5SMatthew Dillon * Don't dump inaccessible mappings; deal with legacy
1768fa7dd9c5SMatthew Dillon * coredump mode.
1769fa7dd9c5SMatthew Dillon *
1770fa7dd9c5SMatthew Dillon * Note that read-only segments related to the ELF binary
1771fa7dd9c5SMatthew Dillon * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1772fa7dd9c5SMatthew Dillon * need to arbitrarily ignore such segments.
1773fa7dd9c5SMatthew Dillon */
177486ffb3d1SKonstantin Belousov if ((flags & SVC_ALL) == 0) {
1775fa7dd9c5SMatthew Dillon if (elf_legacy_coredump) {
177686ffb3d1SKonstantin Belousov if ((entry->protection & VM_PROT_RW) !=
177786ffb3d1SKonstantin Belousov VM_PROT_RW)
17780ff27d31SJohn Polstra continue;
1779fa7dd9c5SMatthew Dillon } else {
1780fa7dd9c5SMatthew Dillon if ((entry->protection & VM_PROT_ALL) == 0)
1781fa7dd9c5SMatthew Dillon continue;
1782fa7dd9c5SMatthew Dillon }
178386ffb3d1SKonstantin Belousov }
17840ff27d31SJohn Polstra
17859730a5daSPaul Saab /*
1786fa7dd9c5SMatthew Dillon * Don't include a memory segment in the coredump if
1787fa7dd9c5SMatthew Dillon * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1788fa7dd9c5SMatthew Dillon * madvise(2). Do not dump submaps (i.e. parts of the
1789fa7dd9c5SMatthew Dillon * kernel map).
17909730a5daSPaul Saab */ 179186ffb3d1SKonstantin Belousov if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 17929730a5daSPaul Saab continue; 179386ffb3d1SKonstantin Belousov if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 && 179486ffb3d1SKonstantin Belousov (flags & SVC_ALL) == 0) 179586ffb3d1SKonstantin Belousov continue; 1796976a87a2SAlan Cox if ((object = entry->object.vm_object) == NULL) 17970ff27d31SJohn Polstra continue; 17980ff27d31SJohn Polstra 17990ff27d31SJohn Polstra /* Ignore memory-mapped devices and such things. */ 1800bc403f03SAttilio Rao VM_OBJECT_RLOCK(object); 1801976a87a2SAlan Cox while ((backing_object = object->backing_object) != NULL) { 1802bc403f03SAttilio Rao VM_OBJECT_RLOCK(backing_object); 1803bc403f03SAttilio Rao VM_OBJECT_RUNLOCK(object); 1804976a87a2SAlan Cox object = backing_object; 1805976a87a2SAlan Cox } 1806fec41f07SMark Johnston ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0; 1807bc403f03SAttilio Rao VM_OBJECT_RUNLOCK(object); 1808976a87a2SAlan Cox if (ignore_entry) 18090ff27d31SJohn Polstra continue; 18100ff27d31SJohn Polstra 18110ff27d31SJohn Polstra (*func)(entry, closure); 18120ff27d31SJohn Polstra } 1813976a87a2SAlan Cox vm_map_unlock_read(map); 18140ff27d31SJohn Polstra } 18150ff27d31SJohn Polstra 18160ff27d31SJohn Polstra /* 18170ff27d31SJohn Polstra * Write the core file header to the file, including padding up to 18180ff27d31SJohn Polstra * the page boundary. 18190ff27d31SJohn Polstra */ 18208c64af4fSJohn Polstra static int 1821aa14e9b7SMark Johnston __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, 18225bc3c617SKonstantin Belousov size_t hdrsize, struct note_info_list *notelst, size_t notesz, 18235bc3c617SKonstantin Belousov int flags) 18248c64af4fSJohn Polstra { 1825bd390213SMikolaj Golub struct note_info *ninfo; 1826bd390213SMikolaj Golub struct sbuf *sb; 1827bd390213SMikolaj Golub int error; 18288c64af4fSJohn Polstra 18298c64af4fSJohn Polstra /* Fill in the header. */ 18300ff27d31SJohn Polstra bzero(hdr, hdrsize); 18315bc3c617SKonstantin Belousov __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags); 18328c64af4fSJohn Polstra 1833bd390213SMikolaj Golub sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); 1834aa14e9b7SMark Johnston sbuf_set_drain(sb, sbuf_drain_core_output, p); 1835bd390213SMikolaj Golub sbuf_start_section(sb, NULL); 1836bd390213SMikolaj Golub sbuf_bcat(sb, hdr, hdrsize); 1837bd390213SMikolaj Golub TAILQ_FOREACH(ninfo, notelst, link) 1838435754a5SEdward Tomasz Napierala __elfN(putnote)(p->td, ninfo, sb); 1839bd390213SMikolaj Golub /* Align up to a page boundary for the program segments. 
*/ 1840bd390213SMikolaj Golub sbuf_end_section(sb, -1, PAGE_SIZE, 0); 1841bd390213SMikolaj Golub error = sbuf_finish(sb); 1842bd390213SMikolaj Golub sbuf_delete(sb); 1843bd390213SMikolaj Golub 1844bd390213SMikolaj Golub return (error); 1845e7228204SAlfred Perlstein } 1846bd390213SMikolaj Golub 1847435754a5SEdward Tomasz Napierala void 1848bd390213SMikolaj Golub __elfN(prepare_notes)(struct thread *td, struct note_info_list *list, 1849bd390213SMikolaj Golub size_t *sizep) 1850bd390213SMikolaj Golub { 1851bd390213SMikolaj Golub struct proc *p; 1852bd390213SMikolaj Golub struct thread *thr; 1853bd390213SMikolaj Golub size_t size; 1854bd390213SMikolaj Golub 1855bd390213SMikolaj Golub p = td->td_proc; 1856bd390213SMikolaj Golub size = 0; 1857bd390213SMikolaj Golub 18586b71405bSJohn Baldwin size += __elfN(register_note)(td, list, NT_PRPSINFO, 18596b71405bSJohn Baldwin __elfN(note_prpsinfo), p); 1860bd390213SMikolaj Golub 1861bd390213SMikolaj Golub /* 1862bd390213SMikolaj Golub * To have the debugger select the right thread (LWP) as the initial 1863bd390213SMikolaj Golub * thread, we dump the state of the thread passed to us in td first. 1864bd390213SMikolaj Golub * This is the thread that causes the core dump and thus likely to 1865bd390213SMikolaj Golub * be the right thread one wants to have selected in the debugger. 1866bd390213SMikolaj Golub */ 1867bd390213SMikolaj Golub thr = td; 1868bd390213SMikolaj Golub while (thr != NULL) { 18696b71405bSJohn Baldwin size += __elfN(prepare_register_notes)(td, list, thr); 1870435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, -1, 1871bd390213SMikolaj Golub __elfN(note_threadmd), thr); 1872bd390213SMikolaj Golub 187361b4c627SEdward Tomasz Napierala thr = thr == td ? TAILQ_FIRST(&p->p_threads) : 1874bd390213SMikolaj Golub TAILQ_NEXT(thr, td_plist); 1875bd390213SMikolaj Golub if (thr == td) 1876bd390213SMikolaj Golub thr = TAILQ_NEXT(thr, td_plist); 1877dada0278SJohn Polstra } 1878dada0278SJohn Polstra 1879435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_PROC, 1880f1fca82eSMikolaj Golub __elfN(note_procstat_proc), p); 1881435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_FILES, 1882f1fca82eSMikolaj Golub note_procstat_files, p); 1883435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_VMMAP, 1884f1fca82eSMikolaj Golub note_procstat_vmmap, p); 1885435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_GROUPS, 1886f1fca82eSMikolaj Golub note_procstat_groups, p); 1887435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_UMASK, 1888f1fca82eSMikolaj Golub note_procstat_umask, p); 1889435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_RLIMIT, 1890f1fca82eSMikolaj Golub note_procstat_rlimit, p); 1891435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_OSREL, 1892f1fca82eSMikolaj Golub note_procstat_osrel, p); 1893435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_PSSTRINGS, 1894f1fca82eSMikolaj Golub __elfN(note_procstat_psstrings), p); 1895435754a5SEdward Tomasz Napierala size += __elfN(register_note)(td, list, NT_PROCSTAT_AUXV, 1896f1fca82eSMikolaj Golub __elfN(note_procstat_auxv), p); 1897f1fca82eSMikolaj Golub 1898bd390213SMikolaj Golub *sizep = size; 1899bd390213SMikolaj Golub } 1900bd390213SMikolaj Golub 1901905d192dSEdward Tomasz Napierala void 1902bd390213SMikolaj Golub 
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs, 19035bc3c617SKonstantin Belousov size_t notesz, int flags) 1904bd390213SMikolaj Golub { 1905bd390213SMikolaj Golub Elf_Ehdr *ehdr; 1906bd390213SMikolaj Golub Elf_Phdr *phdr; 1907c17b0bd2SConrad Meyer Elf_Shdr *shdr; 1908bd390213SMikolaj Golub struct phdr_closure phc; 19095d9f7901SDmitry Chagin Elf_Brandinfo *bi; 1910bd390213SMikolaj Golub 1911bd390213SMikolaj Golub ehdr = (Elf_Ehdr *)hdr; 19125d9f7901SDmitry Chagin bi = td->td_proc->p_elf_brandinfo; 1913bd390213SMikolaj Golub 1914bd390213SMikolaj Golub ehdr->e_ident[EI_MAG0] = ELFMAG0; 1915bd390213SMikolaj Golub ehdr->e_ident[EI_MAG1] = ELFMAG1; 1916bd390213SMikolaj Golub ehdr->e_ident[EI_MAG2] = ELFMAG2; 1917bd390213SMikolaj Golub ehdr->e_ident[EI_MAG3] = ELFMAG3; 1918bd390213SMikolaj Golub ehdr->e_ident[EI_CLASS] = ELF_CLASS; 1919bd390213SMikolaj Golub ehdr->e_ident[EI_DATA] = ELF_DATA; 1920bd390213SMikolaj Golub ehdr->e_ident[EI_VERSION] = EV_CURRENT; 1921435754a5SEdward Tomasz Napierala ehdr->e_ident[EI_OSABI] = td->td_proc->p_sysent->sv_elf_core_osabi; 1922bd390213SMikolaj Golub ehdr->e_ident[EI_ABIVERSION] = 0; 1923bd390213SMikolaj Golub ehdr->e_ident[EI_PAD] = 0; 1924bd390213SMikolaj Golub ehdr->e_type = ET_CORE; 19255d9f7901SDmitry Chagin ehdr->e_machine = bi->machine; 1926bd390213SMikolaj Golub ehdr->e_version = EV_CURRENT; 1927bd390213SMikolaj Golub ehdr->e_entry = 0; 1928bd390213SMikolaj Golub ehdr->e_phoff = sizeof(Elf_Ehdr); 1929885f13dcSJohn Baldwin ehdr->e_flags = td->td_proc->p_elf_flags; 1930bd390213SMikolaj Golub ehdr->e_ehsize = sizeof(Elf_Ehdr); 1931bd390213SMikolaj Golub ehdr->e_phentsize = sizeof(Elf_Phdr); 1932bd390213SMikolaj Golub ehdr->e_shentsize = sizeof(Elf_Shdr); 1933bd390213SMikolaj Golub ehdr->e_shstrndx = SHN_UNDEF; 1934c17b0bd2SConrad Meyer if (numsegs + 1 < PN_XNUM) { 1935c17b0bd2SConrad Meyer ehdr->e_phnum = numsegs + 1; 1936c17b0bd2SConrad Meyer ehdr->e_shnum = 0; 1937c17b0bd2SConrad Meyer } else { 1938c17b0bd2SConrad Meyer ehdr->e_phnum = PN_XNUM; 1939c17b0bd2SConrad Meyer ehdr->e_shnum = 1; 1940c17b0bd2SConrad Meyer 1941c17b0bd2SConrad Meyer ehdr->e_shoff = ehdr->e_phoff + 1942c17b0bd2SConrad Meyer (numsegs + 1) * ehdr->e_phentsize; 1943c17b0bd2SConrad Meyer KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr), 1944c17b0bd2SConrad Meyer ("e_shoff: %zu, hdrsize - shdr: %zu", 194507f825e8SConrad Meyer (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr))); 1946c17b0bd2SConrad Meyer 1947c17b0bd2SConrad Meyer shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff); 1948c17b0bd2SConrad Meyer memset(shdr, 0, sizeof(*shdr)); 1949c17b0bd2SConrad Meyer /* 1950c17b0bd2SConrad Meyer * A special first section is used to hold large segment and 1951c17b0bd2SConrad Meyer * section counts. This was proposed by Sun Microsystems in 1952c17b0bd2SConrad Meyer * Solaris and has been adopted by Linux; the standard ELF 1953c17b0bd2SConrad Meyer * tools are already familiar with the technique. 1954c17b0bd2SConrad Meyer * 1955c17b0bd2SConrad Meyer * See table 7-7 of the Solaris "Linker and Libraries Guide" 1956c17b0bd2SConrad Meyer * (or 12-7 depending on the version of the document) for more 1957c17b0bd2SConrad Meyer * details. 
1958c17b0bd2SConrad Meyer */
1959c17b0bd2SConrad Meyer shdr->sh_type = SHT_NULL;
1960c17b0bd2SConrad Meyer shdr->sh_size = ehdr->e_shnum;
1961c17b0bd2SConrad Meyer shdr->sh_link = ehdr->e_shstrndx;
1962c17b0bd2SConrad Meyer shdr->sh_info = numsegs + 1;
1963c17b0bd2SConrad Meyer }
1964bd390213SMikolaj Golub
1965bd390213SMikolaj Golub /*
1966bd390213SMikolaj Golub * Fill in the program header entries.
1967bd390213SMikolaj Golub */
1968c17b0bd2SConrad Meyer phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);
1969bd390213SMikolaj Golub
1970bd390213SMikolaj Golub /* The note segment. */
1971bd390213SMikolaj Golub phdr->p_type = PT_NOTE;
1972bd390213SMikolaj Golub phdr->p_offset = hdrsize;
1973bd390213SMikolaj Golub phdr->p_vaddr = 0;
1974bd390213SMikolaj Golub phdr->p_paddr = 0;
1975bd390213SMikolaj Golub phdr->p_filesz = notesz;
1976bd390213SMikolaj Golub phdr->p_memsz = 0;
1977bd390213SMikolaj Golub phdr->p_flags = PF_R;
19781b8388cdSMikolaj Golub phdr->p_align = ELF_NOTE_ROUNDSIZE;
1979bd390213SMikolaj Golub phdr++;
1980bd390213SMikolaj Golub
1981bd390213SMikolaj Golub /* All the writable segments from the program. */
1982bd390213SMikolaj Golub phc.phdr = phdr;
1983bd390213SMikolaj Golub phc.offset = round_page(hdrsize + notesz);
19845bc3c617SKonstantin Belousov each_dumpable_segment(td, cb_put_phdr, &phc, flags);
1985bd390213SMikolaj Golub }
1986bd390213SMikolaj Golub
19876b71405bSJohn Baldwin static size_t
19886b71405bSJohn Baldwin __elfN(register_regset_note)(struct thread *td, struct note_info_list *list,
19896b71405bSJohn Baldwin struct regset *regset, struct thread *target_td)
19906b71405bSJohn Baldwin {
19916b71405bSJohn Baldwin const struct sysentvec *sv;
19926b71405bSJohn Baldwin struct note_info *ninfo;
19936b71405bSJohn Baldwin size_t size, notesize;
19946b71405bSJohn Baldwin
19956b71405bSJohn Baldwin size = 0;
19966b71405bSJohn Baldwin if (!regset->get(regset, target_td, NULL, &size) || size == 0)
19976b71405bSJohn Baldwin return (0);
19986b71405bSJohn Baldwin
19996b71405bSJohn Baldwin ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
20006b71405bSJohn Baldwin ninfo->type = regset->note;
20016b71405bSJohn Baldwin ninfo->regset = regset;
20026b71405bSJohn Baldwin ninfo->outarg = target_td;
20036b71405bSJohn Baldwin ninfo->outsize = size;
20046b71405bSJohn Baldwin TAILQ_INSERT_TAIL(list, ninfo, link);
20056b71405bSJohn Baldwin
20066b71405bSJohn Baldwin sv = td->td_proc->p_sysent;
20076b71405bSJohn Baldwin notesize = sizeof(Elf_Note) + /* note header */
20086b71405bSJohn Baldwin roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) +
20096b71405bSJohn Baldwin /* note name */
20106b71405bSJohn Baldwin roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
20116b71405bSJohn Baldwin
20126b71405bSJohn Baldwin return (notesize);
20136b71405bSJohn Baldwin }
20146b71405bSJohn Baldwin
2015435754a5SEdward Tomasz Napierala size_t
2016435754a5SEdward Tomasz Napierala __elfN(register_note)(struct thread *td, struct note_info_list *list,
2017435754a5SEdward Tomasz Napierala int type, outfunc_t out, void *arg)
2018bd390213SMikolaj Golub {
2019435754a5SEdward Tomasz Napierala const struct sysentvec *sv;
2020bd390213SMikolaj Golub struct note_info *ninfo;
2021bd390213SMikolaj Golub size_t size, notesize;
2022bd390213SMikolaj Golub
2023435754a5SEdward Tomasz Napierala sv = td->td_proc->p_sysent;
2024bd390213SMikolaj Golub size = 0;
2025bd390213SMikolaj Golub out(arg, NULL, &size);
2026bd390213SMikolaj Golub ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
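	/*
	 * The note size was computed above by calling the output function
	 * with a NULL sbuf; record it so that the header sizing pass and
	 * the later emission in __elfN(putnote)() stay in agreement.
	 */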
2027bd390213SMikolaj Golub ninfo->type = type; 2028bd390213SMikolaj Golub ninfo->outfunc = out; 2029bd390213SMikolaj Golub ninfo->outarg = arg; 2030bd390213SMikolaj Golub ninfo->outsize = size; 2031bd390213SMikolaj Golub TAILQ_INSERT_TAIL(list, ninfo, link); 2032bd390213SMikolaj Golub 2033bd390213SMikolaj Golub if (type == -1) 2034bd390213SMikolaj Golub return (size); 2035bd390213SMikolaj Golub 2036bd390213SMikolaj Golub notesize = sizeof(Elf_Note) + /* note header */ 2037435754a5SEdward Tomasz Napierala roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) + 2038180e57e5SJohn Baldwin /* note name */ 2039180e57e5SJohn Baldwin roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ 2040180e57e5SJohn Baldwin 2041180e57e5SJohn Baldwin return (notesize); 2042180e57e5SJohn Baldwin } 2043180e57e5SJohn Baldwin 2044180e57e5SJohn Baldwin static size_t 2045180e57e5SJohn Baldwin append_note_data(const void *src, void *dst, size_t len) 2046180e57e5SJohn Baldwin { 2047180e57e5SJohn Baldwin size_t padded_len; 2048180e57e5SJohn Baldwin 2049180e57e5SJohn Baldwin padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE); 2050180e57e5SJohn Baldwin if (dst != NULL) { 2051180e57e5SJohn Baldwin bcopy(src, dst, len); 2052180e57e5SJohn Baldwin bzero((char *)dst + len, padded_len - len); 2053180e57e5SJohn Baldwin } 2054180e57e5SJohn Baldwin return (padded_len); 2055180e57e5SJohn Baldwin } 2056180e57e5SJohn Baldwin 2057180e57e5SJohn Baldwin size_t 2058180e57e5SJohn Baldwin __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp) 2059180e57e5SJohn Baldwin { 2060180e57e5SJohn Baldwin Elf_Note *note; 2061180e57e5SJohn Baldwin char *buf; 2062180e57e5SJohn Baldwin size_t notesize; 2063180e57e5SJohn Baldwin 2064180e57e5SJohn Baldwin buf = dst; 2065180e57e5SJohn Baldwin if (buf != NULL) { 2066180e57e5SJohn Baldwin note = (Elf_Note *)buf; 2067180e57e5SJohn Baldwin note->n_namesz = sizeof(FREEBSD_ABI_VENDOR); 2068180e57e5SJohn Baldwin note->n_descsz = size; 2069180e57e5SJohn Baldwin note->n_type = type; 2070180e57e5SJohn Baldwin buf += sizeof(*note); 2071180e57e5SJohn Baldwin buf += append_note_data(FREEBSD_ABI_VENDOR, buf, 2072180e57e5SJohn Baldwin sizeof(FREEBSD_ABI_VENDOR)); 2073180e57e5SJohn Baldwin append_note_data(src, buf, size); 2074180e57e5SJohn Baldwin if (descp != NULL) 2075180e57e5SJohn Baldwin *descp = buf; 2076180e57e5SJohn Baldwin } 2077180e57e5SJohn Baldwin 2078180e57e5SJohn Baldwin notesize = sizeof(Elf_Note) + /* note header */ 2079180e57e5SJohn Baldwin roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + 2080180e57e5SJohn Baldwin /* note name */ 20811b8388cdSMikolaj Golub roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ 2082bd390213SMikolaj Golub 2083bd390213SMikolaj Golub return (notesize); 2084bd390213SMikolaj Golub } 2085bd390213SMikolaj Golub 2086bd390213SMikolaj Golub static void 2087435754a5SEdward Tomasz Napierala __elfN(putnote)(struct thread *td, struct note_info *ninfo, struct sbuf *sb) 2088bd390213SMikolaj Golub { 2089bd390213SMikolaj Golub Elf_Note note; 2090435754a5SEdward Tomasz Napierala const struct sysentvec *sv; 209114bdbaf2SConrad Meyer ssize_t old_len, sect_len; 209214bdbaf2SConrad Meyer size_t new_len, descsz, i; 2093bd390213SMikolaj Golub 2094bd390213SMikolaj Golub if (ninfo->type == -1) { 2095bd390213SMikolaj Golub ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); 2096bd390213SMikolaj Golub return; 2097bd390213SMikolaj Golub } 2098bd390213SMikolaj Golub 2099435754a5SEdward Tomasz Napierala sv = td->td_proc->p_sysent; 
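	/*
	 * Emit the note in the standard ELF layout: an Elf_Note header,
	 * the ABI vendor string as the note name, then the descriptor,
	 * with the name and descriptor each padded to ELF_NOTE_ROUNDSIZE;
	 * the sbuf sections below provide that padding.
	 */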
2100435754a5SEdward Tomasz Napierala 2101435754a5SEdward Tomasz Napierala note.n_namesz = strlen(sv->sv_elf_core_abi_vendor) + 1; 2102bd390213SMikolaj Golub note.n_descsz = ninfo->outsize; 2103bd390213SMikolaj Golub note.n_type = ninfo->type; 2104bd390213SMikolaj Golub 2105bd390213SMikolaj Golub sbuf_bcat(sb, ¬e, sizeof(note)); 2106bd390213SMikolaj Golub sbuf_start_section(sb, &old_len); 2107435754a5SEdward Tomasz Napierala sbuf_bcat(sb, sv->sv_elf_core_abi_vendor, 2108435754a5SEdward Tomasz Napierala strlen(sv->sv_elf_core_abi_vendor) + 1); 21091b8388cdSMikolaj Golub sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); 2110bd390213SMikolaj Golub if (note.n_descsz == 0) 2111bd390213SMikolaj Golub return; 2112bd390213SMikolaj Golub sbuf_start_section(sb, &old_len); 21136b71405bSJohn Baldwin if (ninfo->regset != NULL) { 21146b71405bSJohn Baldwin struct regset *regset = ninfo->regset; 21156b71405bSJohn Baldwin void *buf; 21166b71405bSJohn Baldwin 21176b71405bSJohn Baldwin buf = malloc(ninfo->outsize, M_TEMP, M_ZERO | M_WAITOK); 21186b71405bSJohn Baldwin (void)regset->get(regset, ninfo->outarg, buf, &ninfo->outsize); 21196b71405bSJohn Baldwin sbuf_bcat(sb, buf, ninfo->outsize); 21206b71405bSJohn Baldwin free(buf, M_TEMP); 21216b71405bSJohn Baldwin } else 2122bd390213SMikolaj Golub ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); 212314bdbaf2SConrad Meyer sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); 212414bdbaf2SConrad Meyer if (sect_len < 0) 212514bdbaf2SConrad Meyer return; 212614bdbaf2SConrad Meyer 212714bdbaf2SConrad Meyer new_len = (size_t)sect_len; 212814bdbaf2SConrad Meyer descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE); 212914bdbaf2SConrad Meyer if (new_len < descsz) { 213014bdbaf2SConrad Meyer /* 213114bdbaf2SConrad Meyer * It is expected that individual note emitters will correctly 213214bdbaf2SConrad Meyer * predict their expected output size and fill up to that size 213314bdbaf2SConrad Meyer * themselves, padding in a format-specific way if needed. 213414bdbaf2SConrad Meyer * However, in case they don't, just do it here with zeros. 213514bdbaf2SConrad Meyer */ 213614bdbaf2SConrad Meyer for (i = 0; i < descsz - new_len; i++) 213714bdbaf2SConrad Meyer sbuf_putc(sb, 0); 213814bdbaf2SConrad Meyer } else if (new_len > descsz) { 213914bdbaf2SConrad Meyer /* 214014bdbaf2SConrad Meyer * We can't always truncate sb -- we may have drained some 214114bdbaf2SConrad Meyer * of it already. 214214bdbaf2SConrad Meyer */ 214314bdbaf2SConrad Meyer KASSERT(new_len == descsz, ("%s: Note type %u changed as we " 214414bdbaf2SConrad Meyer "read it (%zu > %zu). Since it is longer than " 214514bdbaf2SConrad Meyer "expected, this coredump's notes are corrupt. THIS " 214614bdbaf2SConrad Meyer "IS A BUG in the note_procstat routine for type %u.\n", 214714bdbaf2SConrad Meyer __func__, (unsigned)note.n_type, new_len, descsz, 214814bdbaf2SConrad Meyer (unsigned)note.n_type)); 214914bdbaf2SConrad Meyer } 2150bd390213SMikolaj Golub } 2151bd390213SMikolaj Golub 2152bd390213SMikolaj Golub /* 2153bd390213SMikolaj Golub * Miscellaneous note out functions. 
2154bd390213SMikolaj Golub */ 2155bd390213SMikolaj Golub 2156841c0c7eSNathan Whitehorn #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2157841c0c7eSNathan Whitehorn #include <compat/freebsd32/freebsd32.h> 215851645e83SJohn Baldwin #include <compat/freebsd32/freebsd32_signal.h> 2159841c0c7eSNathan Whitehorn 216062919d78SPeter Wemm typedef struct prstatus32 elf_prstatus_t; 216162919d78SPeter Wemm typedef struct prpsinfo32 elf_prpsinfo_t; 216262919d78SPeter Wemm typedef struct fpreg32 elf_prfpregset_t; 216362919d78SPeter Wemm typedef struct fpreg32 elf_fpregset_t; 216462919d78SPeter Wemm typedef struct reg32 elf_gregset_t; 21657f08176eSAttilio Rao typedef struct thrmisc32 elf_thrmisc_t; 21660288d427SJohn Baldwin typedef struct ptrace_lwpinfo32 elf_lwpinfo_t; 2167f1fca82eSMikolaj Golub #define ELF_KERN_PROC_MASK KERN_PROC_MASK32 2168f1fca82eSMikolaj Golub typedef struct kinfo_proc32 elf_kinfo_proc_t; 2169f1fca82eSMikolaj Golub typedef uint32_t elf_ps_strings_t; 217062919d78SPeter Wemm #else 217162919d78SPeter Wemm typedef prstatus_t elf_prstatus_t; 217262919d78SPeter Wemm typedef prpsinfo_t elf_prpsinfo_t; 217362919d78SPeter Wemm typedef prfpregset_t elf_prfpregset_t; 217462919d78SPeter Wemm typedef prfpregset_t elf_fpregset_t; 217562919d78SPeter Wemm typedef gregset_t elf_gregset_t; 21767f08176eSAttilio Rao typedef thrmisc_t elf_thrmisc_t; 21770288d427SJohn Baldwin typedef struct ptrace_lwpinfo elf_lwpinfo_t; 2178f1fca82eSMikolaj Golub #define ELF_KERN_PROC_MASK 0 2179f1fca82eSMikolaj Golub typedef struct kinfo_proc elf_kinfo_proc_t; 2180f1fca82eSMikolaj Golub typedef vm_offset_t elf_ps_strings_t; 218162919d78SPeter Wemm #endif 218262919d78SPeter Wemm 21838c64af4fSJohn Polstra static void 2184bd390213SMikolaj Golub __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep) 21858c64af4fSJohn Polstra { 2186c77547d2SJohn Baldwin struct sbuf sbarg; 2187c77547d2SJohn Baldwin size_t len; 2188c77547d2SJohn Baldwin char *cp, *end; 2189247aba24SMarcel Moolenaar struct proc *p; 2190bd390213SMikolaj Golub elf_prpsinfo_t *psinfo; 2191c77547d2SJohn Baldwin int error; 21928c64af4fSJohn Polstra 219361b4c627SEdward Tomasz Napierala p = arg; 2194bd390213SMikolaj Golub if (sb != NULL) { 2195bd390213SMikolaj Golub KASSERT(*sizep == sizeof(*psinfo), ("invalid size")); 2196bd390213SMikolaj Golub psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK); 21978c9b7b2cSMarcel Moolenaar psinfo->pr_version = PRPSINFO_VERSION; 219862919d78SPeter Wemm psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); 2199ccd3953eSJohn Baldwin strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); 2200c77547d2SJohn Baldwin PROC_LOCK(p); 2201c77547d2SJohn Baldwin if (p->p_args != NULL) { 2202c77547d2SJohn Baldwin len = sizeof(psinfo->pr_psargs) - 1; 2203c77547d2SJohn Baldwin if (len > p->p_args->ar_length) 2204c77547d2SJohn Baldwin len = p->p_args->ar_length; 2205c77547d2SJohn Baldwin memcpy(psinfo->pr_psargs, p->p_args->ar_args, len); 2206c77547d2SJohn Baldwin PROC_UNLOCK(p); 2207c77547d2SJohn Baldwin error = 0; 2208c77547d2SJohn Baldwin } else { 2209c77547d2SJohn Baldwin _PHOLD(p); 2210c77547d2SJohn Baldwin PROC_UNLOCK(p); 2211c77547d2SJohn Baldwin sbuf_new(&sbarg, psinfo->pr_psargs, 2212c77547d2SJohn Baldwin sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN); 2213c77547d2SJohn Baldwin error = proc_getargv(curthread, p, &sbarg); 2214c77547d2SJohn Baldwin PRELE(p); 221500d17cf3SKonstantin Belousov if (sbuf_finish(&sbarg) == 0) { 221600d17cf3SKonstantin Belousov len = sbuf_len(&sbarg); 221700d17cf3SKonstantin Belousov if (len > 
0) 221800d17cf3SKonstantin Belousov len--; 221900d17cf3SKonstantin Belousov } else { 2220c77547d2SJohn Baldwin len = sizeof(psinfo->pr_psargs) - 1; 222100d17cf3SKonstantin Belousov } 2222c77547d2SJohn Baldwin sbuf_delete(&sbarg); 2223c77547d2SJohn Baldwin } 222400d17cf3SKonstantin Belousov if (error != 0 || len == 0 || (ssize_t)len == -1) 2225ccd3953eSJohn Baldwin strlcpy(psinfo->pr_psargs, p->p_comm, 22268c9b7b2cSMarcel Moolenaar sizeof(psinfo->pr_psargs)); 2227c77547d2SJohn Baldwin else { 2228c77547d2SJohn Baldwin KASSERT(len < sizeof(psinfo->pr_psargs), 2229c77547d2SJohn Baldwin ("len is too long: %zu vs %zu", len, 2230c77547d2SJohn Baldwin sizeof(psinfo->pr_psargs))); 2231c77547d2SJohn Baldwin cp = psinfo->pr_psargs; 2232c77547d2SJohn Baldwin end = cp + len - 1; 2233c77547d2SJohn Baldwin for (;;) { 2234c77547d2SJohn Baldwin cp = memchr(cp, '\0', end - cp); 2235c77547d2SJohn Baldwin if (cp == NULL) 2236c77547d2SJohn Baldwin break; 2237c77547d2SJohn Baldwin *cp = ' '; 2238c77547d2SJohn Baldwin } 2239c77547d2SJohn Baldwin } 2240ccb83afdSJohn Baldwin psinfo->pr_pid = p->p_pid; 2241bd390213SMikolaj Golub sbuf_bcat(sb, psinfo, sizeof(*psinfo)); 2242bd390213SMikolaj Golub free(psinfo, M_TEMP); 2243bd390213SMikolaj Golub } 2244bd390213SMikolaj Golub *sizep = sizeof(*psinfo); 2245bd390213SMikolaj Golub } 2246bd390213SMikolaj Golub 2247548a2ec4SAndrew Turner static bool 2248548a2ec4SAndrew Turner __elfN(get_prstatus)(struct regset *rs, struct thread *td, void *buf, 2249548a2ec4SAndrew Turner size_t *sizep) 2250bd390213SMikolaj Golub { 2251bd390213SMikolaj Golub elf_prstatus_t *status; 2252bd390213SMikolaj Golub 2253548a2ec4SAndrew Turner if (buf != NULL) { 2254548a2ec4SAndrew Turner KASSERT(*sizep == sizeof(*status), ("%s: invalid size", 2255548a2ec4SAndrew Turner __func__)); 2256548a2ec4SAndrew Turner status = buf; 22571babcad6SMark Johnston memset(status, 0, *sizep); 22588c9b7b2cSMarcel Moolenaar status->pr_version = PRSTATUS_VERSION; 225962919d78SPeter Wemm status->pr_statussz = sizeof(elf_prstatus_t); 226062919d78SPeter Wemm status->pr_gregsetsz = sizeof(elf_gregset_t); 226162919d78SPeter Wemm status->pr_fpregsetsz = sizeof(elf_fpregset_t); 22628c9b7b2cSMarcel Moolenaar status->pr_osreldate = osreldate; 2263bd390213SMikolaj Golub status->pr_cursig = td->td_proc->p_sig; 2264bd390213SMikolaj Golub status->pr_pid = td->td_tid; 2265841c0c7eSNathan Whitehorn #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2266bd390213SMikolaj Golub fill_regs32(td, &status->pr_reg); 226762919d78SPeter Wemm #else 2268bd390213SMikolaj Golub fill_regs(td, &status->pr_reg); 226962919d78SPeter Wemm #endif 2270548a2ec4SAndrew Turner } 2271548a2ec4SAndrew Turner *sizep = sizeof(*status); 2272548a2ec4SAndrew Turner return (true); 2273548a2ec4SAndrew Turner } 2274548a2ec4SAndrew Turner 2275548a2ec4SAndrew Turner static bool 2276548a2ec4SAndrew Turner __elfN(set_prstatus)(struct regset *rs, struct thread *td, void *buf, 2277548a2ec4SAndrew Turner size_t size) 2278548a2ec4SAndrew Turner { 2279548a2ec4SAndrew Turner elf_prstatus_t *status; 2280548a2ec4SAndrew Turner 2281548a2ec4SAndrew Turner KASSERT(size == sizeof(*status), ("%s: invalid size", __func__)); 2282548a2ec4SAndrew Turner status = buf; 2283548a2ec4SAndrew Turner #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2284548a2ec4SAndrew Turner set_regs32(td, &status->pr_reg); 2285548a2ec4SAndrew Turner #else 2286548a2ec4SAndrew Turner set_regs(td, &status->pr_reg); 2287548a2ec4SAndrew Turner #endif 2288548a2ec4SAndrew Turner return (true); 
2289548a2ec4SAndrew Turner } 2290548a2ec4SAndrew Turner 2291548a2ec4SAndrew Turner static struct regset __elfN(regset_prstatus) = { 2292548a2ec4SAndrew Turner .note = NT_PRSTATUS, 2293548a2ec4SAndrew Turner .size = sizeof(elf_prstatus_t), 2294548a2ec4SAndrew Turner .get = __elfN(get_prstatus), 2295548a2ec4SAndrew Turner .set = __elfN(set_prstatus), 2296548a2ec4SAndrew Turner }; 2297548a2ec4SAndrew Turner ELF_REGSET(__elfN(regset_prstatus)); 2298548a2ec4SAndrew Turner 2299548a2ec4SAndrew Turner static bool 2300548a2ec4SAndrew Turner __elfN(get_fpregset)(struct regset *rs, struct thread *td, void *buf, 2301548a2ec4SAndrew Turner size_t *sizep) 2302548a2ec4SAndrew Turner { 2303548a2ec4SAndrew Turner elf_prfpregset_t *fpregset; 2304548a2ec4SAndrew Turner 2305548a2ec4SAndrew Turner if (buf != NULL) { 2306548a2ec4SAndrew Turner KASSERT(*sizep == sizeof(*fpregset), ("%s: invalid size", 2307548a2ec4SAndrew Turner __func__)); 2308548a2ec4SAndrew Turner fpregset = buf; 2309548a2ec4SAndrew Turner #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2310548a2ec4SAndrew Turner fill_fpregs32(td, fpregset); 2311548a2ec4SAndrew Turner #else 2312548a2ec4SAndrew Turner fill_fpregs(td, fpregset); 2313548a2ec4SAndrew Turner #endif 2314548a2ec4SAndrew Turner } 23150b25cbc7SJohn Baldwin *sizep = sizeof(*fpregset); 2316548a2ec4SAndrew Turner return (true); 2317548a2ec4SAndrew Turner } 2318548a2ec4SAndrew Turner 2319548a2ec4SAndrew Turner static bool 2320548a2ec4SAndrew Turner __elfN(set_fpregset)(struct regset *rs, struct thread *td, void *buf, 2321548a2ec4SAndrew Turner size_t size) 2322548a2ec4SAndrew Turner { 2323548a2ec4SAndrew Turner elf_prfpregset_t *fpregset; 2324548a2ec4SAndrew Turner 2325548a2ec4SAndrew Turner fpregset = buf; 2326548a2ec4SAndrew Turner KASSERT(size == sizeof(*fpregset), ("%s: invalid size", __func__)); 2327548a2ec4SAndrew Turner #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2328548a2ec4SAndrew Turner set_fpregs32(td, fpregset); 2329548a2ec4SAndrew Turner #else 2330548a2ec4SAndrew Turner set_fpregs(td, fpregset); 2331548a2ec4SAndrew Turner #endif 2332548a2ec4SAndrew Turner return (true); 2333548a2ec4SAndrew Turner } 2334548a2ec4SAndrew Turner 2335548a2ec4SAndrew Turner static struct regset __elfN(regset_fpregset) = { 2336548a2ec4SAndrew Turner .note = NT_FPREGSET, 2337548a2ec4SAndrew Turner .size = sizeof(elf_prfpregset_t), 2338548a2ec4SAndrew Turner .get = __elfN(get_fpregset), 2339548a2ec4SAndrew Turner .set = __elfN(set_fpregset), 2340548a2ec4SAndrew Turner }; 2341548a2ec4SAndrew Turner ELF_REGSET(__elfN(regset_fpregset)); 2342548a2ec4SAndrew Turner 23430288d427SJohn Baldwin static bool 23440288d427SJohn Baldwin __elfN(get_thrmisc)(struct regset *rs, struct thread *td, void *buf, 23450288d427SJohn Baldwin size_t *sizep) 23460288d427SJohn Baldwin { 23470288d427SJohn Baldwin elf_thrmisc_t *thrmisc; 23480288d427SJohn Baldwin 23490288d427SJohn Baldwin if (buf != NULL) { 23500288d427SJohn Baldwin KASSERT(*sizep == sizeof(*thrmisc), 23510288d427SJohn Baldwin ("%s: invalid size", __func__)); 23520288d427SJohn Baldwin thrmisc = buf; 23530288d427SJohn Baldwin bzero(thrmisc, sizeof(*thrmisc)); 23540288d427SJohn Baldwin strcpy(thrmisc->pr_tname, td->td_name); 23550288d427SJohn Baldwin } 23560288d427SJohn Baldwin *sizep = sizeof(*thrmisc); 23570288d427SJohn Baldwin return (true); 23580288d427SJohn Baldwin } 23590288d427SJohn Baldwin 23600288d427SJohn Baldwin static struct regset __elfN(regset_thrmisc) = { 23610288d427SJohn Baldwin .note = NT_THRMISC, 23620288d427SJohn Baldwin .size = 
sizeof(elf_thrmisc_t), 23630288d427SJohn Baldwin .get = __elfN(get_thrmisc), 23640288d427SJohn Baldwin }; 23650288d427SJohn Baldwin ELF_REGSET(__elfN(regset_thrmisc)); 23660288d427SJohn Baldwin 23670288d427SJohn Baldwin static bool 23680288d427SJohn Baldwin __elfN(get_lwpinfo)(struct regset *rs, struct thread *td, void *buf, 23690288d427SJohn Baldwin size_t *sizep) 23700288d427SJohn Baldwin { 23710288d427SJohn Baldwin elf_lwpinfo_t pl; 23720288d427SJohn Baldwin size_t size; 23730288d427SJohn Baldwin int structsize; 23740288d427SJohn Baldwin 23750288d427SJohn Baldwin size = sizeof(structsize) + sizeof(pl); 23760288d427SJohn Baldwin if (buf != NULL) { 23770288d427SJohn Baldwin KASSERT(*sizep == size, ("%s: invalid size", __func__)); 23780288d427SJohn Baldwin structsize = sizeof(pl); 23790288d427SJohn Baldwin memcpy(buf, &structsize, sizeof(structsize)); 23800288d427SJohn Baldwin bzero(&pl, sizeof(pl)); 23810288d427SJohn Baldwin pl.pl_lwpid = td->td_tid; 23820288d427SJohn Baldwin pl.pl_event = PL_EVENT_NONE; 23830288d427SJohn Baldwin pl.pl_sigmask = td->td_sigmask; 23840288d427SJohn Baldwin pl.pl_siglist = td->td_siglist; 23850288d427SJohn Baldwin if (td->td_si.si_signo != 0) { 23860288d427SJohn Baldwin pl.pl_event = PL_EVENT_SIGNAL; 23870288d427SJohn Baldwin pl.pl_flags |= PL_FLAG_SI; 23880288d427SJohn Baldwin #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 23890288d427SJohn Baldwin siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo); 23900288d427SJohn Baldwin #else 23910288d427SJohn Baldwin pl.pl_siginfo = td->td_si; 23920288d427SJohn Baldwin #endif 23930288d427SJohn Baldwin } 23940288d427SJohn Baldwin strcpy(pl.pl_tdname, td->td_name); 23950288d427SJohn Baldwin /* XXX TODO: supply more information in struct ptrace_lwpinfo*/ 23960288d427SJohn Baldwin memcpy((int *)buf + 1, &pl, sizeof(pl)); 23970288d427SJohn Baldwin } 23980288d427SJohn Baldwin *sizep = size; 23990288d427SJohn Baldwin return (true); 24000288d427SJohn Baldwin } 24010288d427SJohn Baldwin 24020288d427SJohn Baldwin static struct regset __elfN(regset_lwpinfo) = { 24030288d427SJohn Baldwin .note = NT_PTLWPINFO, 24040288d427SJohn Baldwin .size = sizeof(int) + sizeof(elf_lwpinfo_t), 24050288d427SJohn Baldwin .get = __elfN(get_lwpinfo), 24060288d427SJohn Baldwin }; 24070288d427SJohn Baldwin ELF_REGSET(__elfN(regset_lwpinfo)); 24080288d427SJohn Baldwin 24096b71405bSJohn Baldwin static size_t 24106b71405bSJohn Baldwin __elfN(prepare_register_notes)(struct thread *td, struct note_info_list *list, 24116b71405bSJohn Baldwin struct thread *target_td) 2412bd390213SMikolaj Golub { 24136b71405bSJohn Baldwin struct sysentvec *sv = td->td_proc->p_sysent; 24146b71405bSJohn Baldwin struct regset **regsetp, **regset_end, *regset; 24156b71405bSJohn Baldwin size_t size; 2416bd390213SMikolaj Golub 24176b71405bSJohn Baldwin size = 0; 24186b71405bSJohn Baldwin 24196b71405bSJohn Baldwin /* NT_PRSTATUS must be the first register set note. */ 24206b71405bSJohn Baldwin size += __elfN(register_regset_note)(td, list, &__elfN(regset_prstatus), 24216b71405bSJohn Baldwin target_td); 24226b71405bSJohn Baldwin 24236b71405bSJohn Baldwin regsetp = sv->sv_regset_begin; 24246b71405bSJohn Baldwin if (regsetp == NULL) { 24256b71405bSJohn Baldwin /* XXX: This shouldn't be true for any FreeBSD ABIs. 
*/ 24266b71405bSJohn Baldwin size += __elfN(register_regset_note)(td, list, 24276b71405bSJohn Baldwin &__elfN(regset_fpregset), target_td); 24286b71405bSJohn Baldwin return (size); 2429bd390213SMikolaj Golub } 24306b71405bSJohn Baldwin regset_end = sv->sv_regset_end; 24316b71405bSJohn Baldwin MPASS(regset_end != NULL); 24326b71405bSJohn Baldwin for (; regsetp < regset_end; regsetp++) { 24336b71405bSJohn Baldwin regset = *regsetp; 24346b71405bSJohn Baldwin if (regset->note == NT_PRSTATUS) 24356b71405bSJohn Baldwin continue; 24366b71405bSJohn Baldwin size += __elfN(register_regset_note)(td, list, regset, 24376b71405bSJohn Baldwin target_td); 24386b71405bSJohn Baldwin } 24396b71405bSJohn Baldwin return (size); 2440bd390213SMikolaj Golub } 2441bd390213SMikolaj Golub 24424da47b2fSMarcel Moolenaar /* 24434da47b2fSMarcel Moolenaar * Allow for MD specific notes, as well as any MD 24444da47b2fSMarcel Moolenaar * specific preparations for writing MI notes. 24454da47b2fSMarcel Moolenaar */ 24468c64af4fSJohn Polstra static void 2447bd390213SMikolaj Golub __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) 24488c64af4fSJohn Polstra { 2449bd390213SMikolaj Golub struct thread *td; 2450bd390213SMikolaj Golub void *buf; 2451bd390213SMikolaj Golub size_t size; 24528c64af4fSJohn Polstra 2453bd390213SMikolaj Golub td = (struct thread *)arg; 2454bd390213SMikolaj Golub size = *sizep; 2455bd390213SMikolaj Golub if (size != 0 && sb != NULL) 2456bd390213SMikolaj Golub buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); 245783a396ceSChristian Brueffer else 245883a396ceSChristian Brueffer buf = NULL; 2459bd390213SMikolaj Golub size = 0; 2460bd390213SMikolaj Golub __elfN(dump_thread)(td, buf, &size); 246164779280SKonstantin Belousov KASSERT(sb == NULL || *sizep == size, ("invalid size")); 246283a396ceSChristian Brueffer if (size != 0 && sb != NULL) 2463bd390213SMikolaj Golub sbuf_bcat(sb, buf, size); 2464a1761d73SChristian Brueffer free(buf, M_TEMP); 2465bd390213SMikolaj Golub *sizep = size; 24668c64af4fSJohn Polstra } 24678c64af4fSJohn Polstra 2468f1fca82eSMikolaj Golub #ifdef KINFO_PROC_SIZE 2469f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); 2470f1fca82eSMikolaj Golub #endif 2471f1fca82eSMikolaj Golub 2472f1fca82eSMikolaj Golub static void 2473f1fca82eSMikolaj Golub __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) 2474f1fca82eSMikolaj Golub { 2475f1fca82eSMikolaj Golub struct proc *p; 2476f1fca82eSMikolaj Golub size_t size; 2477f1fca82eSMikolaj Golub int structsize; 2478f1fca82eSMikolaj Golub 247961b4c627SEdward Tomasz Napierala p = arg; 2480f1fca82eSMikolaj Golub size = sizeof(structsize) + p->p_numthreads * 2481f1fca82eSMikolaj Golub sizeof(elf_kinfo_proc_t); 2482f1fca82eSMikolaj Golub 2483f1fca82eSMikolaj Golub if (sb != NULL) { 2484f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2485f1fca82eSMikolaj Golub structsize = sizeof(elf_kinfo_proc_t); 2486f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 24874daea938SKonstantin Belousov sx_slock(&proctree_lock); 2488f1fca82eSMikolaj Golub PROC_LOCK(p); 2489f1fca82eSMikolaj Golub kern_proc_out(p, sb, ELF_KERN_PROC_MASK); 24904daea938SKonstantin Belousov sx_sunlock(&proctree_lock); 2491f1fca82eSMikolaj Golub } 2492f1fca82eSMikolaj Golub *sizep = size; 2493f1fca82eSMikolaj Golub } 2494f1fca82eSMikolaj Golub 2495f1fca82eSMikolaj Golub #ifdef KINFO_FILE_SIZE 2496f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); 2497f1fca82eSMikolaj Golub #endif 
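/*
 * Descriptive note (added): the procstat note emitters in this file share a
 * common convention: the note payload begins with an int ("structsize")
 * recording the size of the structure(s) that follow.  When invoked with
 * sb == NULL they only compute the total note size and report it via *sizep.
 */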
2498f1fca82eSMikolaj Golub 2499f1fca82eSMikolaj Golub static void 2500f1fca82eSMikolaj Golub note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) 2501f1fca82eSMikolaj Golub { 2502f1fca82eSMikolaj Golub struct proc *p; 250314bdbaf2SConrad Meyer size_t size, sect_sz, i; 250414bdbaf2SConrad Meyer ssize_t start_len, sect_len; 250514bdbaf2SConrad Meyer int structsize, filedesc_flags; 250614bdbaf2SConrad Meyer 2507bcb60d52SConrad Meyer if (coredump_pack_fileinfo) 250814bdbaf2SConrad Meyer filedesc_flags = KERN_FILEDESC_PACK_KINFO; 250914bdbaf2SConrad Meyer else 251014bdbaf2SConrad Meyer filedesc_flags = 0; 2511f1fca82eSMikolaj Golub 251261b4c627SEdward Tomasz Napierala p = arg; 251314bdbaf2SConrad Meyer structsize = sizeof(struct kinfo_file); 2514f1fca82eSMikolaj Golub if (sb == NULL) { 2515f1fca82eSMikolaj Golub size = 0; 2516f1fca82eSMikolaj Golub sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); 25175c32e9fcSAlexander Motin sbuf_set_drain(sb, sbuf_count_drain, &size); 2518f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2519f1fca82eSMikolaj Golub PROC_LOCK(p); 252014bdbaf2SConrad Meyer kern_proc_filedesc_out(p, sb, -1, filedesc_flags); 2521f1fca82eSMikolaj Golub sbuf_finish(sb); 2522f1fca82eSMikolaj Golub sbuf_delete(sb); 2523f1fca82eSMikolaj Golub *sizep = size; 2524f1fca82eSMikolaj Golub } else { 252514bdbaf2SConrad Meyer sbuf_start_section(sb, &start_len); 252614bdbaf2SConrad Meyer 2527f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2528f1fca82eSMikolaj Golub PROC_LOCK(p); 252914bdbaf2SConrad Meyer kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), 253014bdbaf2SConrad Meyer filedesc_flags); 253114bdbaf2SConrad Meyer 253214bdbaf2SConrad Meyer sect_len = sbuf_end_section(sb, start_len, 0, 0); 253314bdbaf2SConrad Meyer if (sect_len < 0) 253414bdbaf2SConrad Meyer return; 253514bdbaf2SConrad Meyer sect_sz = sect_len; 253614bdbaf2SConrad Meyer 253714bdbaf2SConrad Meyer KASSERT(sect_sz <= *sizep, 253814bdbaf2SConrad Meyer ("kern_proc_filedesc_out did not respect maxlen; " 253914bdbaf2SConrad Meyer "requested %zu, got %zu", *sizep - sizeof(structsize), 254014bdbaf2SConrad Meyer sect_sz - sizeof(structsize))); 254114bdbaf2SConrad Meyer 254214bdbaf2SConrad Meyer for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) 254314bdbaf2SConrad Meyer sbuf_putc(sb, 0); 2544f1fca82eSMikolaj Golub } 2545f1fca82eSMikolaj Golub } 2546f1fca82eSMikolaj Golub 2547f1fca82eSMikolaj Golub #ifdef KINFO_VMENTRY_SIZE 2548f1fca82eSMikolaj Golub CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); 2549f1fca82eSMikolaj Golub #endif 2550f1fca82eSMikolaj Golub 2551f1fca82eSMikolaj Golub static void 2552f1fca82eSMikolaj Golub note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) 2553f1fca82eSMikolaj Golub { 2554f1fca82eSMikolaj Golub struct proc *p; 2555f1fca82eSMikolaj Golub size_t size; 2556e6b95927SConrad Meyer int structsize, vmmap_flags; 2557e6b95927SConrad Meyer 2558e6b95927SConrad Meyer if (coredump_pack_vmmapinfo) 2559e6b95927SConrad Meyer vmmap_flags = KERN_VMMAP_PACK_KINFO; 2560e6b95927SConrad Meyer else 2561e6b95927SConrad Meyer vmmap_flags = 0; 2562f1fca82eSMikolaj Golub 256361b4c627SEdward Tomasz Napierala p = arg; 2564e6b95927SConrad Meyer structsize = sizeof(struct kinfo_vmentry); 2565f1fca82eSMikolaj Golub if (sb == NULL) { 2566f1fca82eSMikolaj Golub size = 0; 2567f1fca82eSMikolaj Golub sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); 25685c32e9fcSAlexander Motin sbuf_set_drain(sb, sbuf_count_drain, &size); 2569f1fca82eSMikolaj Golub 
sbuf_bcat(sb, &structsize, sizeof(structsize)); 2570f1fca82eSMikolaj Golub PROC_LOCK(p); 2571e6b95927SConrad Meyer kern_proc_vmmap_out(p, sb, -1, vmmap_flags); 2572f1fca82eSMikolaj Golub sbuf_finish(sb); 2573f1fca82eSMikolaj Golub sbuf_delete(sb); 2574f1fca82eSMikolaj Golub *sizep = size; 2575f1fca82eSMikolaj Golub } else { 2576f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2577f1fca82eSMikolaj Golub PROC_LOCK(p); 2578e6b95927SConrad Meyer kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize), 2579e6b95927SConrad Meyer vmmap_flags); 2580f1fca82eSMikolaj Golub } 2581f1fca82eSMikolaj Golub } 2582f1fca82eSMikolaj Golub 2583f1fca82eSMikolaj Golub static void 2584f1fca82eSMikolaj Golub note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep) 2585f1fca82eSMikolaj Golub { 2586f1fca82eSMikolaj Golub struct proc *p; 2587f1fca82eSMikolaj Golub size_t size; 2588f1fca82eSMikolaj Golub int structsize; 2589f1fca82eSMikolaj Golub 259061b4c627SEdward Tomasz Napierala p = arg; 2591f1fca82eSMikolaj Golub size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t); 2592f1fca82eSMikolaj Golub if (sb != NULL) { 2593f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2594f1fca82eSMikolaj Golub structsize = sizeof(gid_t); 2595f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2596f1fca82eSMikolaj Golub sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups * 2597f1fca82eSMikolaj Golub sizeof(gid_t)); 2598f1fca82eSMikolaj Golub } 2599f1fca82eSMikolaj Golub *sizep = size; 2600f1fca82eSMikolaj Golub } 2601f1fca82eSMikolaj Golub 2602f1fca82eSMikolaj Golub static void 2603f1fca82eSMikolaj Golub note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep) 2604f1fca82eSMikolaj Golub { 2605f1fca82eSMikolaj Golub struct proc *p; 2606f1fca82eSMikolaj Golub size_t size; 2607f1fca82eSMikolaj Golub int structsize; 2608f1fca82eSMikolaj Golub 260961b4c627SEdward Tomasz Napierala p = arg; 261085078b85SConrad Meyer size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask); 2611f1fca82eSMikolaj Golub if (sb != NULL) { 2612f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 261385078b85SConrad Meyer structsize = sizeof(p->p_pd->pd_cmask); 2614f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 261585078b85SConrad Meyer sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask)); 2616f1fca82eSMikolaj Golub } 2617f1fca82eSMikolaj Golub *sizep = size; 2618f1fca82eSMikolaj Golub } 2619f1fca82eSMikolaj Golub 2620f1fca82eSMikolaj Golub static void 2621f1fca82eSMikolaj Golub note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep) 2622f1fca82eSMikolaj Golub { 2623f1fca82eSMikolaj Golub struct proc *p; 2624f1fca82eSMikolaj Golub struct rlimit rlim[RLIM_NLIMITS]; 2625f1fca82eSMikolaj Golub size_t size; 2626f1fca82eSMikolaj Golub int structsize, i; 2627f1fca82eSMikolaj Golub 262861b4c627SEdward Tomasz Napierala p = arg; 2629f1fca82eSMikolaj Golub size = sizeof(structsize) + sizeof(rlim); 2630f1fca82eSMikolaj Golub if (sb != NULL) { 2631f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2632f1fca82eSMikolaj Golub structsize = sizeof(rlim); 2633f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2634f1fca82eSMikolaj Golub PROC_LOCK(p); 2635f1fca82eSMikolaj Golub for (i = 0; i < RLIM_NLIMITS; i++) 2636f6f6d240SMateusz Guzik lim_rlimit_proc(p, i, &rlim[i]); 2637f1fca82eSMikolaj Golub PROC_UNLOCK(p); 2638f1fca82eSMikolaj Golub sbuf_bcat(sb, rlim, sizeof(rlim)); 2639f1fca82eSMikolaj Golub } 
2640f1fca82eSMikolaj Golub *sizep = size; 2641f1fca82eSMikolaj Golub } 2642f1fca82eSMikolaj Golub 2643f1fca82eSMikolaj Golub static void 2644f1fca82eSMikolaj Golub note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep) 2645f1fca82eSMikolaj Golub { 2646f1fca82eSMikolaj Golub struct proc *p; 2647f1fca82eSMikolaj Golub size_t size; 2648f1fca82eSMikolaj Golub int structsize; 2649f1fca82eSMikolaj Golub 265061b4c627SEdward Tomasz Napierala p = arg; 2651f1fca82eSMikolaj Golub size = sizeof(structsize) + sizeof(p->p_osrel); 2652f1fca82eSMikolaj Golub if (sb != NULL) { 2653f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2654f1fca82eSMikolaj Golub structsize = sizeof(p->p_osrel); 2655f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2656f1fca82eSMikolaj Golub sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel)); 2657f1fca82eSMikolaj Golub } 2658f1fca82eSMikolaj Golub *sizep = size; 2659f1fca82eSMikolaj Golub } 2660f1fca82eSMikolaj Golub 2661f1fca82eSMikolaj Golub static void 2662f1fca82eSMikolaj Golub __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) 2663f1fca82eSMikolaj Golub { 2664f1fca82eSMikolaj Golub struct proc *p; 2665f1fca82eSMikolaj Golub elf_ps_strings_t ps_strings; 2666f1fca82eSMikolaj Golub size_t size; 2667f1fca82eSMikolaj Golub int structsize; 2668f1fca82eSMikolaj Golub 266961b4c627SEdward Tomasz Napierala p = arg; 2670f1fca82eSMikolaj Golub size = sizeof(structsize) + sizeof(ps_strings); 2671f1fca82eSMikolaj Golub if (sb != NULL) { 2672f1fca82eSMikolaj Golub KASSERT(*sizep == size, ("invalid size")); 2673f1fca82eSMikolaj Golub structsize = sizeof(ps_strings); 2674f1fca82eSMikolaj Golub #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 2675706f4a81SMark Johnston ps_strings = PTROUT(PROC_PS_STRINGS(p)); 2676f1fca82eSMikolaj Golub #else 2677706f4a81SMark Johnston ps_strings = PROC_PS_STRINGS(p); 2678f1fca82eSMikolaj Golub #endif 2679f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2680f1fca82eSMikolaj Golub sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); 2681f1fca82eSMikolaj Golub } 2682f1fca82eSMikolaj Golub *sizep = size; 2683f1fca82eSMikolaj Golub } 2684f1fca82eSMikolaj Golub 2685f1fca82eSMikolaj Golub static void 2686f1fca82eSMikolaj Golub __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep) 2687f1fca82eSMikolaj Golub { 2688f1fca82eSMikolaj Golub struct proc *p; 2689f1fca82eSMikolaj Golub size_t size; 2690f1fca82eSMikolaj Golub int structsize; 2691f1fca82eSMikolaj Golub 269261b4c627SEdward Tomasz Napierala p = arg; 2693f1fca82eSMikolaj Golub if (sb == NULL) { 2694f1fca82eSMikolaj Golub size = 0; 269506250515SEdward Tomasz Napierala sb = sbuf_new(NULL, NULL, AT_COUNT * sizeof(Elf_Auxinfo), 269606250515SEdward Tomasz Napierala SBUF_FIXEDLEN); 26975c32e9fcSAlexander Motin sbuf_set_drain(sb, sbuf_count_drain, &size); 2698f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2699f1fca82eSMikolaj Golub PHOLD(p); 2700f1fca82eSMikolaj Golub proc_getauxv(curthread, p, sb); 2701f1fca82eSMikolaj Golub PRELE(p); 2702f1fca82eSMikolaj Golub sbuf_finish(sb); 2703f1fca82eSMikolaj Golub sbuf_delete(sb); 2704f1fca82eSMikolaj Golub *sizep = size; 2705f1fca82eSMikolaj Golub } else { 2706f1fca82eSMikolaj Golub structsize = sizeof(Elf_Auxinfo); 2707f1fca82eSMikolaj Golub sbuf_bcat(sb, &structsize, sizeof(structsize)); 2708f1fca82eSMikolaj Golub PHOLD(p); 2709f1fca82eSMikolaj Golub proc_getauxv(curthread, p, sb); 2710f1fca82eSMikolaj Golub PRELE(p); 2711f1fca82eSMikolaj Golub } 
2712f1fca82eSMikolaj Golub } 2713f1fca82eSMikolaj Golub 271419621645SAlex Richardson static bool 271592328a32SKonstantin Belousov __elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote, 271692328a32SKonstantin Belousov const char *note_vendor, const Elf_Phdr *pnote, 271719621645SAlex Richardson bool (*cb)(const Elf_Note *, void *, bool *), void *cb_arg) 271832c01de2SDmitry Chagin { 2719267c52fcSKonstantin Belousov const Elf_Note *note, *note0, *note_end; 272032c01de2SDmitry Chagin const char *note_name; 27216c775eb6SKonstantin Belousov char *buf; 27226c775eb6SKonstantin Belousov int i, error; 272319621645SAlex Richardson bool res; 272432c01de2SDmitry Chagin 27256c775eb6SKonstantin Belousov /* We need some limit, might as well use PAGE_SIZE. */ 27266c775eb6SKonstantin Belousov if (pnote == NULL || pnote->p_filesz > PAGE_SIZE) 272719621645SAlex Richardson return (false); 27286c775eb6SKonstantin Belousov ASSERT_VOP_LOCKED(imgp->vp, "parse_notes"); 27296c775eb6SKonstantin Belousov if (pnote->p_offset > PAGE_SIZE || 27306c775eb6SKonstantin Belousov pnote->p_filesz > PAGE_SIZE - pnote->p_offset) { 27312d6b8546SKonstantin Belousov buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT); 27322d6b8546SKonstantin Belousov if (buf == NULL) { 2733b249ce48SMateusz Guzik VOP_UNLOCK(imgp->vp); 27346c775eb6SKonstantin Belousov buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK); 273578022527SKonstantin Belousov vn_lock(imgp->vp, LK_SHARED | LK_RETRY); 27362d6b8546SKonstantin Belousov } 27376c775eb6SKonstantin Belousov error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz, 27386c775eb6SKonstantin Belousov pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED, 27396c775eb6SKonstantin Belousov curthread->td_ucred, NOCRED, NULL, curthread); 27406c775eb6SKonstantin Belousov if (error != 0) { 27416c775eb6SKonstantin Belousov uprintf("i/o error PT_NOTE\n"); 2742eda8fe63SKonstantin Belousov goto retf; 27436c775eb6SKonstantin Belousov } 27446c775eb6SKonstantin Belousov note = note0 = (const Elf_Note *)buf; 27456c775eb6SKonstantin Belousov note_end = (const Elf_Note *)(buf + pnote->p_filesz); 27466c775eb6SKonstantin Belousov } else { 27476c775eb6SKonstantin Belousov note = note0 = (const Elf_Note *)(imgp->image_header + 27486c775eb6SKonstantin Belousov pnote->p_offset); 274932c01de2SDmitry Chagin note_end = (const Elf_Note *)(imgp->image_header + 275032c01de2SDmitry Chagin pnote->p_offset + pnote->p_filesz); 27516c775eb6SKonstantin Belousov buf = NULL; 27526c775eb6SKonstantin Belousov } 2753267c52fcSKonstantin Belousov for (i = 0; i < 100 && note >= note0 && note < note_end; i++) { 2754d1ae5c83SKonstantin Belousov if (!aligned(note, Elf32_Addr) || (const char *)note_end - 27556c775eb6SKonstantin Belousov (const char *)note < sizeof(Elf_Note)) { 2756eda8fe63SKonstantin Belousov goto retf; 27576c775eb6SKonstantin Belousov } 275892328a32SKonstantin Belousov if (note->n_namesz != checknote->n_namesz || 275992328a32SKonstantin Belousov note->n_descsz != checknote->n_descsz || 276092328a32SKonstantin Belousov note->n_type != checknote->n_type) 276132c01de2SDmitry Chagin goto nextnote; 276232c01de2SDmitry Chagin note_name = (const char *)(note + 1); 276392328a32SKonstantin Belousov if (note_name + checknote->n_namesz >= 276492328a32SKonstantin Belousov (const char *)note_end || strncmp(note_vendor, 276592328a32SKonstantin Belousov note_name, checknote->n_namesz) != 0) 276632c01de2SDmitry Chagin goto nextnote; 276732c01de2SDmitry Chagin 276892328a32SKonstantin Belousov if (cb(note, cb_arg, &res)) 27696c775eb6SKonstantin Belousov 
goto ret; 277032c01de2SDmitry Chagin nextnote: 277132c01de2SDmitry Chagin note = (const Elf_Note *)((const char *)(note + 1) + 27721b8388cdSMikolaj Golub roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) + 27731b8388cdSMikolaj Golub roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE)); 277432c01de2SDmitry Chagin } 2775eda8fe63SKonstantin Belousov retf: 277619621645SAlex Richardson res = false; 27776c775eb6SKonstantin Belousov ret: 27786c775eb6SKonstantin Belousov free(buf, M_TEMP); 27796c775eb6SKonstantin Belousov return (res); 278032c01de2SDmitry Chagin } 278132c01de2SDmitry Chagin 278292328a32SKonstantin Belousov struct brandnote_cb_arg { 278392328a32SKonstantin Belousov Elf_Brandnote *brandnote; 278492328a32SKonstantin Belousov int32_t *osrel; 278592328a32SKonstantin Belousov }; 278692328a32SKonstantin Belousov 278719621645SAlex Richardson static bool 278819621645SAlex Richardson brandnote_cb(const Elf_Note *note, void *arg0, bool *res) 278992328a32SKonstantin Belousov { 279092328a32SKonstantin Belousov struct brandnote_cb_arg *arg; 279192328a32SKonstantin Belousov 279292328a32SKonstantin Belousov arg = arg0; 279392328a32SKonstantin Belousov 279492328a32SKonstantin Belousov /* 279592328a32SKonstantin Belousov * Fetch the osreldate for binary from the ELF OSABI-note if 279692328a32SKonstantin Belousov * necessary. 279792328a32SKonstantin Belousov */ 279892328a32SKonstantin Belousov *res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 && 279992328a32SKonstantin Belousov arg->brandnote->trans_osrel != NULL ? 280019621645SAlex Richardson arg->brandnote->trans_osrel(note, arg->osrel) : true; 280192328a32SKonstantin Belousov 280219621645SAlex Richardson return (true); 280392328a32SKonstantin Belousov } 280492328a32SKonstantin Belousov 2805cefb93f2SKonstantin Belousov static Elf_Note fctl_note = { 2806cefb93f2SKonstantin Belousov .n_namesz = sizeof(FREEBSD_ABI_VENDOR), 2807cefb93f2SKonstantin Belousov .n_descsz = sizeof(uint32_t), 2808cefb93f2SKonstantin Belousov .n_type = NT_FREEBSD_FEATURE_CTL, 2809cefb93f2SKonstantin Belousov }; 2810cefb93f2SKonstantin Belousov 2811cefb93f2SKonstantin Belousov struct fctl_cb_arg { 281219621645SAlex Richardson bool *has_fctl0; 2813cefb93f2SKonstantin Belousov uint32_t *fctl0; 2814cefb93f2SKonstantin Belousov }; 2815cefb93f2SKonstantin Belousov 281619621645SAlex Richardson static bool 281719621645SAlex Richardson note_fctl_cb(const Elf_Note *note, void *arg0, bool *res) 2818cefb93f2SKonstantin Belousov { 2819cefb93f2SKonstantin Belousov struct fctl_cb_arg *arg; 2820cefb93f2SKonstantin Belousov const Elf32_Word *desc; 2821cefb93f2SKonstantin Belousov uintptr_t p; 2822cefb93f2SKonstantin Belousov 2823cefb93f2SKonstantin Belousov arg = arg0; 2824cefb93f2SKonstantin Belousov p = (uintptr_t)(note + 1); 2825cefb93f2SKonstantin Belousov p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); 2826cefb93f2SKonstantin Belousov desc = (const Elf32_Word *)p; 282719621645SAlex Richardson *arg->has_fctl0 = true; 2828cefb93f2SKonstantin Belousov *arg->fctl0 = desc[0]; 282919621645SAlex Richardson *res = true; 283019621645SAlex Richardson return (true); 2831cefb93f2SKonstantin Belousov } 2832cefb93f2SKonstantin Belousov 283332c01de2SDmitry Chagin /* 2834cefb93f2SKonstantin Belousov * Try to find the appropriate ABI-note section for checknote, fetch 2835cefb93f2SKonstantin Belousov * the osreldate and feature control flags for binary from the ELF 2836cefb93f2SKonstantin Belousov * OSABI-note. 
Only the first page of the image is searched, the same 2837cefb93f2SKonstantin Belousov * as for headers. 28381a9c7decSKonstantin Belousov */ 283919621645SAlex Richardson static bool 284092328a32SKonstantin Belousov __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote, 284119621645SAlex Richardson int32_t *osrel, bool *has_fctl0, uint32_t *fctl0) 28421a9c7decSKonstantin Belousov { 28431a9c7decSKonstantin Belousov const Elf_Phdr *phdr; 28441a9c7decSKonstantin Belousov const Elf_Ehdr *hdr; 284592328a32SKonstantin Belousov struct brandnote_cb_arg b_arg; 2846cefb93f2SKonstantin Belousov struct fctl_cb_arg f_arg; 2847cefb93f2SKonstantin Belousov int i, j; 28481a9c7decSKonstantin Belousov 28491a9c7decSKonstantin Belousov hdr = (const Elf_Ehdr *)imgp->image_header; 28501a9c7decSKonstantin Belousov phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 285192328a32SKonstantin Belousov b_arg.brandnote = brandnote; 285292328a32SKonstantin Belousov b_arg.osrel = osrel; 28530cad2aa2SKonstantin Belousov f_arg.has_fctl0 = has_fctl0; 2854cefb93f2SKonstantin Belousov f_arg.fctl0 = fctl0; 28551a9c7decSKonstantin Belousov 28561a9c7decSKonstantin Belousov for (i = 0; i < hdr->e_phnum; i++) { 285792328a32SKonstantin Belousov if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, 285892328a32SKonstantin Belousov &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb, 285992328a32SKonstantin Belousov &b_arg)) { 2860cefb93f2SKonstantin Belousov for (j = 0; j < hdr->e_phnum; j++) { 2861cefb93f2SKonstantin Belousov if (phdr[j].p_type == PT_NOTE && 2862cefb93f2SKonstantin Belousov __elfN(parse_notes)(imgp, &fctl_note, 2863cefb93f2SKonstantin Belousov FREEBSD_ABI_VENDOR, &phdr[j], 2864cefb93f2SKonstantin Belousov note_fctl_cb, &f_arg)) 2865cefb93f2SKonstantin Belousov break; 2866cefb93f2SKonstantin Belousov } 286719621645SAlex Richardson return (true); 28681a9c7decSKonstantin Belousov } 286992328a32SKonstantin Belousov } 287019621645SAlex Richardson return (false); 28711a9c7decSKonstantin Belousov 28721a9c7decSKonstantin Belousov } 28731a9c7decSKonstantin Belousov 28741a9c7decSKonstantin Belousov /* 2875e1743d02SSøren Schmidt * Tell kern_execve.c about it, with a little help from the linker. 
2876e1743d02SSøren Schmidt */ 2877a360a43dSJake Burkholder static struct execsw __elfN(execsw) = { 2878b7feabf9SEd Maste .ex_imgact = __CONCAT(exec_, __elfN(imgact)), 2879b7feabf9SEd Maste .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) 2880a360a43dSJake Burkholder }; 2881a360a43dSJake Burkholder EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); 2882e7228204SAlfred Perlstein 2883ed167eaaSKonstantin Belousov static vm_prot_t 2884ed167eaaSKonstantin Belousov __elfN(trans_prot)(Elf_Word flags) 2885ed167eaaSKonstantin Belousov { 2886ed167eaaSKonstantin Belousov vm_prot_t prot; 2887ed167eaaSKonstantin Belousov 2888ed167eaaSKonstantin Belousov prot = 0; 2889ed167eaaSKonstantin Belousov if (flags & PF_X) 2890ed167eaaSKonstantin Belousov prot |= VM_PROT_EXECUTE; 2891ed167eaaSKonstantin Belousov if (flags & PF_W) 2892ed167eaaSKonstantin Belousov prot |= VM_PROT_WRITE; 2893ed167eaaSKonstantin Belousov if (flags & PF_R) 2894ed167eaaSKonstantin Belousov prot |= VM_PROT_READ; 2895eb785fabSKonstantin Belousov #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) 2896126b36a2SKonstantin Belousov if (i386_read_exec && (flags & PF_R)) 2897676eda08SMarcel Moolenaar prot |= VM_PROT_EXECUTE; 2898676eda08SMarcel Moolenaar #endif 2899ed167eaaSKonstantin Belousov return (prot); 2900ed167eaaSKonstantin Belousov } 2901ed167eaaSKonstantin Belousov 2902ed167eaaSKonstantin Belousov static Elf_Word 2903ed167eaaSKonstantin Belousov __elfN(untrans_prot)(vm_prot_t prot) 2904ed167eaaSKonstantin Belousov { 2905ed167eaaSKonstantin Belousov Elf_Word flags; 2906ed167eaaSKonstantin Belousov 2907ed167eaaSKonstantin Belousov flags = 0; 2908ed167eaaSKonstantin Belousov if (prot & VM_PROT_EXECUTE) 2909ed167eaaSKonstantin Belousov flags |= PF_X; 2910ed167eaaSKonstantin Belousov if (prot & VM_PROT_READ) 2911ed167eaaSKonstantin Belousov flags |= PF_R; 2912ed167eaaSKonstantin Belousov if (prot & VM_PROT_WRITE) 2913ed167eaaSKonstantin Belousov flags |= PF_W; 2914ed167eaaSKonstantin Belousov return (flags); 2915ed167eaaSKonstantin Belousov } 2916