1e1743d02SSøren Schmidt /*- 221a3ee0eSDavid E. O'Brien * Copyright (c) 2000 David O'Brien 3e1743d02SSøren Schmidt * Copyright (c) 1995-1996 Søren Schmidt 4e1743d02SSøren Schmidt * Copyright (c) 1996 Peter Wemm 5e1743d02SSøren Schmidt * All rights reserved. 6e1743d02SSøren Schmidt * 7e1743d02SSøren Schmidt * Redistribution and use in source and binary forms, with or without 8e1743d02SSøren Schmidt * modification, are permitted provided that the following conditions 9e1743d02SSøren Schmidt * are met: 10e1743d02SSøren Schmidt * 1. Redistributions of source code must retain the above copyright 11e1743d02SSøren Schmidt * notice, this list of conditions and the following disclaimer 12e1743d02SSøren Schmidt * in this position and unchanged. 13e1743d02SSøren Schmidt * 2. Redistributions in binary form must reproduce the above copyright 14e1743d02SSøren Schmidt * notice, this list of conditions and the following disclaimer in the 15e1743d02SSøren Schmidt * documentation and/or other materials provided with the distribution. 16e1743d02SSøren Schmidt * 3. The name of the author may not be used to endorse or promote products 1721dc7d4fSJens Schweikhardt * derived from this software without specific prior written permission 18e1743d02SSøren Schmidt * 19e1743d02SSøren Schmidt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20e1743d02SSøren Schmidt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21e1743d02SSøren Schmidt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
22e1743d02SSøren Schmidt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23e1743d02SSøren Schmidt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24e1743d02SSøren Schmidt * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25e1743d02SSøren Schmidt * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26e1743d02SSøren Schmidt * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27e1743d02SSøren Schmidt * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28e1743d02SSøren Schmidt * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29e1743d02SSøren Schmidt */ 30e1743d02SSøren Schmidt 31677b542eSDavid E. O'Brien #include <sys/cdefs.h> 32677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 33677b542eSDavid E. O'Brien 3462919d78SPeter Wemm #include "opt_compat.h" 3562919d78SPeter Wemm 36e1743d02SSøren Schmidt #include <sys/param.h> 37e1743d02SSøren Schmidt #include <sys/exec.h> 388c64af4fSJohn Polstra #include <sys/fcntl.h> 39e1743d02SSøren Schmidt #include <sys/imgact.h> 40e1743d02SSøren Schmidt #include <sys/imgact_elf.h> 41e1743d02SSøren Schmidt #include <sys/kernel.h> 42f34fa851SJohn Baldwin #include <sys/lock.h> 43e1743d02SSøren Schmidt #include <sys/malloc.h> 4468ff2a43SChristian S.J. 
Peron #include <sys/mount.h> 4535e0e5b3SJohn Baldwin #include <sys/mutex.h> 468c64af4fSJohn Polstra #include <sys/mman.h> 47a794e791SBruce Evans #include <sys/namei.h> 488c64af4fSJohn Polstra #include <sys/pioctl.h> 49a794e791SBruce Evans #include <sys/proc.h> 508c64af4fSJohn Polstra #include <sys/procfs.h> 518c64af4fSJohn Polstra #include <sys/resourcevar.h> 52da61b9a6SAlan Cox #include <sys/sf_buf.h> 5336240ea5SDoug Rabson #include <sys/systm.h> 54e1743d02SSøren Schmidt #include <sys/signalvar.h> 558c64af4fSJohn Polstra #include <sys/stat.h> 561005a129SJohn Baldwin #include <sys/sx.h> 578c64af4fSJohn Polstra #include <sys/syscall.h> 58e1743d02SSøren Schmidt #include <sys/sysctl.h> 598c64af4fSJohn Polstra #include <sys/sysent.h> 60a794e791SBruce Evans #include <sys/vnode.h> 61e1743d02SSøren Schmidt 62e1743d02SSøren Schmidt #include <vm/vm.h> 63e1743d02SSøren Schmidt #include <vm/vm_kern.h> 64e1743d02SSøren Schmidt #include <vm/vm_param.h> 65e1743d02SSøren Schmidt #include <vm/pmap.h> 66e1743d02SSøren Schmidt #include <vm/vm_map.h> 670ff27d31SJohn Polstra #include <vm/vm_object.h> 68e1743d02SSøren Schmidt #include <vm/vm_extern.h> 69e1743d02SSøren Schmidt 7052c24af7SPeter Wemm #include <machine/elf.h> 71e1743d02SSøren Schmidt #include <machine/md_var.h> 72e1743d02SSøren Schmidt 7362919d78SPeter Wemm #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32 7462919d78SPeter Wemm #include <machine/fpu.h> 7562919d78SPeter Wemm #include <compat/ia32/ia32_reg.h> 7662919d78SPeter Wemm #endif 7762919d78SPeter Wemm 78c815a20cSDavid E. O'Brien #define OLD_EI_BRAND 8 79c815a20cSDavid E. 
O'Brien 803ebc1248SPeter Wemm static int __elfN(check_header)(const Elf_Ehdr *hdr); 815fe3ed62SJake Burkholder static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr, 825fe3ed62SJake Burkholder const char *interp); 833ebc1248SPeter Wemm static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, 843ebc1248SPeter Wemm u_long *entry, size_t pagesize); 85373d1a3fSAlan Cox static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object, 868c64af4fSJohn Polstra vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, 873ebc1248SPeter Wemm vm_prot_t prot, size_t pagesize); 883ebc1248SPeter Wemm static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); 89e1743d02SSøren Schmidt 90a360a43dSJake Burkholder SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0, 91a360a43dSJake Burkholder ""); 92a360a43dSJake Burkholder 93e548a1d4SJake Burkholder int __elfN(fallback_brand) = -1; 94e548a1d4SJake Burkholder SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, 95e548a1d4SJake Burkholder fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0, 96a360a43dSJake Burkholder __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); 97a360a43dSJake Burkholder TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand", 98e548a1d4SJake Burkholder &__elfN(fallback_brand)); 99a360a43dSJake Burkholder 100d8a4f230SBruce Evans static int elf_trace = 0; 101a360a43dSJake Burkholder SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, ""); 102a360a43dSJake Burkholder 103551d79e1SMarcel Moolenaar static int elf_legacy_coredump = 0; 104a360a43dSJake Burkholder SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 105551d79e1SMarcel Moolenaar &elf_legacy_coredump, 0, ""); 106e1743d02SSøren Schmidt 1073ebc1248SPeter Wemm static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; 108e1743d02SSøren Schmidt 10993d1c728SKonstantin Belousov #define trunc_page_ps(va, ps) ((va) & ~(ps 
- 1)) 11093d1c728SKonstantin Belousov #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1)) 11193d1c728SKonstantin Belousov #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a)) 11293d1c728SKonstantin Belousov 113e1743d02SSøren Schmidt int 1143ebc1248SPeter Wemm __elfN(insert_brand_entry)(Elf_Brandinfo *entry) 115e1743d02SSøren Schmidt { 116e1743d02SSøren Schmidt int i; 117e1743d02SSøren Schmidt 1183ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 119ea5a2b2eSSøren Schmidt if (elf_brand_list[i] == NULL) { 120ea5a2b2eSSøren Schmidt elf_brand_list[i] = entry; 121e1743d02SSøren Schmidt break; 122e1743d02SSøren Schmidt } 123e1743d02SSøren Schmidt } 124ea5a2b2eSSøren Schmidt if (i == MAX_BRANDS) 125a7cddfedSJake Burkholder return (-1); 126a7cddfedSJake Burkholder return (0); 127e1743d02SSøren Schmidt } 128e1743d02SSøren Schmidt 129e1743d02SSøren Schmidt int 1303ebc1248SPeter Wemm __elfN(remove_brand_entry)(Elf_Brandinfo *entry) 131e1743d02SSøren Schmidt { 132e1743d02SSøren Schmidt int i; 133e1743d02SSøren Schmidt 1343ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 135ea5a2b2eSSøren Schmidt if (elf_brand_list[i] == entry) { 136ea5a2b2eSSøren Schmidt elf_brand_list[i] = NULL; 137e1743d02SSøren Schmidt break; 138e1743d02SSøren Schmidt } 139e1743d02SSøren Schmidt } 140ea5a2b2eSSøren Schmidt if (i == MAX_BRANDS) 141a7cddfedSJake Burkholder return (-1); 142a7cddfedSJake Burkholder return (0); 143e1743d02SSøren Schmidt } 144e1743d02SSøren Schmidt 145096977faSMark Newton int 1463ebc1248SPeter Wemm __elfN(brand_inuse)(Elf_Brandinfo *entry) 147096977faSMark Newton { 148096977faSMark Newton struct proc *p; 149553629ebSJake Burkholder int rval = FALSE; 150096977faSMark Newton 1511005a129SJohn Baldwin sx_slock(&allproc_lock); 1524f506694SXin LI FOREACH_PROC_IN_SYSTEM(p) { 153553629ebSJake Burkholder if (p->p_sysent == entry->sysvec) { 154553629ebSJake Burkholder rval = TRUE; 155553629ebSJake Burkholder break; 156096977faSMark Newton } 
157553629ebSJake Burkholder } 1581005a129SJohn Baldwin sx_sunlock(&allproc_lock); 159096977faSMark Newton 160553629ebSJake Burkholder return (rval); 161096977faSMark Newton } 162096977faSMark Newton 1635fe3ed62SJake Burkholder static Elf_Brandinfo * 1645fe3ed62SJake Burkholder __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp) 1655fe3ed62SJake Burkholder { 1665fe3ed62SJake Burkholder Elf_Brandinfo *bi; 1675fe3ed62SJake Burkholder int i; 1685fe3ed62SJake Burkholder 1695fe3ed62SJake Burkholder /* 1705fe3ed62SJake Burkholder * We support three types of branding -- (1) the ELF EI_OSABI field 1715fe3ed62SJake Burkholder * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string 1725fe3ed62SJake Burkholder * branding w/in the ELF header, and (3) path of the `interp_path' 1735fe3ed62SJake Burkholder * field. We should also look for an ".note.ABI-tag" ELF section now 1745fe3ed62SJake Burkholder * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones. 1755fe3ed62SJake Burkholder */ 1765fe3ed62SJake Burkholder 1775fe3ed62SJake Burkholder /* If the executable has a brand, search for it in the brand list. */ 1785fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 1795fe3ed62SJake Burkholder bi = elf_brand_list[i]; 1805fe3ed62SJake Burkholder if (bi != NULL && hdr->e_machine == bi->machine && 1815fe3ed62SJake Burkholder (hdr->e_ident[EI_OSABI] == bi->brand || 1825fe3ed62SJake Burkholder strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND], 1835fe3ed62SJake Burkholder bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0)) 1845fe3ed62SJake Burkholder return (bi); 1855fe3ed62SJake Burkholder } 1865fe3ed62SJake Burkholder 1875fe3ed62SJake Burkholder /* Lacking a known brand, search for a recognized interpreter. 
*/ 1885fe3ed62SJake Burkholder if (interp != NULL) { 1895fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 1905fe3ed62SJake Burkholder bi = elf_brand_list[i]; 1915fe3ed62SJake Burkholder if (bi != NULL && hdr->e_machine == bi->machine && 1925fe3ed62SJake Burkholder strcmp(interp, bi->interp_path) == 0) 1935fe3ed62SJake Burkholder return (bi); 1945fe3ed62SJake Burkholder } 1955fe3ed62SJake Burkholder } 1965fe3ed62SJake Burkholder 1975fe3ed62SJake Burkholder /* Lacking a recognized interpreter, try the default brand */ 1985fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 1995fe3ed62SJake Burkholder bi = elf_brand_list[i]; 2005fe3ed62SJake Burkholder if (bi != NULL && hdr->e_machine == bi->machine && 201e548a1d4SJake Burkholder __elfN(fallback_brand) == bi->brand) 2025fe3ed62SJake Burkholder return (bi); 2035fe3ed62SJake Burkholder } 2045fe3ed62SJake Burkholder return (NULL); 2055fe3ed62SJake Burkholder } 2065fe3ed62SJake Burkholder 207e1743d02SSøren Schmidt static int 2083ebc1248SPeter Wemm __elfN(check_header)(const Elf_Ehdr *hdr) 209e1743d02SSøren Schmidt { 210d0ca7c29SPeter Wemm Elf_Brandinfo *bi; 2113ebc1248SPeter Wemm int i; 2123ebc1248SPeter Wemm 21352c24af7SPeter Wemm if (!IS_ELF(*hdr) || 21452c24af7SPeter Wemm hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || 21552c24af7SPeter Wemm hdr->e_ident[EI_DATA] != ELF_TARG_DATA || 2163dc19c46SJacques Vidrine hdr->e_ident[EI_VERSION] != EV_CURRENT || 2173dc19c46SJacques Vidrine hdr->e_phentsize != sizeof(Elf_Phdr) || 2183dc19c46SJacques Vidrine hdr->e_version != ELF_TARG_VER) 219a7cddfedSJake Burkholder return (ENOEXEC); 220e1743d02SSøren Schmidt 2213ebc1248SPeter Wemm /* 2223ebc1248SPeter Wemm * Make sure we have at least one brand for this machine. 
2233ebc1248SPeter Wemm */ 2243ebc1248SPeter Wemm 2253ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 226d0ca7c29SPeter Wemm bi = elf_brand_list[i]; 227d0ca7c29SPeter Wemm if (bi != NULL && bi->machine == hdr->e_machine) 2283ebc1248SPeter Wemm break; 2293ebc1248SPeter Wemm } 2303ebc1248SPeter Wemm if (i == MAX_BRANDS) 231a7cddfedSJake Burkholder return (ENOEXEC); 232e1743d02SSøren Schmidt 233a7cddfedSJake Burkholder return (0); 234e1743d02SSøren Schmidt } 235e1743d02SSøren Schmidt 236e1743d02SSøren Schmidt static int 2373ebc1248SPeter Wemm __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 238ff6f03c7SAlan Cox vm_offset_t start, vm_offset_t end, vm_prot_t prot) 2393ebc1248SPeter Wemm { 240da61b9a6SAlan Cox struct sf_buf *sf; 241da61b9a6SAlan Cox int error; 2423ebc1248SPeter Wemm vm_offset_t off; 2433ebc1248SPeter Wemm 2443ebc1248SPeter Wemm /* 2453ebc1248SPeter Wemm * Create the page if it doesn't exist yet. Ignore errors. 2463ebc1248SPeter Wemm */ 2473ebc1248SPeter Wemm vm_map_lock(map); 248ff6f03c7SAlan Cox vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), 249ff6f03c7SAlan Cox VM_PROT_ALL, VM_PROT_ALL, 0); 2503ebc1248SPeter Wemm vm_map_unlock(map); 2513ebc1248SPeter Wemm 2523ebc1248SPeter Wemm /* 2533ebc1248SPeter Wemm * Find the page from the underlying object. 
2543ebc1248SPeter Wemm */ 2553ebc1248SPeter Wemm if (object) { 256da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset); 257da61b9a6SAlan Cox if (sf == NULL) 258da61b9a6SAlan Cox return (KERN_FAILURE); 2593ebc1248SPeter Wemm off = offset - trunc_page(offset); 260da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, 261ca0387efSJake Burkholder end - start); 262da61b9a6SAlan Cox vm_imgact_unmap_page(sf); 2633ebc1248SPeter Wemm if (error) { 264a7cddfedSJake Burkholder return (KERN_FAILURE); 2653ebc1248SPeter Wemm } 2663ebc1248SPeter Wemm } 2673ebc1248SPeter Wemm 268a7cddfedSJake Burkholder return (KERN_SUCCESS); 2693ebc1248SPeter Wemm } 2703ebc1248SPeter Wemm 2713ebc1248SPeter Wemm static int 2723ebc1248SPeter Wemm __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 273ff6f03c7SAlan Cox vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) 2743ebc1248SPeter Wemm { 275da61b9a6SAlan Cox struct sf_buf *sf; 276da61b9a6SAlan Cox vm_offset_t off; 277a063facbSMarcel Moolenaar vm_size_t sz; 278a063facbSMarcel Moolenaar int error, rv; 2793ebc1248SPeter Wemm 2803ebc1248SPeter Wemm if (start != trunc_page(start)) { 28181f223caSJake Burkholder rv = __elfN(map_partial)(map, object, offset, start, 282ff6f03c7SAlan Cox round_page(start), prot); 2833ebc1248SPeter Wemm if (rv) 284a7cddfedSJake Burkholder return (rv); 2853ebc1248SPeter Wemm offset += round_page(start) - start; 2863ebc1248SPeter Wemm start = round_page(start); 2873ebc1248SPeter Wemm } 2883ebc1248SPeter Wemm if (end != round_page(end)) { 28981f223caSJake Burkholder rv = __elfN(map_partial)(map, object, offset + 290ff6f03c7SAlan Cox trunc_page(end) - start, trunc_page(end), end, prot); 2913ebc1248SPeter Wemm if (rv) 292a7cddfedSJake Burkholder return (rv); 2933ebc1248SPeter Wemm end = trunc_page(end); 2943ebc1248SPeter Wemm } 2953ebc1248SPeter Wemm if (end > start) { 2963ebc1248SPeter Wemm if (offset & PAGE_MASK) { 2973ebc1248SPeter Wemm /* 2983ebc1248SPeter Wemm 
* The mapping is not page aligned. This means we have 2993ebc1248SPeter Wemm * to copy the data. Sigh. 3003ebc1248SPeter Wemm */ 301584716b0SAlan Cox rv = vm_map_find(map, NULL, 0, &start, end - start, 302ff6f03c7SAlan Cox FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0); 3033ebc1248SPeter Wemm if (rv) 304a7cddfedSJake Burkholder return (rv); 305da61b9a6SAlan Cox if (object == NULL) 306da61b9a6SAlan Cox return (KERN_SUCCESS); 307da61b9a6SAlan Cox for (; start < end; start += sz) { 308da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset); 309da61b9a6SAlan Cox if (sf == NULL) 310da61b9a6SAlan Cox return (KERN_FAILURE); 3113ebc1248SPeter Wemm off = offset - trunc_page(offset); 3123ebc1248SPeter Wemm sz = end - start; 313da61b9a6SAlan Cox if (sz > PAGE_SIZE - off) 314da61b9a6SAlan Cox sz = PAGE_SIZE - off; 315da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, 3163ebc1248SPeter Wemm (caddr_t)start, sz); 317da61b9a6SAlan Cox vm_imgact_unmap_page(sf); 3183ebc1248SPeter Wemm if (error) { 319a7cddfedSJake Burkholder return (KERN_FAILURE); 3203ebc1248SPeter Wemm } 321da61b9a6SAlan Cox offset += sz; 3223ebc1248SPeter Wemm } 3233ebc1248SPeter Wemm rv = KERN_SUCCESS; 3243ebc1248SPeter Wemm } else { 325e5e6093bSAlan Cox vm_object_reference(object); 3263ebc1248SPeter Wemm vm_map_lock(map); 3273ebc1248SPeter Wemm rv = vm_map_insert(map, object, offset, start, end, 328ff6f03c7SAlan Cox prot, VM_PROT_ALL, cow); 3293ebc1248SPeter Wemm vm_map_unlock(map); 330e5e6093bSAlan Cox if (rv != KERN_SUCCESS) 331e5e6093bSAlan Cox vm_object_deallocate(object); 3323ebc1248SPeter Wemm } 333a7cddfedSJake Burkholder return (rv); 3343ebc1248SPeter Wemm } else { 335a7cddfedSJake Burkholder return (KERN_SUCCESS); 3363ebc1248SPeter Wemm } 3373ebc1248SPeter Wemm } 3383ebc1248SPeter Wemm 3393ebc1248SPeter Wemm static int 340373d1a3fSAlan Cox __elfN(load_section)(struct vmspace *vmspace, 341373d1a3fSAlan Cox vm_object_t object, vm_offset_t offset, 3423ebc1248SPeter Wemm caddr_t vmaddr, size_t 
memsz, size_t filsz, vm_prot_t prot, 3433ebc1248SPeter Wemm size_t pagesize) 344e1743d02SSøren Schmidt { 345da61b9a6SAlan Cox struct sf_buf *sf; 346e1743d02SSøren Schmidt size_t map_len; 347e1743d02SSøren Schmidt vm_offset_t map_addr; 348fa7dd9c5SMatthew Dillon int error, rv, cow; 349e1743d02SSøren Schmidt size_t copy_len; 35052c24af7SPeter Wemm vm_offset_t file_addr; 35152c24af7SPeter Wemm 35225ead034SBrian Feldman /* 35325ead034SBrian Feldman * It's necessary to fail if the filsz + offset taken from the 35425ead034SBrian Feldman * header is greater than the actual file pager object's size. 35525ead034SBrian Feldman * If we were to allow this, then the vm_map_find() below would 35625ead034SBrian Feldman * walk right off the end of the file object and into the ether. 35725ead034SBrian Feldman * 35825ead034SBrian Feldman * While I'm here, might as well check for something else that 35925ead034SBrian Feldman * is invalid: filsz cannot be greater than memsz. 36025ead034SBrian Feldman */ 36125ead034SBrian Feldman if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size || 36225ead034SBrian Feldman filsz > memsz) { 36325ead034SBrian Feldman uprintf("elf_load_section: truncated ELF file\n"); 36425ead034SBrian Feldman return (ENOEXEC); 36525ead034SBrian Feldman } 36625ead034SBrian Feldman 3673ebc1248SPeter Wemm map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize); 3683ebc1248SPeter Wemm file_addr = trunc_page_ps(offset, pagesize); 369e1743d02SSøren Schmidt 370e1743d02SSøren Schmidt /* 37152c24af7SPeter Wemm * We have two choices. We can either clear the data in the last page 37252c24af7SPeter Wemm * of an oversized mapping, or we can start the anon mapping a page 37352c24af7SPeter Wemm * early and copy the initialized data into that first page. We 37452c24af7SPeter Wemm * choose the second.. 
37552c24af7SPeter Wemm */ 37652c24af7SPeter Wemm if (memsz > filsz) 3773ebc1248SPeter Wemm map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr; 37852c24af7SPeter Wemm else 3793ebc1248SPeter Wemm map_len = round_page_ps(offset + filsz, pagesize) - file_addr; 38052c24af7SPeter Wemm 38152c24af7SPeter Wemm if (map_len != 0) { 382fa7dd9c5SMatthew Dillon /* cow flags: don't dump readonly sections in core */ 383fa7dd9c5SMatthew Dillon cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | 384fa7dd9c5SMatthew Dillon (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); 385fa7dd9c5SMatthew Dillon 3863ebc1248SPeter Wemm rv = __elfN(map_insert)(&vmspace->vm_map, 38752c24af7SPeter Wemm object, 38852c24af7SPeter Wemm file_addr, /* file offset */ 38952c24af7SPeter Wemm map_addr, /* virtual start */ 39052c24af7SPeter Wemm map_addr + map_len,/* virtual end */ 39152c24af7SPeter Wemm prot, 392fa7dd9c5SMatthew Dillon cow); 393e5e6093bSAlan Cox if (rv != KERN_SUCCESS) 394a7cddfedSJake Burkholder return (EINVAL); 39552c24af7SPeter Wemm 39652c24af7SPeter Wemm /* we can stop now if we've covered it all */ 39723955314SAlfred Perlstein if (memsz == filsz) { 398a7cddfedSJake Burkholder return (0); 39952c24af7SPeter Wemm } 40023955314SAlfred Perlstein } 40152c24af7SPeter Wemm 40252c24af7SPeter Wemm 40352c24af7SPeter Wemm /* 40452c24af7SPeter Wemm * We have to get the remaining bit of the file into the first part 40552c24af7SPeter Wemm * of the oversized map segment. This is normally because the .data 40652c24af7SPeter Wemm * segment in the file is extended to provide bss. It's a neat idea 40752c24af7SPeter Wemm * to try and save a page, but it's a pain in the behind to implement. 
408e1743d02SSøren Schmidt */ 4093ebc1248SPeter Wemm copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize); 4103ebc1248SPeter Wemm map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize); 411ca0387efSJake Burkholder map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) - 412ca0387efSJake Burkholder map_addr; 413e1743d02SSøren Schmidt 41452c24af7SPeter Wemm /* This had damn well better be true! */ 4158191d577SPeter Wemm if (map_len != 0) { 41681f223caSJake Burkholder rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr, 417ff6f03c7SAlan Cox map_addr + map_len, VM_PROT_ALL, 0); 41823955314SAlfred Perlstein if (rv != KERN_SUCCESS) { 419a7cddfedSJake Burkholder return (EINVAL); 4208191d577SPeter Wemm } 42123955314SAlfred Perlstein } 422e1743d02SSøren Schmidt 42352c24af7SPeter Wemm if (copy_len != 0) { 4243ebc1248SPeter Wemm vm_offset_t off; 425da61b9a6SAlan Cox 426da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset + filsz); 427da61b9a6SAlan Cox if (sf == NULL) 428da61b9a6SAlan Cox return (EIO); 429e1743d02SSøren Schmidt 43052c24af7SPeter Wemm /* send the page fragment to user space */ 43181f223caSJake Burkholder off = trunc_page_ps(offset + filsz, pagesize) - 43281f223caSJake Burkholder trunc_page(offset + filsz); 433da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, 434da61b9a6SAlan Cox (caddr_t)map_addr, copy_len); 435da61b9a6SAlan Cox vm_imgact_unmap_page(sf); 43623955314SAlfred Perlstein if (error) { 43752c24af7SPeter Wemm return (error); 43852c24af7SPeter Wemm } 43923955314SAlfred Perlstein } 440e1743d02SSøren Schmidt 441e1743d02SSøren Schmidt /* 4423ebc1248SPeter Wemm * set it to the specified protection. 4433ebc1248SPeter Wemm * XXX had better undo the damage from pasting over the cracks here! 
444e1743d02SSøren Schmidt */ 4453ebc1248SPeter Wemm vm_map_protect(&vmspace->vm_map, trunc_page(map_addr), 4463ebc1248SPeter Wemm round_page(map_addr + map_len), prot, FALSE); 4478191d577SPeter Wemm 448ff6f03c7SAlan Cox return (0); 449e1743d02SSøren Schmidt } 450e1743d02SSøren Schmidt 451c33fe779SJohn Polstra /* 452c33fe779SJohn Polstra * Load the file "file" into memory. It may be either a shared object 453c33fe779SJohn Polstra * or an executable. 454c33fe779SJohn Polstra * 455c33fe779SJohn Polstra * The "addr" reference parameter is in/out. On entry, it specifies 456c33fe779SJohn Polstra * the address where a shared object should be loaded. If the file is 457c33fe779SJohn Polstra * an executable, this value is ignored. On exit, "addr" specifies 458c33fe779SJohn Polstra * where the file was actually loaded. 459c33fe779SJohn Polstra * 460c33fe779SJohn Polstra * The "entry" reference parameter is out only. On exit, it specifies 461c33fe779SJohn Polstra * the entry point for the loaded file. 462c33fe779SJohn Polstra */ 463e1743d02SSøren Schmidt static int 4643ebc1248SPeter Wemm __elfN(load_file)(struct proc *p, const char *file, u_long *addr, 4653ebc1248SPeter Wemm u_long *entry, size_t pagesize) 466e1743d02SSøren Schmidt { 467911c2be0SMark Peek struct { 468911c2be0SMark Peek struct nameidata nd; 469911c2be0SMark Peek struct vattr attr; 470911c2be0SMark Peek struct image_params image_params; 471911c2be0SMark Peek } *tempdata; 472d254af07SMatthew Dillon const Elf_Ehdr *hdr = NULL; 473d254af07SMatthew Dillon const Elf_Phdr *phdr = NULL; 474911c2be0SMark Peek struct nameidata *nd; 475e1743d02SSøren Schmidt struct vmspace *vmspace = p->p_vmspace; 476911c2be0SMark Peek struct vattr *attr; 477911c2be0SMark Peek struct image_params *imgp; 47852c24af7SPeter Wemm vm_prot_t prot; 479c33fe779SJohn Polstra u_long rbase; 480c33fe779SJohn Polstra u_long base_addr = 0; 48168ff2a43SChristian S.J. 
Peron int vfslocked, error, i, numsegs; 482e1743d02SSøren Schmidt 483b40ce416SJulian Elischer if (curthread->td_proc != p) 484b40ce416SJulian Elischer panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */ 485b40ce416SJulian Elischer 486a163d034SWarner Losh tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); 487911c2be0SMark Peek nd = &tempdata->nd; 488911c2be0SMark Peek attr = &tempdata->attr; 489911c2be0SMark Peek imgp = &tempdata->image_params; 490911c2be0SMark Peek 491c8a79999SPeter Wemm /* 492c8a79999SPeter Wemm * Initialize part of the common data 493c8a79999SPeter Wemm */ 494c8a79999SPeter Wemm imgp->proc = p; 495911c2be0SMark Peek imgp->attr = attr; 496c8a79999SPeter Wemm imgp->firstpage = NULL; 49759c8bc40SAlan Cox imgp->image_header = NULL; 4980b2ed1aeSJeff Roberson imgp->object = NULL; 4996d7bdc8dSRobert Watson imgp->execlabel = NULL; 500c8a79999SPeter Wemm 501b40ce416SJulian Elischer /* XXXKSE */ 50268ff2a43SChristian S.J. Peron NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, 50368ff2a43SChristian S.J. Peron curthread); 50468ff2a43SChristian S.J. Peron vfslocked = 0; 505911c2be0SMark Peek if ((error = namei(nd)) != 0) { 506911c2be0SMark Peek nd->ni_vp = NULL; 507e1743d02SSøren Schmidt goto fail; 508e1743d02SSøren Schmidt } 50968ff2a43SChristian S.J. Peron vfslocked = NDHASGIANT(nd); 510911c2be0SMark Peek NDFREE(nd, NDF_ONLY_PNBUF); 511911c2be0SMark Peek imgp->vp = nd->ni_vp; 512c8a79999SPeter Wemm 513e1743d02SSøren Schmidt /* 514e1743d02SSøren Schmidt * Check permissions, modes, uid, etc on the file, and "open" it. 
515e1743d02SSøren Schmidt */ 516c8a79999SPeter Wemm error = exec_check_permissions(imgp); 517373d1a3fSAlan Cox if (error) 518c8a79999SPeter Wemm goto fail; 519e1743d02SSøren Schmidt 520c8a79999SPeter Wemm error = exec_map_first_page(imgp); 521373d1a3fSAlan Cox if (error) 522373d1a3fSAlan Cox goto fail; 523373d1a3fSAlan Cox 52425ead034SBrian Feldman /* 52525ead034SBrian Feldman * Also make certain that the interpreter stays the same, so set 526e6e370a7SJeff Roberson * its VV_TEXT flag, too. 52725ead034SBrian Feldman */ 528e6e370a7SJeff Roberson nd->ni_vp->v_vflag |= VV_TEXT; 529e6e370a7SJeff Roberson 5308516dd18SPoul-Henning Kamp imgp->object = nd->ni_vp->v_object; 531e1743d02SSøren Schmidt 532d254af07SMatthew Dillon hdr = (const Elf_Ehdr *)imgp->image_header; 5333ebc1248SPeter Wemm if ((error = __elfN(check_header)(hdr)) != 0) 534e1743d02SSøren Schmidt goto fail; 535c33fe779SJohn Polstra if (hdr->e_type == ET_DYN) 536c33fe779SJohn Polstra rbase = *addr; 537c33fe779SJohn Polstra else if (hdr->e_type == ET_EXEC) 538c33fe779SJohn Polstra rbase = 0; 539c33fe779SJohn Polstra else { 540c33fe779SJohn Polstra error = ENOEXEC; 541c33fe779SJohn Polstra goto fail; 542c33fe779SJohn Polstra } 543e1743d02SSøren Schmidt 544c8a79999SPeter Wemm /* Only support headers that fit within first page for now */ 5453dc19c46SJacques Vidrine /* (multiplication of two Elf_Half fields will not overflow) */ 54652c24af7SPeter Wemm if ((hdr->e_phoff > PAGE_SIZE) || 5473dc19c46SJacques Vidrine (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) { 548c8a79999SPeter Wemm error = ENOEXEC; 549e1743d02SSøren Schmidt goto fail; 550c8a79999SPeter Wemm } 551c8a79999SPeter Wemm 552d254af07SMatthew Dillon phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 55393d1c728SKonstantin Belousov if (!aligned(phdr, Elf_Addr)) { 55493d1c728SKonstantin Belousov error = ENOEXEC; 55593d1c728SKonstantin Belousov goto fail; 55693d1c728SKonstantin Belousov } 557e1743d02SSøren Schmidt 558c33fe779SJohn 
Polstra for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) { 55952c24af7SPeter Wemm if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */ 56052c24af7SPeter Wemm prot = 0; 561e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_X) 562e1743d02SSøren Schmidt prot |= VM_PROT_EXECUTE; 563e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_W) 564e1743d02SSøren Schmidt prot |= VM_PROT_WRITE; 565e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_R) 566e1743d02SSøren Schmidt prot |= VM_PROT_READ; 567e1743d02SSøren Schmidt 568373d1a3fSAlan Cox if ((error = __elfN(load_section)(vmspace, 569373d1a3fSAlan Cox imgp->object, phdr[i].p_offset, 57081f223caSJake Burkholder (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, 57181f223caSJake Burkholder phdr[i].p_memsz, phdr[i].p_filesz, prot, 57281f223caSJake Burkholder pagesize)) != 0) 573e1743d02SSøren Schmidt goto fail; 574e1743d02SSøren Schmidt /* 575c33fe779SJohn Polstra * Establish the base address if this is the 576c33fe779SJohn Polstra * first segment. 577e1743d02SSøren Schmidt */ 578c33fe779SJohn Polstra if (numsegs == 0) 579ca0387efSJake Burkholder base_addr = trunc_page(phdr[i].p_vaddr + 580ca0387efSJake Burkholder rbase); 581c33fe779SJohn Polstra numsegs++; 582e1743d02SSøren Schmidt } 583e1743d02SSøren Schmidt } 584c33fe779SJohn Polstra *addr = base_addr; 585c33fe779SJohn Polstra *entry = (unsigned long)hdr->e_entry + rbase; 586e1743d02SSøren Schmidt 587e1743d02SSøren Schmidt fail: 588c8a79999SPeter Wemm if (imgp->firstpage) 589c8a79999SPeter Wemm exec_unmap_first_page(imgp); 5900b2ed1aeSJeff Roberson 591911c2be0SMark Peek if (nd->ni_vp) 592373d1a3fSAlan Cox vput(nd->ni_vp); 593911c2be0SMark Peek 59468ff2a43SChristian S.J. 
	VFS_UNLOCK_GIANT(vfslocked);
	free(tempdata, M_TEMP);

	return (error);
}

static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";

/*
 * Main ELF image activator.  Validates the ELF header, maps each PT_LOAD
 * segment into the new address space, optionally loads the runtime
 * interpreter named by PT_INTERP, and builds the auxargs table that
 * __elfN(freebsd_fixup) later copies onto the user stack.  Returns -1
 * while the image may still belong to another activator; after the ELF
 * header is validated, returns an errno instead.
 */
static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr, *pnote = NULL;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error = 0, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	const Elf_Note *note, *note_end;
	char *path;
	const char *note_name;
	struct thread *td = curthread;
	struct sysentvec *sv;

	/*
	 * Do we have a valid ELF header ?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
	 * if particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr))
		return (ENOEXEC);
	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_INTERP) {
			/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
				return (ENOEXEC);
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		return (ENOEXEC);
	}
	if (hdr->e_type == ET_DYN &&
	    (brand_info->flags & BI_CAN_EXEC_DYN) == 0)
		return (ENOEXEC);
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		interp = brand_info->interp_newpath;

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 */
	VOP_UNLOCK(imgp->vp, 0, td);

	error = exec_new_vmspace(imgp, sv);
	imgp->proc->p_sysent = sv;

	/* Re-acquire the vnode lock before the error check above can bail. */
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	if (error)
		return (error);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
				return (error);

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry. Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				<= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR: 	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		case PT_NOTE:
			pnote = &phdr[i];
			break;
		default:
			break;
		}
	}

	/* No separate data segment seen: treat text as the data area too. */
	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
		return (ENOMEM);
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);

	imgp->entry_addr = entry;

	if (interp != NULL) {
		/* Drop the image vnode lock while loading the interpreter. */
		VOP_UNLOCK(imgp->vp, 0, td);
		if (brand_info->emul_path != NULL &&
		    brand_info->emul_path[0] != '\0') {
			/* First try the brand's emulation tree prefix. */
			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
			snprintf(path, MAXPATHLEN, "%s%s",
			    brand_info->emul_path, interp);
			error = __elfN(load_file)(imgp->proc, path, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
			free(path, M_TEMP);
			if (error == 0)
				interp = NULL;
		}
		/* Fall back to the literal PT_INTERP path. */
		if (interp != NULL) {
			error = __elfN(load_file)(imgp->proc, interp, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
		}
		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			return (error);
		}
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

	/*
	 * Try to fetch the osreldate for FreeBSD binary from the ELF
	 * OSABI-note. Only the first page of the image is searched,
	 * the same as for headers.
	 */
	if (pnote != NULL && pnote->p_offset < PAGE_SIZE &&
	    pnote->p_offset + pnote->p_filesz < PAGE_SIZE ) {
		note = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
		if (!aligned(note, Elf32_Addr)) {
			free(imgp->auxargs, M_TEMP);
			imgp->auxargs = NULL;
			return (ENOEXEC);
		}
		note_end = (const Elf_Note *)(imgp->image_header + pnote->p_offset +
		    pnote->p_filesz);
		while (note < note_end) {
			if (note->n_namesz == sizeof(FREEBSD_ABI_VENDOR) &&
			    note->n_descsz == sizeof(int32_t) &&
			    note->n_type == 1 /* ABI_NOTETYPE */) {
				note_name = (const char *)(note + 1);
				if (strncmp(FREEBSD_ABI_VENDOR, note_name,
				    sizeof(FREEBSD_ABI_VENDOR)) == 0) {
					/* Desc follows the padded name. */
					imgp->proc->p_osrel = *(const int32_t *)
					    (note_name +
					    round_page_ps(sizeof(FREEBSD_ABI_VENDOR),
					    sizeof(Elf32_Addr)));
					break;
				}
			}
			note = (const Elf_Note *)((const char *)(note + 1) +
			    round_page_ps(note->n_namesz, sizeof(Elf32_Addr)) +
			    round_page_ps(note->n_descsz, sizeof(Elf32_Addr)));
		}
	}

	return (error);
}

#define	suword __CONCAT(suword, __ELF_WORD_SIZE)

/*
 * Copy the auxargs built by the image activator onto the new process
 * stack as AT_* auxiliary vector entries, free the auxargs table, and
 * push argc below the argument/environment pointers.
 */
int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	/* Aux entries go just past argv[] and envp[] (plus 2 NULLs). */
	pos = base + (imgp->args->argc + imgp->args->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

/*
 * Write an ELF core dump of td's process to the vnode vp.  Fails with
 * EFAULT (before writing anything) if header plus segments would meet
 * or exceed limit.
 */
int
__elfN(coredump)(td, vp, limit)
	struct thread *td;
	struct vnode *vp;
	off_t limit;
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return (EINVAL);
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		/* Skip the PT_NOTE header; PT_LOAD entries follow it. */
		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
			    curthread); /* XXXKSE */
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
	struct thread *td;
	segment_callback func;
	void *closure;
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	boolean_t ignore_entry;

	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Dont include memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((object = entry->object.vm_object) == NULL)
			continue;

		/* Ignore memory-mapped devices and such things. */
		VM_OBJECT_LOCK(object);
		while ((backing_object = object->backing_object) != NULL) {
			/* Lock the next object before dropping this one. */
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		ignore_entry = object->type != OBJT_DEFAULT &&
		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
		VM_OBJECT_UNLOCK(object);
		if (ignore_entry)
			continue;

		(*func)(entry, closure);
	}
	vm_map_unlock_read(map);
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td)); /* XXXKSE */
}

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
#endif

/*
 * Generate the ELF core file header (ELF header, program headers, and
 * note section) into dst, advancing *off.  When dst is NULL, no data
 * is written or collected; only the size is accumulated in *off.
 */
static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		elf_prstatus_t status;
		elf_prfpregset_t fpregset;
		elf_prpsinfo_t psinfo;
	} *tempdata;
	elf_prstatus_t *status;
	elf_prfpregset_t *fpregset;
	elf_prpsinfo_t *psinfo;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header. We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, td->td_name, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, td->td_name,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(elf_prstatus_t);
			status->pr_gregsetsz = sizeof(elf_gregset_t);
			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
			fill_regs32(thr, &status->pr_reg);
			fill_fpregs32(thr, fpregset);
#else
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
#endif
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		/*
		 * Walk the remaining threads in list order, skipping td
		 * when it comes up since it was dumped first.
		 */
		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
		ehdr->e_machine = EM_386;
#else
		ehdr->e_machine = ELF_ARCH;
#endif
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segement. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

/*
 * Emit a single ELF note (header, name, then descriptor) at *off,
 * padding name and descriptor to Elf_Size alignment.  When dst is
 * NULL, only *off is advanced (sizing pass).
 */
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));