1e1743d02SSøren Schmidt /*- 221a3ee0eSDavid E. O'Brien  * Copyright (c) 2000 David O'Brien 3e1743d02SSøren Schmidt  * Copyright (c) 1995-1996 Søren Schmidt 4e1743d02SSøren Schmidt  * Copyright (c) 1996 Peter Wemm 5e1743d02SSøren Schmidt  * All rights reserved. 6e1743d02SSøren Schmidt  * 7e1743d02SSøren Schmidt  * Redistribution and use in source and binary forms, with or without 8e1743d02SSøren Schmidt  * modification, are permitted provided that the following conditions 9e1743d02SSøren Schmidt  * are met: 10e1743d02SSøren Schmidt  * 1. Redistributions of source code must retain the above copyright 11e1743d02SSøren Schmidt  *    notice, this list of conditions and the following disclaimer 12e1743d02SSøren Schmidt  *    in this position and unchanged. 13e1743d02SSøren Schmidt  * 2. Redistributions in binary form must reproduce the above copyright 14e1743d02SSøren Schmidt  *    notice, this list of conditions and the following disclaimer in the 15e1743d02SSøren Schmidt  *    documentation and/or other materials provided with the distribution. 16e1743d02SSøren Schmidt  * 3. The name of the author may not be used to endorse or promote products 1721dc7d4fSJens Schweikhardt  *    derived from this software without specific prior written permission 18e1743d02SSøren Schmidt  * 19e1743d02SSøren Schmidt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20e1743d02SSøren Schmidt  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21e1743d02SSøren Schmidt  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
22e1743d02SSøren Schmidt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23e1743d02SSøren Schmidt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24e1743d02SSøren Schmidt * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25e1743d02SSøren Schmidt * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26e1743d02SSøren Schmidt * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27e1743d02SSøren Schmidt * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28e1743d02SSøren Schmidt * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29e1743d02SSøren Schmidt */ 30e1743d02SSøren Schmidt 31677b542eSDavid E. O'Brien #include <sys/cdefs.h> 32677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 33677b542eSDavid E. O'Brien 3462919d78SPeter Wemm #include "opt_compat.h" 3562919d78SPeter Wemm 36e1743d02SSøren Schmidt #include <sys/param.h> 37e1743d02SSøren Schmidt #include <sys/exec.h> 388c64af4fSJohn Polstra #include <sys/fcntl.h> 39e1743d02SSøren Schmidt #include <sys/imgact.h> 40e1743d02SSøren Schmidt #include <sys/imgact_elf.h> 41e1743d02SSøren Schmidt #include <sys/kernel.h> 42f34fa851SJohn Baldwin #include <sys/lock.h> 43e1743d02SSøren Schmidt #include <sys/malloc.h> 4468ff2a43SChristian S.J. 
Peron #include <sys/mount.h> 4535e0e5b3SJohn Baldwin #include <sys/mutex.h> 468c64af4fSJohn Polstra #include <sys/mman.h> 47a794e791SBruce Evans #include <sys/namei.h> 488c64af4fSJohn Polstra #include <sys/pioctl.h> 49a794e791SBruce Evans #include <sys/proc.h> 508c64af4fSJohn Polstra #include <sys/procfs.h> 518c64af4fSJohn Polstra #include <sys/resourcevar.h> 52da61b9a6SAlan Cox #include <sys/sf_buf.h> 5336240ea5SDoug Rabson #include <sys/systm.h> 54e1743d02SSøren Schmidt #include <sys/signalvar.h> 558c64af4fSJohn Polstra #include <sys/stat.h> 561005a129SJohn Baldwin #include <sys/sx.h> 578c64af4fSJohn Polstra #include <sys/syscall.h> 58e1743d02SSøren Schmidt #include <sys/sysctl.h> 598c64af4fSJohn Polstra #include <sys/sysent.h> 60a794e791SBruce Evans #include <sys/vnode.h> 61e1743d02SSøren Schmidt 62e1743d02SSøren Schmidt #include <vm/vm.h> 63e1743d02SSøren Schmidt #include <vm/vm_kern.h> 64e1743d02SSøren Schmidt #include <vm/vm_param.h> 65e1743d02SSøren Schmidt #include <vm/pmap.h> 66e1743d02SSøren Schmidt #include <vm/vm_map.h> 670ff27d31SJohn Polstra #include <vm/vm_object.h> 68e1743d02SSøren Schmidt #include <vm/vm_extern.h> 69e1743d02SSøren Schmidt 7052c24af7SPeter Wemm #include <machine/elf.h> 71e1743d02SSøren Schmidt #include <machine/md_var.h> 72e1743d02SSøren Schmidt 7362919d78SPeter Wemm #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32 7462919d78SPeter Wemm #include <machine/fpu.h> 7562919d78SPeter Wemm #include <compat/ia32/ia32_reg.h> 7662919d78SPeter Wemm #endif 7762919d78SPeter Wemm 78c815a20cSDavid E. O'Brien #define OLD_EI_BRAND 8 79c815a20cSDavid E. 
O'Brien 803ebc1248SPeter Wemm static int __elfN(check_header)(const Elf_Ehdr *hdr); 8132c01de2SDmitry Chagin static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, 8232c01de2SDmitry Chagin const char *interp, int32_t *osrel); 833ebc1248SPeter Wemm static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, 843ebc1248SPeter Wemm u_long *entry, size_t pagesize); 85373d1a3fSAlan Cox static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object, 868c64af4fSJohn Polstra vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, 873ebc1248SPeter Wemm vm_prot_t prot, size_t pagesize); 883ebc1248SPeter Wemm static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); 8989ffc202SBjoern A. Zeeb static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note, 9089ffc202SBjoern A. Zeeb int32_t *osrel); 9189ffc202SBjoern A. Zeeb static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel); 9232c01de2SDmitry Chagin static boolean_t __elfN(check_note)(struct image_params *imgp, 9332c01de2SDmitry Chagin Elf_Brandnote *checknote, int32_t *osrel); 94e1743d02SSøren Schmidt 95a360a43dSJake Burkholder SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0, 96a360a43dSJake Burkholder ""); 97a360a43dSJake Burkholder 98e548a1d4SJake Burkholder int __elfN(fallback_brand) = -1; 99e548a1d4SJake Burkholder SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, 100e548a1d4SJake Burkholder fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0, 101a360a43dSJake Burkholder __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); 102a360a43dSJake Burkholder TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand", 103e548a1d4SJake Burkholder &__elfN(fallback_brand)); 104a360a43dSJake Burkholder 105551d79e1SMarcel Moolenaar static int elf_legacy_coredump = 0; 106a360a43dSJake Burkholder SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 107551d79e1SMarcel 
Moolenaar &elf_legacy_coredump, 0, ""); 108e1743d02SSøren Schmidt 1093ebc1248SPeter Wemm static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; 110e1743d02SSøren Schmidt 11193d1c728SKonstantin Belousov #define trunc_page_ps(va, ps) ((va) & ~(ps - 1)) 11293d1c728SKonstantin Belousov #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1)) 11393d1c728SKonstantin Belousov #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a)) 11493d1c728SKonstantin Belousov 11532c01de2SDmitry Chagin static const char FREEBSD_ABI_VENDOR[] = "FreeBSD"; 11632c01de2SDmitry Chagin 11732c01de2SDmitry Chagin Elf_Brandnote __elfN(freebsd_brandnote) = { 11832c01de2SDmitry Chagin .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), 11932c01de2SDmitry Chagin .hdr.n_descsz = sizeof(int32_t), 12032c01de2SDmitry Chagin .hdr.n_type = 1, 12132c01de2SDmitry Chagin .vendor = FREEBSD_ABI_VENDOR, 12289ffc202SBjoern A. Zeeb .flags = BN_TRANSLATE_OSREL, 12389ffc202SBjoern A. Zeeb .trans_osrel = __elfN(freebsd_trans_osrel) 12432c01de2SDmitry Chagin }; 12532c01de2SDmitry Chagin 12689ffc202SBjoern A. Zeeb static boolean_t 12789ffc202SBjoern A. Zeeb __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) 12889ffc202SBjoern A. Zeeb { 12989ffc202SBjoern A. Zeeb uintptr_t p; 13089ffc202SBjoern A. Zeeb 13189ffc202SBjoern A. Zeeb p = (uintptr_t)(note + 1); 13289ffc202SBjoern A. Zeeb p += roundup2(note->n_namesz, sizeof(Elf32_Addr)); 13389ffc202SBjoern A. Zeeb *osrel = *(const int32_t *)(p); 13489ffc202SBjoern A. Zeeb 13589ffc202SBjoern A. Zeeb return (TRUE); 13689ffc202SBjoern A. Zeeb } 13789ffc202SBjoern A. Zeeb 13889ffc202SBjoern A. Zeeb static const char GNU_ABI_VENDOR[] = "GNU"; 13989ffc202SBjoern A. Zeeb static int GNU_KFREEBSD_ABI_DESC = 3; 14089ffc202SBjoern A. Zeeb 14189ffc202SBjoern A. Zeeb Elf_Brandnote __elfN(kfreebsd_brandnote) = { 14289ffc202SBjoern A. Zeeb .hdr.n_namesz = sizeof(GNU_ABI_VENDOR), 14389ffc202SBjoern A. 
Zeeb .hdr.n_descsz = 16, /* XXX at least 16 */ 14489ffc202SBjoern A. Zeeb .hdr.n_type = 1, 14589ffc202SBjoern A. Zeeb .vendor = GNU_ABI_VENDOR, 14689ffc202SBjoern A. Zeeb .flags = BN_TRANSLATE_OSREL, 14789ffc202SBjoern A. Zeeb .trans_osrel = kfreebsd_trans_osrel 14889ffc202SBjoern A. Zeeb }; 14989ffc202SBjoern A. Zeeb 15089ffc202SBjoern A. Zeeb static boolean_t 15189ffc202SBjoern A. Zeeb kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel) 15289ffc202SBjoern A. Zeeb { 15389ffc202SBjoern A. Zeeb const Elf32_Word *desc; 15489ffc202SBjoern A. Zeeb uintptr_t p; 15589ffc202SBjoern A. Zeeb 15689ffc202SBjoern A. Zeeb p = (uintptr_t)(note + 1); 15789ffc202SBjoern A. Zeeb p += roundup2(note->n_namesz, sizeof(Elf32_Addr)); 15889ffc202SBjoern A. Zeeb 15989ffc202SBjoern A. Zeeb desc = (const Elf32_Word *)p; 16089ffc202SBjoern A. Zeeb if (desc[0] != GNU_KFREEBSD_ABI_DESC) 16189ffc202SBjoern A. Zeeb return (FALSE); 16289ffc202SBjoern A. Zeeb 16389ffc202SBjoern A. Zeeb /* 16489ffc202SBjoern A. Zeeb * Debian GNU/kFreeBSD embed the earliest compatible kernel version 16589ffc202SBjoern A. Zeeb * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way. 16689ffc202SBjoern A. Zeeb */ 16789ffc202SBjoern A. Zeeb *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3]; 16889ffc202SBjoern A. Zeeb 16989ffc202SBjoern A. Zeeb return (TRUE); 17089ffc202SBjoern A. Zeeb } 17189ffc202SBjoern A. Zeeb 172e1743d02SSøren Schmidt int 1733ebc1248SPeter Wemm __elfN(insert_brand_entry)(Elf_Brandinfo *entry) 174e1743d02SSøren Schmidt { 175e1743d02SSøren Schmidt int i; 176e1743d02SSøren Schmidt 1773ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 178ea5a2b2eSSøren Schmidt if (elf_brand_list[i] == NULL) { 179ea5a2b2eSSøren Schmidt elf_brand_list[i] = entry; 180e1743d02SSøren Schmidt break; 181e1743d02SSøren Schmidt } 182e1743d02SSøren Schmidt } 183925c8b5bSBjoern A. Zeeb if (i == MAX_BRANDS) { 184925c8b5bSBjoern A. 
Zeeb printf("WARNING: %s: could not insert brandinfo entry: %p\n", 185925c8b5bSBjoern A. Zeeb __func__, entry); 186a7cddfedSJake Burkholder return (-1); 187925c8b5bSBjoern A. Zeeb } 188a7cddfedSJake Burkholder return (0); 189e1743d02SSøren Schmidt } 190e1743d02SSøren Schmidt 191e1743d02SSøren Schmidt int 1923ebc1248SPeter Wemm __elfN(remove_brand_entry)(Elf_Brandinfo *entry) 193e1743d02SSøren Schmidt { 194e1743d02SSøren Schmidt int i; 195e1743d02SSøren Schmidt 1963ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 197ea5a2b2eSSøren Schmidt if (elf_brand_list[i] == entry) { 198ea5a2b2eSSøren Schmidt elf_brand_list[i] = NULL; 199e1743d02SSøren Schmidt break; 200e1743d02SSøren Schmidt } 201e1743d02SSøren Schmidt } 202ea5a2b2eSSøren Schmidt if (i == MAX_BRANDS) 203a7cddfedSJake Burkholder return (-1); 204a7cddfedSJake Burkholder return (0); 205e1743d02SSøren Schmidt } 206e1743d02SSøren Schmidt 207096977faSMark Newton int 2083ebc1248SPeter Wemm __elfN(brand_inuse)(Elf_Brandinfo *entry) 209096977faSMark Newton { 210096977faSMark Newton struct proc *p; 211553629ebSJake Burkholder int rval = FALSE; 212096977faSMark Newton 2131005a129SJohn Baldwin sx_slock(&allproc_lock); 2144f506694SXin LI FOREACH_PROC_IN_SYSTEM(p) { 215553629ebSJake Burkholder if (p->p_sysent == entry->sysvec) { 216553629ebSJake Burkholder rval = TRUE; 217553629ebSJake Burkholder break; 218096977faSMark Newton } 219553629ebSJake Burkholder } 2201005a129SJohn Baldwin sx_sunlock(&allproc_lock); 221096977faSMark Newton 222553629ebSJake Burkholder return (rval); 223096977faSMark Newton } 224096977faSMark Newton 2255fe3ed62SJake Burkholder static Elf_Brandinfo * 22632c01de2SDmitry Chagin __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, 22732c01de2SDmitry Chagin int32_t *osrel) 2285fe3ed62SJake Burkholder { 22932c01de2SDmitry Chagin const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; 2305fe3ed62SJake Burkholder Elf_Brandinfo *bi; 23132c01de2SDmitry Chagin boolean_t ret; 
2325fe3ed62SJake Burkholder int i; 2335fe3ed62SJake Burkholder 2345fe3ed62SJake Burkholder /* 23532c01de2SDmitry Chagin * We support four types of branding -- (1) the ELF EI_OSABI field 2365fe3ed62SJake Burkholder * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string 23732c01de2SDmitry Chagin * branding w/in the ELF header, (3) path of the `interp_path' 23832c01de2SDmitry Chagin * field, and (4) the ".note.ABI-tag" ELF section. 2395fe3ed62SJake Burkholder */ 2405fe3ed62SJake Burkholder 24132c01de2SDmitry Chagin /* Look for an ".note.ABI-tag" ELF section */ 24232c01de2SDmitry Chagin for (i = 0; i < MAX_BRANDS; i++) { 24332c01de2SDmitry Chagin bi = elf_brand_list[i]; 244ecc2fda8SBjoern A. Zeeb if (bi == NULL) 245ecc2fda8SBjoern A. Zeeb continue; 246ecc2fda8SBjoern A. Zeeb if (hdr->e_machine == bi->machine && (bi->flags & 247ecc2fda8SBjoern A. Zeeb (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { 24832c01de2SDmitry Chagin ret = __elfN(check_note)(imgp, bi->brand_note, osrel); 24932c01de2SDmitry Chagin if (ret) 25032c01de2SDmitry Chagin return (bi); 25132c01de2SDmitry Chagin } 25232c01de2SDmitry Chagin } 25332c01de2SDmitry Chagin 2545fe3ed62SJake Burkholder /* If the executable has a brand, search for it in the brand list. */ 2555fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 2565fe3ed62SJake Burkholder bi = elf_brand_list[i]; 257ecc2fda8SBjoern A. Zeeb if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) 258ecc2fda8SBjoern A. Zeeb continue; 259ecc2fda8SBjoern A. Zeeb if (hdr->e_machine == bi->machine && 2605fe3ed62SJake Burkholder (hdr->e_ident[EI_OSABI] == bi->brand || 2615fe3ed62SJake Burkholder strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND], 2625fe3ed62SJake Burkholder bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0)) 2635fe3ed62SJake Burkholder return (bi); 2645fe3ed62SJake Burkholder } 2655fe3ed62SJake Burkholder 2665fe3ed62SJake Burkholder /* Lacking a known brand, search for a recognized interpreter. 
*/ 2675fe3ed62SJake Burkholder if (interp != NULL) { 2685fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 2695fe3ed62SJake Burkholder bi = elf_brand_list[i]; 270ecc2fda8SBjoern A. Zeeb if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) 271ecc2fda8SBjoern A. Zeeb continue; 272ecc2fda8SBjoern A. Zeeb if (hdr->e_machine == bi->machine && 2735fe3ed62SJake Burkholder strcmp(interp, bi->interp_path) == 0) 2745fe3ed62SJake Burkholder return (bi); 2755fe3ed62SJake Burkholder } 2765fe3ed62SJake Burkholder } 2775fe3ed62SJake Burkholder 2785fe3ed62SJake Burkholder /* Lacking a recognized interpreter, try the default brand */ 2795fe3ed62SJake Burkholder for (i = 0; i < MAX_BRANDS; i++) { 2805fe3ed62SJake Burkholder bi = elf_brand_list[i]; 281ecc2fda8SBjoern A. Zeeb if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) 282ecc2fda8SBjoern A. Zeeb continue; 283ecc2fda8SBjoern A. Zeeb if (hdr->e_machine == bi->machine && 284e548a1d4SJake Burkholder __elfN(fallback_brand) == bi->brand) 2855fe3ed62SJake Burkholder return (bi); 2865fe3ed62SJake Burkholder } 2875fe3ed62SJake Burkholder return (NULL); 2885fe3ed62SJake Burkholder } 2895fe3ed62SJake Burkholder 290e1743d02SSøren Schmidt static int 2913ebc1248SPeter Wemm __elfN(check_header)(const Elf_Ehdr *hdr) 292e1743d02SSøren Schmidt { 293d0ca7c29SPeter Wemm Elf_Brandinfo *bi; 2943ebc1248SPeter Wemm int i; 2953ebc1248SPeter Wemm 29652c24af7SPeter Wemm if (!IS_ELF(*hdr) || 29752c24af7SPeter Wemm hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || 29852c24af7SPeter Wemm hdr->e_ident[EI_DATA] != ELF_TARG_DATA || 2993dc19c46SJacques Vidrine hdr->e_ident[EI_VERSION] != EV_CURRENT || 3003dc19c46SJacques Vidrine hdr->e_phentsize != sizeof(Elf_Phdr) || 3013dc19c46SJacques Vidrine hdr->e_version != ELF_TARG_VER) 302a7cddfedSJake Burkholder return (ENOEXEC); 303e1743d02SSøren Schmidt 3043ebc1248SPeter Wemm /* 3053ebc1248SPeter Wemm * Make sure we have at least one brand for this machine. 
3063ebc1248SPeter Wemm */ 3073ebc1248SPeter Wemm 3083ebc1248SPeter Wemm for (i = 0; i < MAX_BRANDS; i++) { 309d0ca7c29SPeter Wemm bi = elf_brand_list[i]; 310d0ca7c29SPeter Wemm if (bi != NULL && bi->machine == hdr->e_machine) 3113ebc1248SPeter Wemm break; 3123ebc1248SPeter Wemm } 3133ebc1248SPeter Wemm if (i == MAX_BRANDS) 314a7cddfedSJake Burkholder return (ENOEXEC); 315e1743d02SSøren Schmidt 316a7cddfedSJake Burkholder return (0); 317e1743d02SSøren Schmidt } 318e1743d02SSøren Schmidt 319e1743d02SSøren Schmidt static int 3203ebc1248SPeter Wemm __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 321ff6f03c7SAlan Cox vm_offset_t start, vm_offset_t end, vm_prot_t prot) 3223ebc1248SPeter Wemm { 323da61b9a6SAlan Cox struct sf_buf *sf; 324da61b9a6SAlan Cox int error; 3253ebc1248SPeter Wemm vm_offset_t off; 3263ebc1248SPeter Wemm 3273ebc1248SPeter Wemm /* 3283ebc1248SPeter Wemm * Create the page if it doesn't exist yet. Ignore errors. 3293ebc1248SPeter Wemm */ 3303ebc1248SPeter Wemm vm_map_lock(map); 331ff6f03c7SAlan Cox vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), 332ff6f03c7SAlan Cox VM_PROT_ALL, VM_PROT_ALL, 0); 3333ebc1248SPeter Wemm vm_map_unlock(map); 3343ebc1248SPeter Wemm 3353ebc1248SPeter Wemm /* 3363ebc1248SPeter Wemm * Find the page from the underlying object. 
3373ebc1248SPeter Wemm */ 3383ebc1248SPeter Wemm if (object) { 339da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset); 340da61b9a6SAlan Cox if (sf == NULL) 341da61b9a6SAlan Cox return (KERN_FAILURE); 3423ebc1248SPeter Wemm off = offset - trunc_page(offset); 343da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, 344ca0387efSJake Burkholder end - start); 345da61b9a6SAlan Cox vm_imgact_unmap_page(sf); 3463ebc1248SPeter Wemm if (error) { 347a7cddfedSJake Burkholder return (KERN_FAILURE); 3483ebc1248SPeter Wemm } 3493ebc1248SPeter Wemm } 3503ebc1248SPeter Wemm 351a7cddfedSJake Burkholder return (KERN_SUCCESS); 3523ebc1248SPeter Wemm } 3533ebc1248SPeter Wemm 3543ebc1248SPeter Wemm static int 3553ebc1248SPeter Wemm __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 356ff6f03c7SAlan Cox vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) 3573ebc1248SPeter Wemm { 358da61b9a6SAlan Cox struct sf_buf *sf; 359da61b9a6SAlan Cox vm_offset_t off; 360a063facbSMarcel Moolenaar vm_size_t sz; 361a063facbSMarcel Moolenaar int error, rv; 3623ebc1248SPeter Wemm 3633ebc1248SPeter Wemm if (start != trunc_page(start)) { 36481f223caSJake Burkholder rv = __elfN(map_partial)(map, object, offset, start, 365ff6f03c7SAlan Cox round_page(start), prot); 3663ebc1248SPeter Wemm if (rv) 367a7cddfedSJake Burkholder return (rv); 3683ebc1248SPeter Wemm offset += round_page(start) - start; 3693ebc1248SPeter Wemm start = round_page(start); 3703ebc1248SPeter Wemm } 3713ebc1248SPeter Wemm if (end != round_page(end)) { 37281f223caSJake Burkholder rv = __elfN(map_partial)(map, object, offset + 373ff6f03c7SAlan Cox trunc_page(end) - start, trunc_page(end), end, prot); 3743ebc1248SPeter Wemm if (rv) 375a7cddfedSJake Burkholder return (rv); 3763ebc1248SPeter Wemm end = trunc_page(end); 3773ebc1248SPeter Wemm } 3783ebc1248SPeter Wemm if (end > start) { 3793ebc1248SPeter Wemm if (offset & PAGE_MASK) { 3803ebc1248SPeter Wemm /* 3813ebc1248SPeter Wemm 
* The mapping is not page aligned. This means we have 3823ebc1248SPeter Wemm * to copy the data. Sigh. 3833ebc1248SPeter Wemm */ 384584716b0SAlan Cox rv = vm_map_find(map, NULL, 0, &start, end - start, 385ff6f03c7SAlan Cox FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0); 3863ebc1248SPeter Wemm if (rv) 387a7cddfedSJake Burkholder return (rv); 388da61b9a6SAlan Cox if (object == NULL) 389da61b9a6SAlan Cox return (KERN_SUCCESS); 390da61b9a6SAlan Cox for (; start < end; start += sz) { 391da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset); 392da61b9a6SAlan Cox if (sf == NULL) 393da61b9a6SAlan Cox return (KERN_FAILURE); 3943ebc1248SPeter Wemm off = offset - trunc_page(offset); 3953ebc1248SPeter Wemm sz = end - start; 396da61b9a6SAlan Cox if (sz > PAGE_SIZE - off) 397da61b9a6SAlan Cox sz = PAGE_SIZE - off; 398da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, 3993ebc1248SPeter Wemm (caddr_t)start, sz); 400da61b9a6SAlan Cox vm_imgact_unmap_page(sf); 4013ebc1248SPeter Wemm if (error) { 402a7cddfedSJake Burkholder return (KERN_FAILURE); 4033ebc1248SPeter Wemm } 404da61b9a6SAlan Cox offset += sz; 4053ebc1248SPeter Wemm } 4063ebc1248SPeter Wemm rv = KERN_SUCCESS; 4073ebc1248SPeter Wemm } else { 408e5e6093bSAlan Cox vm_object_reference(object); 4093ebc1248SPeter Wemm vm_map_lock(map); 4103ebc1248SPeter Wemm rv = vm_map_insert(map, object, offset, start, end, 411ff6f03c7SAlan Cox prot, VM_PROT_ALL, cow); 4123ebc1248SPeter Wemm vm_map_unlock(map); 413e5e6093bSAlan Cox if (rv != KERN_SUCCESS) 414e5e6093bSAlan Cox vm_object_deallocate(object); 4153ebc1248SPeter Wemm } 416a7cddfedSJake Burkholder return (rv); 4173ebc1248SPeter Wemm } else { 418a7cddfedSJake Burkholder return (KERN_SUCCESS); 4193ebc1248SPeter Wemm } 4203ebc1248SPeter Wemm } 4213ebc1248SPeter Wemm 4223ebc1248SPeter Wemm static int 423373d1a3fSAlan Cox __elfN(load_section)(struct vmspace *vmspace, 424373d1a3fSAlan Cox vm_object_t object, vm_offset_t offset, 4253ebc1248SPeter Wemm caddr_t vmaddr, size_t 
memsz, size_t filsz, vm_prot_t prot, 4263ebc1248SPeter Wemm size_t pagesize) 427e1743d02SSøren Schmidt { 428da61b9a6SAlan Cox struct sf_buf *sf; 429e1743d02SSøren Schmidt size_t map_len; 430e1743d02SSøren Schmidt vm_offset_t map_addr; 431fa7dd9c5SMatthew Dillon int error, rv, cow; 432e1743d02SSøren Schmidt size_t copy_len; 43352c24af7SPeter Wemm vm_offset_t file_addr; 43452c24af7SPeter Wemm 43525ead034SBrian Feldman /* 43625ead034SBrian Feldman * It's necessary to fail if the filsz + offset taken from the 43725ead034SBrian Feldman * header is greater than the actual file pager object's size. 43825ead034SBrian Feldman * If we were to allow this, then the vm_map_find() below would 43925ead034SBrian Feldman * walk right off the end of the file object and into the ether. 44025ead034SBrian Feldman * 44125ead034SBrian Feldman * While I'm here, might as well check for something else that 44225ead034SBrian Feldman * is invalid: filsz cannot be greater than memsz. 44325ead034SBrian Feldman */ 44425ead034SBrian Feldman if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size || 44525ead034SBrian Feldman filsz > memsz) { 44625ead034SBrian Feldman uprintf("elf_load_section: truncated ELF file\n"); 44725ead034SBrian Feldman return (ENOEXEC); 44825ead034SBrian Feldman } 44925ead034SBrian Feldman 4503ebc1248SPeter Wemm map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize); 4513ebc1248SPeter Wemm file_addr = trunc_page_ps(offset, pagesize); 452e1743d02SSøren Schmidt 453e1743d02SSøren Schmidt /* 45452c24af7SPeter Wemm * We have two choices. We can either clear the data in the last page 45552c24af7SPeter Wemm * of an oversized mapping, or we can start the anon mapping a page 45652c24af7SPeter Wemm * early and copy the initialized data into that first page. We 45752c24af7SPeter Wemm * choose the second.. 
45852c24af7SPeter Wemm */ 45952c24af7SPeter Wemm if (memsz > filsz) 4603ebc1248SPeter Wemm map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr; 46152c24af7SPeter Wemm else 4623ebc1248SPeter Wemm map_len = round_page_ps(offset + filsz, pagesize) - file_addr; 46352c24af7SPeter Wemm 46452c24af7SPeter Wemm if (map_len != 0) { 465fa7dd9c5SMatthew Dillon /* cow flags: don't dump readonly sections in core */ 466fa7dd9c5SMatthew Dillon cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | 467fa7dd9c5SMatthew Dillon (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); 468fa7dd9c5SMatthew Dillon 4693ebc1248SPeter Wemm rv = __elfN(map_insert)(&vmspace->vm_map, 47052c24af7SPeter Wemm object, 47152c24af7SPeter Wemm file_addr, /* file offset */ 47252c24af7SPeter Wemm map_addr, /* virtual start */ 47352c24af7SPeter Wemm map_addr + map_len,/* virtual end */ 47452c24af7SPeter Wemm prot, 475fa7dd9c5SMatthew Dillon cow); 476e5e6093bSAlan Cox if (rv != KERN_SUCCESS) 477a7cddfedSJake Burkholder return (EINVAL); 47852c24af7SPeter Wemm 47952c24af7SPeter Wemm /* we can stop now if we've covered it all */ 48023955314SAlfred Perlstein if (memsz == filsz) { 481a7cddfedSJake Burkholder return (0); 48252c24af7SPeter Wemm } 48323955314SAlfred Perlstein } 48452c24af7SPeter Wemm 48552c24af7SPeter Wemm 48652c24af7SPeter Wemm /* 48752c24af7SPeter Wemm * We have to get the remaining bit of the file into the first part 48852c24af7SPeter Wemm * of the oversized map segment. This is normally because the .data 48952c24af7SPeter Wemm * segment in the file is extended to provide bss. It's a neat idea 49052c24af7SPeter Wemm * to try and save a page, but it's a pain in the behind to implement. 
491e1743d02SSøren Schmidt */ 4923ebc1248SPeter Wemm copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize); 4933ebc1248SPeter Wemm map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize); 494ca0387efSJake Burkholder map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) - 495ca0387efSJake Burkholder map_addr; 496e1743d02SSøren Schmidt 49752c24af7SPeter Wemm /* This had damn well better be true! */ 4988191d577SPeter Wemm if (map_len != 0) { 49981f223caSJake Burkholder rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr, 500ff6f03c7SAlan Cox map_addr + map_len, VM_PROT_ALL, 0); 50123955314SAlfred Perlstein if (rv != KERN_SUCCESS) { 502a7cddfedSJake Burkholder return (EINVAL); 5038191d577SPeter Wemm } 50423955314SAlfred Perlstein } 505e1743d02SSøren Schmidt 50652c24af7SPeter Wemm if (copy_len != 0) { 5073ebc1248SPeter Wemm vm_offset_t off; 508da61b9a6SAlan Cox 509da61b9a6SAlan Cox sf = vm_imgact_map_page(object, offset + filsz); 510da61b9a6SAlan Cox if (sf == NULL) 511da61b9a6SAlan Cox return (EIO); 512e1743d02SSøren Schmidt 51352c24af7SPeter Wemm /* send the page fragment to user space */ 51481f223caSJake Burkholder off = trunc_page_ps(offset + filsz, pagesize) - 51581f223caSJake Burkholder trunc_page(offset + filsz); 516da61b9a6SAlan Cox error = copyout((caddr_t)sf_buf_kva(sf) + off, 517da61b9a6SAlan Cox (caddr_t)map_addr, copy_len); 518da61b9a6SAlan Cox vm_imgact_unmap_page(sf); 51923955314SAlfred Perlstein if (error) { 52052c24af7SPeter Wemm return (error); 52152c24af7SPeter Wemm } 52223955314SAlfred Perlstein } 523e1743d02SSøren Schmidt 524e1743d02SSøren Schmidt /* 5253ebc1248SPeter Wemm * set it to the specified protection. 5263ebc1248SPeter Wemm * XXX had better undo the damage from pasting over the cracks here! 
527e1743d02SSøren Schmidt */ 5283ebc1248SPeter Wemm vm_map_protect(&vmspace->vm_map, trunc_page(map_addr), 5293ebc1248SPeter Wemm round_page(map_addr + map_len), prot, FALSE); 5308191d577SPeter Wemm 531ff6f03c7SAlan Cox return (0); 532e1743d02SSøren Schmidt } 533e1743d02SSøren Schmidt 534c33fe779SJohn Polstra /* 535c33fe779SJohn Polstra * Load the file "file" into memory. It may be either a shared object 536c33fe779SJohn Polstra * or an executable. 537c33fe779SJohn Polstra * 538c33fe779SJohn Polstra * The "addr" reference parameter is in/out. On entry, it specifies 539c33fe779SJohn Polstra * the address where a shared object should be loaded. If the file is 540c33fe779SJohn Polstra * an executable, this value is ignored. On exit, "addr" specifies 541c33fe779SJohn Polstra * where the file was actually loaded. 542c33fe779SJohn Polstra * 543c33fe779SJohn Polstra * The "entry" reference parameter is out only. On exit, it specifies 544c33fe779SJohn Polstra * the entry point for the loaded file. 545c33fe779SJohn Polstra */ 546e1743d02SSøren Schmidt static int 5473ebc1248SPeter Wemm __elfN(load_file)(struct proc *p, const char *file, u_long *addr, 5483ebc1248SPeter Wemm u_long *entry, size_t pagesize) 549e1743d02SSøren Schmidt { 550911c2be0SMark Peek struct { 551911c2be0SMark Peek struct nameidata nd; 552911c2be0SMark Peek struct vattr attr; 553911c2be0SMark Peek struct image_params image_params; 554911c2be0SMark Peek } *tempdata; 555d254af07SMatthew Dillon const Elf_Ehdr *hdr = NULL; 556d254af07SMatthew Dillon const Elf_Phdr *phdr = NULL; 557911c2be0SMark Peek struct nameidata *nd; 558e1743d02SSøren Schmidt struct vmspace *vmspace = p->p_vmspace; 559911c2be0SMark Peek struct vattr *attr; 560911c2be0SMark Peek struct image_params *imgp; 56152c24af7SPeter Wemm vm_prot_t prot; 562c33fe779SJohn Polstra u_long rbase; 563c33fe779SJohn Polstra u_long base_addr = 0; 56468ff2a43SChristian S.J. 
Peron int vfslocked, error, i, numsegs; 565e1743d02SSøren Schmidt 566a163d034SWarner Losh tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); 567911c2be0SMark Peek nd = &tempdata->nd; 568911c2be0SMark Peek attr = &tempdata->attr; 569911c2be0SMark Peek imgp = &tempdata->image_params; 570911c2be0SMark Peek 571c8a79999SPeter Wemm /* 572c8a79999SPeter Wemm * Initialize part of the common data 573c8a79999SPeter Wemm */ 574c8a79999SPeter Wemm imgp->proc = p; 575911c2be0SMark Peek imgp->attr = attr; 576c8a79999SPeter Wemm imgp->firstpage = NULL; 57759c8bc40SAlan Cox imgp->image_header = NULL; 5780b2ed1aeSJeff Roberson imgp->object = NULL; 5796d7bdc8dSRobert Watson imgp->execlabel = NULL; 580c8a79999SPeter Wemm 58168ff2a43SChristian S.J. Peron NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, 58268ff2a43SChristian S.J. Peron curthread); 58368ff2a43SChristian S.J. Peron vfslocked = 0; 584911c2be0SMark Peek if ((error = namei(nd)) != 0) { 585911c2be0SMark Peek nd->ni_vp = NULL; 586e1743d02SSøren Schmidt goto fail; 587e1743d02SSøren Schmidt } 58868ff2a43SChristian S.J. Peron vfslocked = NDHASGIANT(nd); 589911c2be0SMark Peek NDFREE(nd, NDF_ONLY_PNBUF); 590911c2be0SMark Peek imgp->vp = nd->ni_vp; 591c8a79999SPeter Wemm 592e1743d02SSøren Schmidt /* 593e1743d02SSøren Schmidt * Check permissions, modes, uid, etc on the file, and "open" it. 594e1743d02SSøren Schmidt */ 595c8a79999SPeter Wemm error = exec_check_permissions(imgp); 596373d1a3fSAlan Cox if (error) 597c8a79999SPeter Wemm goto fail; 598e1743d02SSøren Schmidt 599c8a79999SPeter Wemm error = exec_map_first_page(imgp); 600373d1a3fSAlan Cox if (error) 601373d1a3fSAlan Cox goto fail; 602373d1a3fSAlan Cox 60325ead034SBrian Feldman /* 60425ead034SBrian Feldman * Also make certain that the interpreter stays the same, so set 605e6e370a7SJeff Roberson * its VV_TEXT flag, too. 
60625ead034SBrian Feldman */ 607e6e370a7SJeff Roberson nd->ni_vp->v_vflag |= VV_TEXT; 608e6e370a7SJeff Roberson 6098516dd18SPoul-Henning Kamp imgp->object = nd->ni_vp->v_object; 610e1743d02SSøren Schmidt 611d254af07SMatthew Dillon hdr = (const Elf_Ehdr *)imgp->image_header; 6123ebc1248SPeter Wemm if ((error = __elfN(check_header)(hdr)) != 0) 613e1743d02SSøren Schmidt goto fail; 614c33fe779SJohn Polstra if (hdr->e_type == ET_DYN) 615c33fe779SJohn Polstra rbase = *addr; 616c33fe779SJohn Polstra else if (hdr->e_type == ET_EXEC) 617c33fe779SJohn Polstra rbase = 0; 618c33fe779SJohn Polstra else { 619c33fe779SJohn Polstra error = ENOEXEC; 620c33fe779SJohn Polstra goto fail; 621c33fe779SJohn Polstra } 622e1743d02SSøren Schmidt 623c8a79999SPeter Wemm /* Only support headers that fit within first page for now */ 6243dc19c46SJacques Vidrine /* (multiplication of two Elf_Half fields will not overflow) */ 62552c24af7SPeter Wemm if ((hdr->e_phoff > PAGE_SIZE) || 6263dc19c46SJacques Vidrine (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) { 627c8a79999SPeter Wemm error = ENOEXEC; 628e1743d02SSøren Schmidt goto fail; 629c8a79999SPeter Wemm } 630c8a79999SPeter Wemm 631d254af07SMatthew Dillon phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 63293d1c728SKonstantin Belousov if (!aligned(phdr, Elf_Addr)) { 63393d1c728SKonstantin Belousov error = ENOEXEC; 63493d1c728SKonstantin Belousov goto fail; 63593d1c728SKonstantin Belousov } 636e1743d02SSøren Schmidt 637c33fe779SJohn Polstra for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) { 6385b33842aSKonstantin Belousov if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) { 6395b33842aSKonstantin Belousov /* Loadable segment */ 64052c24af7SPeter Wemm prot = 0; 641e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_X) 642e1743d02SSøren Schmidt prot |= VM_PROT_EXECUTE; 643e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_W) 644e1743d02SSøren Schmidt prot |= VM_PROT_WRITE; 645e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_R) 
646e1743d02SSøren Schmidt prot |= VM_PROT_READ; 647e1743d02SSøren Schmidt 648373d1a3fSAlan Cox if ((error = __elfN(load_section)(vmspace, 649373d1a3fSAlan Cox imgp->object, phdr[i].p_offset, 65081f223caSJake Burkholder (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, 65181f223caSJake Burkholder phdr[i].p_memsz, phdr[i].p_filesz, prot, 65281f223caSJake Burkholder pagesize)) != 0) 653e1743d02SSøren Schmidt goto fail; 654e1743d02SSøren Schmidt /* 655c33fe779SJohn Polstra * Establish the base address if this is the 656c33fe779SJohn Polstra * first segment. 657e1743d02SSøren Schmidt */ 658c33fe779SJohn Polstra if (numsegs == 0) 659ca0387efSJake Burkholder base_addr = trunc_page(phdr[i].p_vaddr + 660ca0387efSJake Burkholder rbase); 661c33fe779SJohn Polstra numsegs++; 662e1743d02SSøren Schmidt } 663e1743d02SSøren Schmidt } 664c33fe779SJohn Polstra *addr = base_addr; 665c33fe779SJohn Polstra *entry = (unsigned long)hdr->e_entry + rbase; 666e1743d02SSøren Schmidt 667e1743d02SSøren Schmidt fail: 668c8a79999SPeter Wemm if (imgp->firstpage) 669c8a79999SPeter Wemm exec_unmap_first_page(imgp); 6700b2ed1aeSJeff Roberson 671911c2be0SMark Peek if (nd->ni_vp) 672373d1a3fSAlan Cox vput(nd->ni_vp); 673911c2be0SMark Peek 67468ff2a43SChristian S.J. 
Peron VFS_UNLOCK_GIANT(vfslocked); 675911c2be0SMark Peek free(tempdata, M_TEMP); 676e1743d02SSøren Schmidt 677a7cddfedSJake Burkholder return (error); 678e1743d02SSøren Schmidt } 679e1743d02SSøren Schmidt 680303b270bSEivind Eklund static int 6813ebc1248SPeter Wemm __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) 682e1743d02SSøren Schmidt { 683ecbb00a2SDoug Rabson const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; 68432c01de2SDmitry Chagin const Elf_Phdr *phdr; 685e5e6093bSAlan Cox Elf_Auxargs *elf_auxargs; 6865856e12eSJohn Dyson struct vmspace *vmspace; 68752c24af7SPeter Wemm vm_prot_t prot; 68821c2d047SMatthew Dillon u_long text_size = 0, data_size = 0, total_size = 0; 689e1743d02SSøren Schmidt u_long text_addr = 0, data_addr = 0; 690cac45152SMatthew Dillon u_long seg_size, seg_addr; 6917564c4adSKonstantin Belousov u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0; 69232c01de2SDmitry Chagin int32_t osrel = 0; 6937564c4adSKonstantin Belousov int error = 0, i, n; 6944113f8d7SPeter Wemm const char *interp = NULL, *newinterp = NULL; 695d1dbc694SJohn Polstra Elf_Brandinfo *brand_info; 696911c2be0SMark Peek char *path; 6975fe3ed62SJake Burkholder struct sysentvec *sv; 698e1743d02SSøren Schmidt 699e1743d02SSøren Schmidt /* 700e1743d02SSøren Schmidt * Do we have a valid ELF header ? 701900b28f9SMaxim Sobolev * 702900b28f9SMaxim Sobolev * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later 703900b28f9SMaxim Sobolev * if particular brand doesn't support it. 704e1743d02SSøren Schmidt */ 705900b28f9SMaxim Sobolev if (__elfN(check_header)(hdr) != 0 || 706900b28f9SMaxim Sobolev (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) 707a7cddfedSJake Burkholder return (-1); 708e1743d02SSøren Schmidt 709e1743d02SSøren Schmidt /* 710e1743d02SSøren Schmidt * From here on down, we return an errno, not -1, as we've 711e1743d02SSøren Schmidt * detected an ELF file. 
712e1743d02SSøren Schmidt */ 713e1743d02SSøren Schmidt 714e1743d02SSøren Schmidt if ((hdr->e_phoff > PAGE_SIZE) || 71552c24af7SPeter Wemm (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) { 716c8a79999SPeter Wemm /* Only support headers in first page for now */ 717a7cddfedSJake Burkholder return (ENOEXEC); 718e1743d02SSøren Schmidt } 71952c24af7SPeter Wemm phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); 72093d1c728SKonstantin Belousov if (!aligned(phdr, Elf_Addr)) 72193d1c728SKonstantin Belousov return (ENOEXEC); 7227564c4adSKonstantin Belousov n = 0; 7237564c4adSKonstantin Belousov baddr = 0; 7245fe3ed62SJake Burkholder for (i = 0; i < hdr->e_phnum; i++) { 7257564c4adSKonstantin Belousov if (phdr[i].p_type == PT_LOAD) { 7267564c4adSKonstantin Belousov if (n == 0) 7277564c4adSKonstantin Belousov baddr = phdr[i].p_vaddr; 7287564c4adSKonstantin Belousov n++; 7297564c4adSKonstantin Belousov continue; 7307564c4adSKonstantin Belousov } 731e5e6093bSAlan Cox if (phdr[i].p_type == PT_INTERP) { 732e5e6093bSAlan Cox /* Path to interpreter */ 7335fe3ed62SJake Burkholder if (phdr[i].p_filesz > MAXPATHLEN || 73460bb3943SAlan Cox phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) 73560bb3943SAlan Cox return (ENOEXEC); 7365fe3ed62SJake Burkholder interp = imgp->image_header + phdr[i].p_offset; 7377564c4adSKonstantin Belousov continue; 7383ebc1248SPeter Wemm } 7393ebc1248SPeter Wemm } 7403ebc1248SPeter Wemm 74132c01de2SDmitry Chagin brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel); 7425fe3ed62SJake Burkholder if (brand_info == NULL) { 7435fe3ed62SJake Burkholder uprintf("ELF binary type \"%u\" not known.\n", 7445fe3ed62SJake Burkholder hdr->e_ident[EI_OSABI]); 74560bb3943SAlan Cox return (ENOEXEC); 7463ebc1248SPeter Wemm } 747ab02d85fSKonstantin Belousov if (hdr->e_type == ET_DYN) { 748ab02d85fSKonstantin Belousov if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) 749d49b2109SMaxim Sobolev return (ENOEXEC); 7507564c4adSKonstantin Belousov /* 
7517564c4adSKonstantin Belousov * Honour the base load address from the dso if it is 7527564c4adSKonstantin Belousov * non-zero for some reason. 7537564c4adSKonstantin Belousov */ 7547564c4adSKonstantin Belousov if (baddr == 0) 755ab02d85fSKonstantin Belousov et_dyn_addr = ET_DYN_LOAD_ADDR; 7567564c4adSKonstantin Belousov else 7577564c4adSKonstantin Belousov et_dyn_addr = 0; 758ab02d85fSKonstantin Belousov } else 759ab02d85fSKonstantin Belousov et_dyn_addr = 0; 7605fe3ed62SJake Burkholder sv = brand_info->sysvec; 7619b68618dSPeter Wemm if (interp != NULL && brand_info->interp_newpath != NULL) 7624113f8d7SPeter Wemm newinterp = brand_info->interp_newpath; 7633ebc1248SPeter Wemm 76460bb3943SAlan Cox /* 76560bb3943SAlan Cox * Avoid a possible deadlock if the current address space is destroyed 76660bb3943SAlan Cox * and that address space maps the locked vnode. In the common case, 76760bb3943SAlan Cox * the locked vnode's v_usecount is decremented but remains greater 76860bb3943SAlan Cox * than zero. Consequently, the vnode lock is not needed by vrele(). 76960bb3943SAlan Cox * However, in cases where the vnode lock is external, such as nullfs, 77060bb3943SAlan Cox * v_usecount may become zero. 
77160bb3943SAlan Cox */ 77222db15c0SAttilio Rao VOP_UNLOCK(imgp->vp, 0); 77360bb3943SAlan Cox 77489b57fcfSKonstantin Belousov error = exec_new_vmspace(imgp, sv); 77519059a13SJohn Baldwin imgp->proc->p_sysent = sv; 776e1743d02SSøren Schmidt 777cb05b60aSAttilio Rao vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); 77889b57fcfSKonstantin Belousov if (error) 77989b57fcfSKonstantin Belousov return (error); 78060bb3943SAlan Cox 7815856e12eSJohn Dyson vmspace = imgp->proc->p_vmspace; 7825856e12eSJohn Dyson 783e1743d02SSøren Schmidt for (i = 0; i < hdr->e_phnum; i++) { 784e1743d02SSøren Schmidt switch (phdr[i].p_type) { 785e1743d02SSøren Schmidt case PT_LOAD: /* Loadable segment */ 7865b33842aSKonstantin Belousov if (phdr[i].p_memsz == 0) 7875b33842aSKonstantin Belousov break; 78852c24af7SPeter Wemm prot = 0; 789e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_X) 790e1743d02SSøren Schmidt prot |= VM_PROT_EXECUTE; 791e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_W) 792e1743d02SSøren Schmidt prot |= VM_PROT_WRITE; 793e1743d02SSøren Schmidt if (phdr[i].p_flags & PF_R) 794e1743d02SSøren Schmidt prot |= VM_PROT_READ; 795e1743d02SSøren Schmidt 7963ebc1248SPeter Wemm #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER) 7973ebc1248SPeter Wemm /* 7983ebc1248SPeter Wemm * Some x86 binaries assume read == executable, 7993ebc1248SPeter Wemm * notably the M3 runtime and therefore cvsup 8003ebc1248SPeter Wemm */ 8013ebc1248SPeter Wemm if (prot & VM_PROT_READ) 8023ebc1248SPeter Wemm prot |= VM_PROT_EXECUTE; 8033ebc1248SPeter Wemm #endif 8043ebc1248SPeter Wemm 805373d1a3fSAlan Cox if ((error = __elfN(load_section)(vmspace, 806373d1a3fSAlan Cox imgp->object, phdr[i].p_offset, 807ab02d85fSKonstantin Belousov (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr, 80881f223caSJake Burkholder phdr[i].p_memsz, phdr[i].p_filesz, prot, 8095fe3ed62SJake Burkholder sv->sv_pagesize)) != 0) 81060bb3943SAlan Cox return (error); 811e1743d02SSøren Schmidt 812cfaf7e60SDoug Rabson /* 
813cfaf7e60SDoug Rabson * If this segment contains the program headers, 814cfaf7e60SDoug Rabson * remember their virtual address for the AT_PHDR 815cfaf7e60SDoug Rabson * aux entry. Static binaries don't usually include 816cfaf7e60SDoug Rabson * a PT_PHDR entry. 817cfaf7e60SDoug Rabson */ 818cfaf7e60SDoug Rabson if (phdr[i].p_offset == 0 && 819cfaf7e60SDoug Rabson hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize 820cfaf7e60SDoug Rabson <= phdr[i].p_filesz) 821ab02d85fSKonstantin Belousov proghdr = phdr[i].p_vaddr + hdr->e_phoff + 822ab02d85fSKonstantin Belousov et_dyn_addr; 823cfaf7e60SDoug Rabson 824ab02d85fSKonstantin Belousov seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr); 825cac45152SMatthew Dillon seg_size = round_page(phdr[i].p_memsz + 826ab02d85fSKonstantin Belousov phdr[i].p_vaddr + et_dyn_addr - seg_addr); 827cac45152SMatthew Dillon 828e1743d02SSøren Schmidt /* 82921c2d047SMatthew Dillon * Is this .text or .data? We can't use 83021c2d047SMatthew Dillon * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the 83121c2d047SMatthew Dillon * alpha terribly and possibly does other bad 83221c2d047SMatthew Dillon * things so we stick to the old way of figuring 83321c2d047SMatthew Dillon * it out: If the segment contains the program 83421c2d047SMatthew Dillon * entry point, it's a text segment, otherwise it 83521c2d047SMatthew Dillon * is a data segment. 83621c2d047SMatthew Dillon * 83721c2d047SMatthew Dillon * Note that obreak() assumes that data_addr + 83821c2d047SMatthew Dillon * data_size == end of data load area, and the ELF 83921c2d047SMatthew Dillon * file format expects segments to be sorted by 84021c2d047SMatthew Dillon * address. If multiple data segments exist, the 84121c2d047SMatthew Dillon * last one will be used. 
842e1743d02SSøren Schmidt */ 8439782ecbaSPeter Wemm if (hdr->e_entry >= phdr[i].p_vaddr && 8449782ecbaSPeter Wemm hdr->e_entry < (phdr[i].p_vaddr + 8459782ecbaSPeter Wemm phdr[i].p_memsz)) { 8469782ecbaSPeter Wemm text_size = seg_size; 8479782ecbaSPeter Wemm text_addr = seg_addr; 848ab02d85fSKonstantin Belousov entry = (u_long)hdr->e_entry + et_dyn_addr; 8499782ecbaSPeter Wemm } else { 85021c2d047SMatthew Dillon data_size = seg_size; 851cac45152SMatthew Dillon data_addr = seg_addr; 852cac45152SMatthew Dillon } 85321c2d047SMatthew Dillon total_size += seg_size; 85496725dd0SAlexander Kabaev break; 85596725dd0SAlexander Kabaev case PT_PHDR: /* Program header table info */ 856ab02d85fSKonstantin Belousov proghdr = phdr[i].p_vaddr + et_dyn_addr; 85796725dd0SAlexander Kabaev break; 85896725dd0SAlexander Kabaev default: 85996725dd0SAlexander Kabaev break; 86096725dd0SAlexander Kabaev } 86196725dd0SAlexander Kabaev } 86296725dd0SAlexander Kabaev 86396725dd0SAlexander Kabaev if (data_addr == 0 && data_size == 0) { 86496725dd0SAlexander Kabaev data_addr = text_addr; 86596725dd0SAlexander Kabaev data_size = text_size; 86696725dd0SAlexander Kabaev } 867cac45152SMatthew Dillon 868cac45152SMatthew Dillon /* 869cac45152SMatthew Dillon * Check limits. It should be safe to check the 87096725dd0SAlexander Kabaev * limits after loading the segments since we do 87196725dd0SAlexander Kabaev * not actually fault in all the segments pages. 
872cac45152SMatthew Dillon */ 87391d5354aSJohn Baldwin PROC_LOCK(imgp->proc); 87491d5354aSJohn Baldwin if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) || 875cac45152SMatthew Dillon text_size > maxtsiz || 87691d5354aSJohn Baldwin total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) { 87791d5354aSJohn Baldwin PROC_UNLOCK(imgp->proc); 87860bb3943SAlan Cox return (ENOMEM); 879cac45152SMatthew Dillon } 880e1743d02SSøren Schmidt 881e1743d02SSøren Schmidt vmspace->vm_tsize = text_size >> PAGE_SHIFT; 8827cd99438SBruce Evans vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; 883e1743d02SSøren Schmidt vmspace->vm_dsize = data_size >> PAGE_SHIFT; 8847cd99438SBruce Evans vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; 885e1743d02SSøren Schmidt 886c460ac3aSPeter Wemm /* 887c460ac3aSPeter Wemm * We load the dynamic linker where a userland call 888c460ac3aSPeter Wemm * to mmap(0, ...) would put it. The rationale behind this 889c460ac3aSPeter Wemm * calculation is that it leaves room for the heap to grow to 890c460ac3aSPeter Wemm * its maximum allowed size. 
891c460ac3aSPeter Wemm */ 892c460ac3aSPeter Wemm addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr + 89391d5354aSJohn Baldwin lim_max(imgp->proc, RLIMIT_DATA)); 89491d5354aSJohn Baldwin PROC_UNLOCK(imgp->proc); 895e1743d02SSøren Schmidt 896ea5a2b2eSSøren Schmidt imgp->entry_addr = entry; 897ea5a2b2eSSøren Schmidt 89860bb3943SAlan Cox if (interp != NULL) { 8994113f8d7SPeter Wemm int have_interp = FALSE; 90022db15c0SAttilio Rao VOP_UNLOCK(imgp->vp, 0); 90160bb3943SAlan Cox if (brand_info->emul_path != NULL && 9029b68618dSPeter Wemm brand_info->emul_path[0] != '\0') { 903a163d034SWarner Losh path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 90460bb3943SAlan Cox snprintf(path, MAXPATHLEN, "%s%s", 90560bb3943SAlan Cox brand_info->emul_path, interp); 9069b68618dSPeter Wemm error = __elfN(load_file)(imgp->proc, path, &addr, 9079b68618dSPeter Wemm &imgp->entry_addr, sv->sv_pagesize); 908911c2be0SMark Peek free(path, M_TEMP); 9099b68618dSPeter Wemm if (error == 0) 9104113f8d7SPeter Wemm have_interp = TRUE; 9119b68618dSPeter Wemm } 9124113f8d7SPeter Wemm if (!have_interp && newinterp != NULL) { 9134113f8d7SPeter Wemm error = __elfN(load_file)(imgp->proc, newinterp, &addr, 9144113f8d7SPeter Wemm &imgp->entry_addr, sv->sv_pagesize); 915387ad998SKonstantin Belousov if (error == 0) 9164113f8d7SPeter Wemm have_interp = TRUE; 9174113f8d7SPeter Wemm } 9184113f8d7SPeter Wemm if (!have_interp) { 9199b68618dSPeter Wemm error = __elfN(load_file)(imgp->proc, interp, &addr, 9209b68618dSPeter Wemm &imgp->entry_addr, sv->sv_pagesize); 92160bb3943SAlan Cox } 922cb05b60aSAttilio Rao vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); 9239b68618dSPeter Wemm if (error != 0) { 9249b68618dSPeter Wemm uprintf("ELF interpreter %s not found\n", interp); 92560bb3943SAlan Cox return (error); 926e1743d02SSøren Schmidt } 92795c807cfSRobert Watson } else 9287564c4adSKonstantin Belousov addr = et_dyn_addr; 929ea5a2b2eSSøren Schmidt 930e1743d02SSøren Schmidt /* 931e1743d02SSøren Schmidt * Construct 
auxargs table (used by the fixup routine) 932e1743d02SSøren Schmidt */ 933a163d034SWarner Losh elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); 934e1743d02SSøren Schmidt elf_auxargs->execfd = -1; 935e1743d02SSøren Schmidt elf_auxargs->phdr = proghdr; 936e1743d02SSøren Schmidt elf_auxargs->phent = hdr->e_phentsize; 937e1743d02SSøren Schmidt elf_auxargs->phnum = hdr->e_phnum; 938e1743d02SSøren Schmidt elf_auxargs->pagesz = PAGE_SIZE; 939e1743d02SSøren Schmidt elf_auxargs->base = addr; 940e1743d02SSøren Schmidt elf_auxargs->flags = 0; 941e1743d02SSøren Schmidt elf_auxargs->entry = entry; 942e1743d02SSøren Schmidt 943e1743d02SSøren Schmidt imgp->auxargs = elf_auxargs; 944e1743d02SSøren Schmidt imgp->interpreted = 0; 94532c01de2SDmitry Chagin imgp->proc->p_osrel = osrel; 946f231de47SKonstantin Belousov 947a7cddfedSJake Burkholder return (error); 948e1743d02SSøren Schmidt } 949e1743d02SSøren Schmidt 950a360a43dSJake Burkholder #define suword __CONCAT(suword, __ELF_WORD_SIZE) 9513ebc1248SPeter Wemm 9523ebc1248SPeter Wemm int 9533ebc1248SPeter Wemm __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp) 954e1743d02SSøren Schmidt { 955ecbb00a2SDoug Rabson Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; 956a360a43dSJake Burkholder Elf_Addr *base; 957a360a43dSJake Burkholder Elf_Addr *pos; 958e1743d02SSøren Schmidt 959a360a43dSJake Burkholder base = (Elf_Addr *)*stack_base; 960610ecfe0SMaxim Sobolev pos = base + (imgp->args->argc + imgp->args->envc + 2); 961e1743d02SSøren Schmidt 96235c2a5a8SWarner Losh if (args->execfd != -1) 963e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); 964e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); 965e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHENT, args->phent); 966e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); 967e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); 968e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); 
969e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); 970e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_BASE, args->base); 9713ff06357SKonstantin Belousov if (imgp->execpathp != 0) 9723ff06357SKonstantin Belousov AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp); 973e1743d02SSøren Schmidt AUXARGS_ENTRY(pos, AT_NULL, 0); 974e1743d02SSøren Schmidt 975e1743d02SSøren Schmidt free(imgp->auxargs, M_TEMP); 976e1743d02SSøren Schmidt imgp->auxargs = NULL; 977e1743d02SSøren Schmidt 9783ebc1248SPeter Wemm base--; 979610ecfe0SMaxim Sobolev suword(base, (long)imgp->args->argc); 9803ebc1248SPeter Wemm *stack_base = (register_t *)base; 981a7cddfedSJake Burkholder return (0); 982e1743d02SSøren Schmidt } 983e1743d02SSøren Schmidt 984e1743d02SSøren Schmidt /* 9858c64af4fSJohn Polstra * Code for generating ELF core dumps. 9868c64af4fSJohn Polstra */ 9878c64af4fSJohn Polstra 9884d77a549SAlfred Perlstein typedef void (*segment_callback)(vm_map_entry_t, void *); 9890ff27d31SJohn Polstra 9900ff27d31SJohn Polstra /* Closure for cb_put_phdr(). */ 9910ff27d31SJohn Polstra struct phdr_closure { 9920ff27d31SJohn Polstra Elf_Phdr *phdr; /* Program header to fill in */ 9930ff27d31SJohn Polstra Elf_Off offset; /* Offset of segment in core file */ 9940ff27d31SJohn Polstra }; 9950ff27d31SJohn Polstra 9960ff27d31SJohn Polstra /* Closure for cb_size_segment(). */ 9970ff27d31SJohn Polstra struct sseg_closure { 9980ff27d31SJohn Polstra int count; /* Count of writable segments. */ 9990ff27d31SJohn Polstra size_t size; /* Total size of all writable segments. 
*/ 10000ff27d31SJohn Polstra }; 10010ff27d31SJohn Polstra 10024d77a549SAlfred Perlstein static void cb_put_phdr(vm_map_entry_t, void *); 10034d77a549SAlfred Perlstein static void cb_size_segment(vm_map_entry_t, void *); 1004247aba24SMarcel Moolenaar static void each_writable_segment(struct thread *, segment_callback, void *); 10053ebc1248SPeter Wemm static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *, 10064d77a549SAlfred Perlstein int, void *, size_t); 1007247aba24SMarcel Moolenaar static void __elfN(puthdr)(struct thread *, void *, size_t *, int); 10083ebc1248SPeter Wemm static void __elfN(putnote)(void *, size_t *, const char *, int, 10094d77a549SAlfred Perlstein const void *, size_t); 10108c64af4fSJohn Polstra 10118c64af4fSJohn Polstra int 10123ebc1248SPeter Wemm __elfN(coredump)(td, vp, limit) 1013b40ce416SJulian Elischer struct thread *td; 1014247aba24SMarcel Moolenaar struct vnode *vp; 1015fca666a1SJulian Elischer off_t limit; 1016fca666a1SJulian Elischer { 1017247aba24SMarcel Moolenaar struct ucred *cred = td->td_ucred; 1018fca666a1SJulian Elischer int error = 0; 10190ff27d31SJohn Polstra struct sseg_closure seginfo; 10200ff27d31SJohn Polstra void *hdr; 10218c64af4fSJohn Polstra size_t hdrsize; 10228c64af4fSJohn Polstra 10230ff27d31SJohn Polstra /* Size the program segments. */ 10240ff27d31SJohn Polstra seginfo.count = 0; 10250ff27d31SJohn Polstra seginfo.size = 0; 1026247aba24SMarcel Moolenaar each_writable_segment(td, cb_size_segment, &seginfo); 10270ff27d31SJohn Polstra 10280ff27d31SJohn Polstra /* 10290ff27d31SJohn Polstra * Calculate the size of the core file header area by making 10300ff27d31SJohn Polstra * a dry run of generating it. Nothing is written, but the 10310ff27d31SJohn Polstra * size is calculated. 
10320ff27d31SJohn Polstra */ 10330ff27d31SJohn Polstra hdrsize = 0; 1034247aba24SMarcel Moolenaar __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count); 10350ff27d31SJohn Polstra 1036fca666a1SJulian Elischer if (hdrsize + seginfo.size >= limit) 10378c64af4fSJohn Polstra return (EFAULT); 10380ff27d31SJohn Polstra 10390ff27d31SJohn Polstra /* 10400ff27d31SJohn Polstra * Allocate memory for building the header, fill it up, 10410ff27d31SJohn Polstra * and write it out. 10420ff27d31SJohn Polstra */ 1043a163d034SWarner Losh hdr = malloc(hdrsize, M_TEMP, M_WAITOK); 10440ff27d31SJohn Polstra if (hdr == NULL) { 1045a7cddfedSJake Burkholder return (EINVAL); 10460ff27d31SJohn Polstra } 10473ebc1248SPeter Wemm error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize); 10480ff27d31SJohn Polstra 10490ff27d31SJohn Polstra /* Write the contents of all of the writable segments. */ 10500ff27d31SJohn Polstra if (error == 0) { 10510ff27d31SJohn Polstra Elf_Phdr *php; 10522b471bc6STim J. Robbins off_t offset; 10530ff27d31SJohn Polstra int i; 10540ff27d31SJohn Polstra 10550ff27d31SJohn Polstra php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; 10560ff27d31SJohn Polstra offset = hdrsize; 10570ff27d31SJohn Polstra for (i = 0; i < seginfo.count; i++) { 105806ae1e91SMatthew Dillon error = vn_rdwr_inchunks(UIO_WRITE, vp, 10592b471bc6STim J. Robbins (caddr_t)(uintptr_t)php->p_vaddr, 10602b471bc6STim J. Robbins php->p_filesz, offset, UIO_USERSPACE, 1061f99619a0STim J. Robbins IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, 10626617724cSJeff Roberson curthread); 10630ff27d31SJohn Polstra if (error != 0) 10642b471bc6STim J. 
Robbins break; 10650ff27d31SJohn Polstra offset += php->p_filesz; 10660ff27d31SJohn Polstra php++; 10670ff27d31SJohn Polstra } 10680ff27d31SJohn Polstra } 10690ff27d31SJohn Polstra free(hdr, M_TEMP); 10700ff27d31SJohn Polstra 1071a7cddfedSJake Burkholder return (error); 10728c64af4fSJohn Polstra } 10738c64af4fSJohn Polstra 10740ff27d31SJohn Polstra /* 10750ff27d31SJohn Polstra * A callback for each_writable_segment() to write out the segment's 10760ff27d31SJohn Polstra * program header entry. 10770ff27d31SJohn Polstra */ 10780ff27d31SJohn Polstra static void 10790ff27d31SJohn Polstra cb_put_phdr(entry, closure) 10800ff27d31SJohn Polstra vm_map_entry_t entry; 10810ff27d31SJohn Polstra void *closure; 10820ff27d31SJohn Polstra { 10830ff27d31SJohn Polstra struct phdr_closure *phc = (struct phdr_closure *)closure; 10840ff27d31SJohn Polstra Elf_Phdr *phdr = phc->phdr; 10850ff27d31SJohn Polstra 10860ff27d31SJohn Polstra phc->offset = round_page(phc->offset); 10870ff27d31SJohn Polstra 10880ff27d31SJohn Polstra phdr->p_type = PT_LOAD; 10890ff27d31SJohn Polstra phdr->p_offset = phc->offset; 10900ff27d31SJohn Polstra phdr->p_vaddr = entry->start; 10910ff27d31SJohn Polstra phdr->p_paddr = 0; 10920ff27d31SJohn Polstra phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; 10930ff27d31SJohn Polstra phdr->p_align = PAGE_SIZE; 10940ff27d31SJohn Polstra phdr->p_flags = 0; 10950ff27d31SJohn Polstra if (entry->protection & VM_PROT_READ) 10960ff27d31SJohn Polstra phdr->p_flags |= PF_R; 10970ff27d31SJohn Polstra if (entry->protection & VM_PROT_WRITE) 10980ff27d31SJohn Polstra phdr->p_flags |= PF_W; 10990ff27d31SJohn Polstra if (entry->protection & VM_PROT_EXECUTE) 11000ff27d31SJohn Polstra phdr->p_flags |= PF_X; 11010ff27d31SJohn Polstra 11020ff27d31SJohn Polstra phc->offset += phdr->p_filesz; 11030ff27d31SJohn Polstra phc->phdr++; 11040ff27d31SJohn Polstra } 11050ff27d31SJohn Polstra 11060ff27d31SJohn Polstra /* 11070ff27d31SJohn Polstra * A callback for each_writable_segment() 
to gather information about 11080ff27d31SJohn Polstra * the number of segments and their total size. 11090ff27d31SJohn Polstra */ 11100ff27d31SJohn Polstra static void 11110ff27d31SJohn Polstra cb_size_segment(entry, closure) 11120ff27d31SJohn Polstra vm_map_entry_t entry; 11130ff27d31SJohn Polstra void *closure; 11140ff27d31SJohn Polstra { 11150ff27d31SJohn Polstra struct sseg_closure *ssc = (struct sseg_closure *)closure; 11160ff27d31SJohn Polstra 11170ff27d31SJohn Polstra ssc->count++; 11180ff27d31SJohn Polstra ssc->size += entry->end - entry->start; 11190ff27d31SJohn Polstra } 11200ff27d31SJohn Polstra 11210ff27d31SJohn Polstra /* 11220ff27d31SJohn Polstra * For each writable segment in the process's memory map, call the given 11230ff27d31SJohn Polstra * function with a pointer to the map entry and some arbitrary 11240ff27d31SJohn Polstra * caller-supplied data. 11250ff27d31SJohn Polstra */ 11260ff27d31SJohn Polstra static void 1127247aba24SMarcel Moolenaar each_writable_segment(td, func, closure) 1128247aba24SMarcel Moolenaar struct thread *td; 11290ff27d31SJohn Polstra segment_callback func; 11300ff27d31SJohn Polstra void *closure; 11310ff27d31SJohn Polstra { 1132247aba24SMarcel Moolenaar struct proc *p = td->td_proc; 11330ff27d31SJohn Polstra vm_map_t map = &p->p_vmspace->vm_map; 11340ff27d31SJohn Polstra vm_map_entry_t entry; 1135976a87a2SAlan Cox vm_object_t backing_object, object; 1136976a87a2SAlan Cox boolean_t ignore_entry; 11370ff27d31SJohn Polstra 1138976a87a2SAlan Cox vm_map_lock_read(map); 11390ff27d31SJohn Polstra for (entry = map->header.next; entry != &map->header; 11400ff27d31SJohn Polstra entry = entry->next) { 1141fa7dd9c5SMatthew Dillon /* 1142fa7dd9c5SMatthew Dillon * Don't dump inaccessible mappings, deal with legacy 1143fa7dd9c5SMatthew Dillon * coredump mode. 
1144fa7dd9c5SMatthew Dillon * 1145fa7dd9c5SMatthew Dillon * Note that read-only segments related to the elf binary 1146fa7dd9c5SMatthew Dillon * are marked MAP_ENTRY_NOCOREDUMP now so we no longer 1147fa7dd9c5SMatthew Dillon * need to arbitrarily ignore such segments. 1148fa7dd9c5SMatthew Dillon */ 1149fa7dd9c5SMatthew Dillon if (elf_legacy_coredump) { 1150fa7dd9c5SMatthew Dillon if ((entry->protection & VM_PROT_RW) != VM_PROT_RW) 11510ff27d31SJohn Polstra continue; 1152fa7dd9c5SMatthew Dillon } else { 1153fa7dd9c5SMatthew Dillon if ((entry->protection & VM_PROT_ALL) == 0) 1154fa7dd9c5SMatthew Dillon continue; 1155fa7dd9c5SMatthew Dillon } 11560ff27d31SJohn Polstra 11579730a5daSPaul Saab /* 1158fa7dd9c5SMatthew Dillon * Dont include memory segment in the coredump if 1159fa7dd9c5SMatthew Dillon * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in 1160fa7dd9c5SMatthew Dillon * madvise(2). Do not dump submaps (i.e. parts of the 1161fa7dd9c5SMatthew Dillon * kernel map). 11629730a5daSPaul Saab */ 1163fa7dd9c5SMatthew Dillon if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP)) 11649730a5daSPaul Saab continue; 11659730a5daSPaul Saab 1166976a87a2SAlan Cox if ((object = entry->object.vm_object) == NULL) 11670ff27d31SJohn Polstra continue; 11680ff27d31SJohn Polstra 11690ff27d31SJohn Polstra /* Ignore memory-mapped devices and such things. 
*/ 1170976a87a2SAlan Cox VM_OBJECT_LOCK(object); 1171976a87a2SAlan Cox while ((backing_object = object->backing_object) != NULL) { 1172976a87a2SAlan Cox VM_OBJECT_LOCK(backing_object); 1173976a87a2SAlan Cox VM_OBJECT_UNLOCK(object); 1174976a87a2SAlan Cox object = backing_object; 1175976a87a2SAlan Cox } 1176976a87a2SAlan Cox ignore_entry = object->type != OBJT_DEFAULT && 1177976a87a2SAlan Cox object->type != OBJT_SWAP && object->type != OBJT_VNODE; 1178976a87a2SAlan Cox VM_OBJECT_UNLOCK(object); 1179976a87a2SAlan Cox if (ignore_entry) 11800ff27d31SJohn Polstra continue; 11810ff27d31SJohn Polstra 11820ff27d31SJohn Polstra (*func)(entry, closure); 11830ff27d31SJohn Polstra } 1184976a87a2SAlan Cox vm_map_unlock_read(map); 11850ff27d31SJohn Polstra } 11860ff27d31SJohn Polstra 11870ff27d31SJohn Polstra /* 11880ff27d31SJohn Polstra * Write the core file header to the file, including padding up to 11890ff27d31SJohn Polstra * the page boundary. 11900ff27d31SJohn Polstra */ 11918c64af4fSJohn Polstra static int 11923ebc1248SPeter Wemm __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize) 1193b40ce416SJulian Elischer struct thread *td; 11948c64af4fSJohn Polstra struct vnode *vp; 11958c64af4fSJohn Polstra struct ucred *cred; 11960ff27d31SJohn Polstra int numsegs; 11970ff27d31SJohn Polstra size_t hdrsize; 11980ff27d31SJohn Polstra void *hdr; 11998c64af4fSJohn Polstra { 1200911c2be0SMark Peek size_t off; 12018c64af4fSJohn Polstra 12028c64af4fSJohn Polstra /* Fill in the header. */ 12030ff27d31SJohn Polstra bzero(hdr, hdrsize); 12048c64af4fSJohn Polstra off = 0; 1205247aba24SMarcel Moolenaar __elfN(puthdr)(td, hdr, &off, numsegs); 12068c64af4fSJohn Polstra 12078c64af4fSJohn Polstra /* Write it to the core file. 
*/ 1208a7cddfedSJake Burkholder return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0, 12099ca43589SRobert Watson UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, 12106617724cSJeff Roberson td)); 1211dada0278SJohn Polstra } 1212dada0278SJohn Polstra 121362919d78SPeter Wemm #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32 121462919d78SPeter Wemm typedef struct prstatus32 elf_prstatus_t; 121562919d78SPeter Wemm typedef struct prpsinfo32 elf_prpsinfo_t; 121662919d78SPeter Wemm typedef struct fpreg32 elf_prfpregset_t; 121762919d78SPeter Wemm typedef struct fpreg32 elf_fpregset_t; 121862919d78SPeter Wemm typedef struct reg32 elf_gregset_t; 121962919d78SPeter Wemm #else 122062919d78SPeter Wemm typedef prstatus_t elf_prstatus_t; 122162919d78SPeter Wemm typedef prpsinfo_t elf_prpsinfo_t; 122262919d78SPeter Wemm typedef prfpregset_t elf_prfpregset_t; 122362919d78SPeter Wemm typedef prfpregset_t elf_fpregset_t; 122462919d78SPeter Wemm typedef gregset_t elf_gregset_t; 122562919d78SPeter Wemm #endif 122662919d78SPeter Wemm 12278c64af4fSJohn Polstra static void 1228247aba24SMarcel Moolenaar __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs) 12298c64af4fSJohn Polstra { 12308c9b7b2cSMarcel Moolenaar struct { 123162919d78SPeter Wemm elf_prstatus_t status; 123262919d78SPeter Wemm elf_prfpregset_t fpregset; 123362919d78SPeter Wemm elf_prpsinfo_t psinfo; 12348c9b7b2cSMarcel Moolenaar } *tempdata; 123562919d78SPeter Wemm elf_prstatus_t *status; 123662919d78SPeter Wemm elf_prfpregset_t *fpregset; 123762919d78SPeter Wemm elf_prpsinfo_t *psinfo; 1238247aba24SMarcel Moolenaar struct proc *p; 1239247aba24SMarcel Moolenaar struct thread *thr; 12408c9b7b2cSMarcel Moolenaar size_t ehoff, noteoff, notesz, phoff; 12418c64af4fSJohn Polstra 1242247aba24SMarcel Moolenaar p = td->td_proc; 1243247aba24SMarcel Moolenaar 12448c64af4fSJohn Polstra ehoff = *off; 12458c64af4fSJohn Polstra *off += sizeof(Elf_Ehdr); 12468c64af4fSJohn Polstra 12478c64af4fSJohn Polstra 
phoff = *off; 12480ff27d31SJohn Polstra *off += (numsegs + 1) * sizeof(Elf_Phdr); 12498c64af4fSJohn Polstra 12508c64af4fSJohn Polstra noteoff = *off; 12518c9b7b2cSMarcel Moolenaar /* 12528c9b7b2cSMarcel Moolenaar * Don't allocate space for the notes if we're just calculating 12538c9b7b2cSMarcel Moolenaar * the size of the header. We also don't collect the data. 12548c9b7b2cSMarcel Moolenaar */ 12558c9b7b2cSMarcel Moolenaar if (dst != NULL) { 12568c9b7b2cSMarcel Moolenaar tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK); 12578c9b7b2cSMarcel Moolenaar status = &tempdata->status; 12588c9b7b2cSMarcel Moolenaar fpregset = &tempdata->fpregset; 12598c9b7b2cSMarcel Moolenaar psinfo = &tempdata->psinfo; 12608c9b7b2cSMarcel Moolenaar } else { 12618c9b7b2cSMarcel Moolenaar tempdata = NULL; 12628c9b7b2cSMarcel Moolenaar status = NULL; 12638c9b7b2cSMarcel Moolenaar fpregset = NULL; 12648c9b7b2cSMarcel Moolenaar psinfo = NULL; 12658c9b7b2cSMarcel Moolenaar } 12668c9b7b2cSMarcel Moolenaar 12678c9b7b2cSMarcel Moolenaar if (dst != NULL) { 12688c9b7b2cSMarcel Moolenaar psinfo->pr_version = PRPSINFO_VERSION; 126962919d78SPeter Wemm psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); 1270ccd3953eSJohn Baldwin strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); 12718c9b7b2cSMarcel Moolenaar /* 12728c9b7b2cSMarcel Moolenaar * XXX - We don't fill in the command line arguments properly 12738c9b7b2cSMarcel Moolenaar * yet. 
	 */
		/*
		 * NOTE(review): this is the tail of the note-header writer;
		 * the function entry and the setup of psinfo/status/fpregset/
		 * tempdata happen above this chunk — confirm against the full
		 * file.  dst == NULL throughout means "size-only pass": the
		 * code just advances *off without writing anything.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	/* Emit the process-wide info note. */
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(elf_prstatus_t);
			status->pr_gregsetsz = sizeof(elf_gregset_t);
			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
			/* 32-bit register layout for a 32-bit core image. */
			fill_regs32(thr, &status->pr_reg);
			fill_fpregs32(thr, fpregset);
#else
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
#endif
		}
		/* One NT_PRSTATUS + NT_FPREGSET pair per thread. */
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		/*
		 * Advance: after td, walk the process's thread list from the
		 * head, skipping td itself when the walk reaches it again.
		 */
		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	/* Total size of the note segment just emitted (or measured). */
	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
		/* A 32-bit core written by the 64-bit kernel is an i386 core. */
		ehdr->e_machine = EM_386;
#else
		ehdr->e_machine = ELF_ARCH;
#endif
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;	/* +1 for the PT_NOTE header */
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;		/* core files carry no sections */
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

/*
 * Append one ELF note to the buffer at *off, or, when dst is NULL, just
 * advance *off by the note's on-disk size so the caller can compute the
 * total segment size.  The name (including its NUL) and the descriptor
 * are each padded up to Elf_Size alignment, as the note format requires.
 */
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;	/* count the terminating NUL */
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Try to find the appropriate ABI-note section for checknote,
 * fetch the osreldate for binary from the ELF OSABI-note.  Only the
 * first page of the image is searched, the same as for headers.
 */
static boolean_t
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel)
{
	const Elf_Note *note, *note0, *note_end;
	const Elf_Phdr *phdr, *pnote;
	const Elf_Ehdr *hdr;
	const char *note_name;
	int i;

	pnote = NULL;
	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/* Locate the first PT_NOTE program header, if any. */
	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE) {
			pnote = &phdr[i];
			break;
		}
	}

	/*
	 * Reject a note segment that is missing or does not fit entirely
	 * within the first page — only that page of the image is available
	 * here (same restriction as for the headers).
	 */
	if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
	    pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
		return (FALSE);

	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
	note_end = (const Elf_Note *)(imgp->image_header +
	    pnote->p_offset + pnote->p_filesz);
	/*
	 * Walk the notes.  The 100-iteration cap bounds the scan on
	 * malformed (e.g. zero-sized) note entries in untrusted input.
	 */
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		/*
		 * Notes are checked against 4-byte (Elf32_Addr) alignment
		 * here irrespective of ELF class — NOTE(review): matches the
		 * 4-byte stride used in the nextnote arithmetic below.
		 */
		if (!aligned(note, Elf32_Addr))
			return (FALSE);
		/* Header fields must match the brand note exactly. */
		if (note->n_namesz != checknote->hdr.n_namesz ||
		    note->n_descsz != checknote->hdr.n_descsz ||
		    note->n_type != checknote->hdr.n_type)
			goto nextnote;
		/* The name string immediately follows the note header. */
		note_name = (const char *)(note + 1);
		if (strncmp(checknote->vendor, note_name,
		    checknote->hdr.n_namesz) != 0)
			goto nextnote;

		/*
		 * Fetch the osreldate for binary
		 * from the ELF OSABI-note if necessary.
		 */
		if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
		    checknote->trans_osrel != NULL)
			return (checknote->trans_osrel(note, osrel));
		return (TRUE);

nextnote:
		/* Skip header, padded name, and padded descriptor. */
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
	}

	return (FALSE);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));