/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *, const struct stat *);
static int convert_flags(int); /* Elf flags -> mmap flags */

int __getosreldate(void);

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
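 *
 * The "sb" argument, if non-NULL, holds stat() information for the open
 * file; it is used to sanity-check the file size and to record the object's
 * device and inode numbers.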
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    int base_flags;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;
    char *note_map;
    size_t note_map_len;
    Elf_Addr text_end;

    hdr = get_elf_header(fd, path, sb);
    if (hdr == NULL)
        return (NULL);

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *)((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    note_map = NULL;
    note_map_len = 0;
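
    /*
     * segs[] collects the PT_LOAD headers in the order they appear; nsegs
     * starts at -1, so the first loadable segment lands in segs[0] and
     * nsegs ends up holding the index of the last one.
     */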
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    text_end = 0;
    while (phdr < phlimit) {
        switch (phdr->p_type) {

        case PT_INTERP:
            phinterp = phdr;
            break;

        case PT_LOAD:
            segs[++nsegs] = phdr;
            if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
                _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                    path, nsegs);
                goto error;
            }
            if ((segs[nsegs]->p_flags & PF_X) == PF_X) {
                text_end = MAX(text_end,
                    round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz));
            }
            break;

        case PT_PHDR:
            phdr_vaddr = phdr->p_vaddr;
            phsize = phdr->p_memsz;
            break;

        case PT_DYNAMIC:
            phdyn = phdr;
            break;

        case PT_TLS:
            phtls = phdr;
            break;

        case PT_GNU_STACK:
            stack_flags = phdr->p_flags;
            break;

        case PT_GNU_RELRO:
            relro_page = phdr->p_vaddr;
            relro_size = phdr->p_memsz;
            break;

        case PT_NOTE:
            if (phdr->p_offset > PAGE_SIZE ||
                phdr->p_offset + phdr->p_filesz > PAGE_SIZE) {
                note_map_len = round_page(phdr->p_offset +
                    phdr->p_filesz) - trunc_page(phdr->p_offset);
                note_map = mmap(NULL, note_map_len, PROT_READ,
                    MAP_PRIVATE, fd, trunc_page(phdr->p_offset));
                if (note_map == MAP_FAILED) {
                    _rtld_error("%s: error mapping PT_NOTE (%d)", path, errno);
                    goto error;
                }
                note_start = (Elf_Addr)(note_map + phdr->p_offset -
                    trunc_page(phdr->p_offset));
            } else {
                note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
            }
            note_end = note_start + phdr->p_filesz;
            break;
        }

        ++phdr;
    }
    if (phdyn == NULL) {
        _rtld_error("%s: object is not dynamically-linked", path);
        goto error;
    }

    if (nsegs < 0) {
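        /* No PT_LOAD segment was seen, so there is nothing to map. */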
        _rtld_error("%s: too few PT_LOAD segments", path);
        goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t) base_vaddr;
    base_flags = __getosreldate() >= P_OSREL_MAP_GUARD ? MAP_GUARD :
        MAP_PRIVATE | MAP_ANON | MAP_NOCORE;
    if (npagesizes > 1 && round_page(segs[0]->p_filesz) >= pagesizes[1])
        base_flags |= MAP_ALIGNED_SUPER;
    if (base_vaddr != 0)
        base_flags |= MAP_FIXED | MAP_EXCL;

    mapbase = mmap(base_addr, mapsize, PROT_NONE, base_flags, -1, 0);
    if (mapbase == (caddr_t) -1) {
        _rtld_error("%s: mmap of entire address space failed: %s",
            path, rtld_strerror(errno));
        goto error;
    }
    if (base_addr != NULL && mapbase != base_addr) {
        _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
            path, base_addr, mapbase);
        goto error1;
    }

    for (i = 0; i <= nsegs; i++) {
        /* Overlay the segment onto the proper region. */
        data_offset = trunc_page(segs[i]->p_offset);
        data_vaddr = trunc_page(segs[i]->p_vaddr);
        data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
        data_addr = mapbase + (data_vaddr - base_vaddr);
        data_prot = convert_prot(segs[i]->p_flags);
        data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
        if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
            data_flags | MAP_PREFAULT_READ, fd, data_offset) == (caddr_t) -1) {
            _rtld_error("%s: mmap of data failed: %s", path,
                rtld_strerror(errno));
            goto error1;
        }

        /* Do BSS setup */
        if (segs[i]->p_filesz != segs[i]->p_memsz) {

            /* Clear any BSS in the last page of the segment. */
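            /*
             * The file mapping extends to the end of the page holding the
             * last file-backed byte, so BSS bytes in that page may still
             * contain file data rather than the zeros the program expects;
             * they are cleared explicitly here.
             */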
            clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
            clear_addr = mapbase + (clear_vaddr - base_vaddr);
            clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

            if ((nclear = data_vlimit - clear_vaddr) > 0) {
                /* Make sure the end of the segment is writable */
                if ((data_prot & PROT_WRITE) == 0 && -1 ==
                    mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
                    _rtld_error("%s: mprotect failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }

                memset(clear_addr, 0, nclear);

                /* Reset the data protection back */
                if ((data_prot & PROT_WRITE) == 0)
                    mprotect(clear_page, PAGE_SIZE, data_prot);
            }

            /* Overlay the BSS segment onto the proper region. */
            bss_vaddr = data_vlimit;
            bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
            bss_addr = mapbase + (bss_vaddr - base_vaddr);
            if (bss_vlimit > bss_vaddr) {   /* There is something to do */
                if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                    data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
                    _rtld_error("%s: mmap of bss failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }
            }
        }

        if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
            (data_vlimit - data_vaddr + data_offset) >=
            (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
            phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
        }
    }

    obj = obj_new();
    if (sb != NULL) {
        obj->dev = sb->st_dev;
        obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = text_end - base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *)(obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
        obj->entry = (caddr_t)(obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
        obj->phdr = (const Elf_Phdr *)(obj->relocbase + phdr_vaddr);
    } else {
        obj->phdr = malloc(phsize);
        if (obj->phdr == NULL) {
            obj_free(obj);
            _rtld_error("%s: cannot allocate program header", path);
            goto error1;
        }
        memcpy(__DECONST(char *, obj->phdr), (char *)hdr + hdr->e_phoff, phsize);
        obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
        obj->interp = (const char *)(obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
        tls_dtv_generation++;
        obj->tlsindex = ++tls_max_index;
        obj->tlssize = phtls->p_memsz;
        obj->tlsalign = phtls->p_align;
        obj->tlsinitsize = phtls->p_filesz;
        obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    obj->relro_page = obj->relocbase + trunc_page(relro_page);
    obj->relro_size = round_page(relro_size);
    if (note_start < note_end)
        digest_notes(obj, note_start, note_end);
    if (note_map != NULL)
        munmap(note_map, note_map_len);
    munmap(hdr, PAGE_SIZE);
    return (obj);

error1:
    munmap(mapbase, mapsize);
error:
    if (note_map != NULL && note_map != MAP_FAILED)
        munmap(note_map, note_map_len);
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}

static Elf_Ehdr *
get_elf_header(int fd, const char *path, const struct stat *sbp)
{
    Elf_Ehdr *hdr;

    /* Make sure file has enough data for the ELF header */
    if (sbp != NULL && sbp->st_size < (off_t)sizeof(Elf_Ehdr)) {
        _rtld_error("%s: invalid file format", path);
        return (NULL);
    }

    hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ,
        fd, 0);
    if (hdr == (Elf_Ehdr *)MAP_FAILED) {
        _rtld_error("%s: read error: %s", path, rtld_strerror(errno));
        return (NULL);
    }

    /* Make sure the file is valid */
    if (!IS_ELF(*hdr)) {
        _rtld_error("%s: invalid file format", path);
        goto error;
    }
    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
        hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
        _rtld_error("%s: unsupported file layout", path);
        goto error;
    }
    if (hdr->e_ident[EI_VERSION] != EV_CURRENT ||
        hdr->e_version != EV_CURRENT) {
        _rtld_error("%s: unsupported file version", path);
        goto error;
    }
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
        _rtld_error("%s: unsupported file type", path);
        goto error;
    }
    if (hdr->e_machine != ELF_TARG_MACH) {
        _rtld_error("%s: unsupported machine", path);
        goto error;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it always seems
     * to be true in practice.  And, it simplifies things considerably.
     */
    if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
        _rtld_error(
            "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
        goto error;
    }
    if (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) >
        (size_t)PAGE_SIZE) {
        _rtld_error("%s: program header too large", path);
        goto error;
    }
    return (hdr);

error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}
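
/*
 * Release an Obj_Entry and the auxiliary data allocated for it: the
 * needed-library list, the name and DAG lists, the version table, and the
 * program header copy, if one was made.
 */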
void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
        free_tls_offset(obj);
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
        Name_Entry *entry = STAILQ_FIRST(&obj->names);
        STAILQ_REMOVE_HEAD(&obj->names, link);
        free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    if (obj->vertab)
        free(obj->vertab);
    if (obj->origin_path)
        free(obj->origin_path);
    if (obj->z_origin)
        free(__DECONST(void *, obj->rpath));
    if (obj->priv)
        free(obj->priv);
    if (obj->path)
        free(obj->path);
    if (obj->phdr_alloc)
        free(__DECONST(void *, obj->phdr));
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
int
convert_prot(int elfflags)
{
    int prot = 0;
    if (elfflags & PF_R)
        prot |= PROT_READ;
    if (elfflags & PF_W)
        prot |= PROT_WRITE;
    if (elfflags & PF_X)
        prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE; /* All mappings are private */

    /*
     * Readonly mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
        flags |= MAP_NOCORE;
    return flags;
}