/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static int convert_prot(int);   /* Elf flags -> mmap protection */
static int convert_flags(int);  /* Elf flags -> mmap flags */

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
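 *
 * A typical call sequence (illustrative only; the real call sites live
 * elsewhere in rtld) looks roughly like:
 *
 *    if ((fd = open(path, O_RDONLY)) != -1) {
 *        if (fstat(fd, &sb) != -1)
 *            obj = map_object(fd, path, &sb);
 *        close(fd);
 *    }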
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    union {
        Elf_Ehdr hdr;
        char buf[PAGE_SIZE];
    } u;
    int nbytes, i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phphdr;
    Elf_Phdr *phinterp;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Off base_offset;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    size_t nclear;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;

    if ((nbytes = read(fd, u.buf, PAGE_SIZE)) == -1) {
        _rtld_error("%s: read error: %s", path, strerror(errno));
        return NULL;
    }

    /* Make sure the file is valid */
    if (nbytes < sizeof(Elf_Ehdr)
      || u.hdr.e_ident[EI_MAG0] != ELFMAG0
      || u.hdr.e_ident[EI_MAG1] != ELFMAG1
      || u.hdr.e_ident[EI_MAG2] != ELFMAG2
      || u.hdr.e_ident[EI_MAG3] != ELFMAG3) {
        _rtld_error("%s: invalid file format", path);
        return NULL;
    }
    if (u.hdr.e_ident[EI_CLASS] != ELF_TARG_CLASS
      || u.hdr.e_ident[EI_DATA] != ELF_TARG_DATA) {
        _rtld_error("%s: unsupported file layout", path);
        return NULL;
    }
    if (u.hdr.e_ident[EI_VERSION] != EV_CURRENT
      || u.hdr.e_version != EV_CURRENT) {
        _rtld_error("%s: unsupported file version", path);
        return NULL;
    }
    if (u.hdr.e_type != ET_EXEC && u.hdr.e_type != ET_DYN) {
        _rtld_error("%s: unsupported file type", path);
        return NULL;
    }
    if (u.hdr.e_machine != ELF_TARG_MACH) {
        _rtld_error("%s: unsupported machine", path);
        return NULL;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And it simplifies things considerably.
     */
    if (u.hdr.e_phentsize != sizeof(Elf_Phdr)) {
        _rtld_error(
          "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
        return NULL;
    }
    if (u.hdr.e_phoff + u.hdr.e_phnum*sizeof(Elf_Phdr) > nbytes) {
        _rtld_error("%s: program header too large", path);
        return NULL;
    }

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect the loadable segments to be ordered by load address,
     * with the text segment first.
     */
    phdr = (Elf_Phdr *) (u.buf + u.hdr.e_phoff);
    phlimit = phdr + u.hdr.e_phnum;
    nsegs = -1;
    phdyn = phphdr = phinterp = NULL;
    segs = alloca(sizeof(segs[0]) * u.hdr.e_phnum);
    while (phdr < phlimit) {
        switch (phdr->p_type) {

        case PT_INTERP:
            phinterp = phdr;
            break;

        case PT_LOAD:
            segs[++nsegs] = phdr;
            if (segs[nsegs]->p_align < PAGE_SIZE) {
                _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                  path, nsegs);
                return NULL;
            }
            break;

        case PT_PHDR:
            phphdr = phdr;
            break;

        case PT_DYNAMIC:
            phdyn = phdr;
            break;
        }

        ++phdr;
    }
    if (phdyn == NULL) {
        _rtld_error("%s: object is not dynamically-linked", path);
        return NULL;
    }

    if (nsegs < 0) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        return NULL;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
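     *
     * For illustration only (numbers made up), with 4K pages a segment
     * with p_vaddr 0x1234 and p_memsz 0x5678 would contribute
     * trunc_page(0x1234) == 0x1000 as a lower bound and
     * round_page(0x1234 + 0x5678) == round_page(0x68ac) == 0x7000 as an
     * upper bound on the region mapped below.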
     */
    base_offset = trunc_page(segs[0]->p_offset);
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = u.hdr.e_type == ET_EXEC ? (caddr_t) base_vaddr : NULL;

    mapbase = mmap(base_addr, mapsize, convert_prot(segs[0]->p_flags),
      convert_flags(segs[0]->p_flags), fd, base_offset);
    if (mapbase == (caddr_t) -1) {
        _rtld_error("%s: mmap of entire address space failed: %s",
          path, strerror(errno));
        return NULL;
    }
    if (base_addr != NULL && mapbase != base_addr) {
        _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
          path, base_addr, mapbase);
        munmap(mapbase, mapsize);
        return NULL;
    }

    for (i = 0; i <= nsegs; i++) {
        /* Overlay the segment onto the proper region. */
        data_offset = trunc_page(segs[i]->p_offset);
        data_vaddr = trunc_page(segs[i]->p_vaddr);
        data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
        data_addr = mapbase + (data_vaddr - base_vaddr);
        data_prot = convert_prot(segs[i]->p_flags);
        data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
        /* Do not call mmap on the first segment - this is redundant */
        if (i && mmap(data_addr, data_vlimit - data_vaddr, data_prot,
          data_flags, fd, data_offset) == (caddr_t) -1) {
            _rtld_error("%s: mmap of data failed: %s", path, strerror(errno));
            return NULL;
        }

        /* Clear any BSS in the last page of the segment. */
        clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
        clear_addr = mapbase + (clear_vaddr - base_vaddr);
        clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);
        if ((nclear = data_vlimit - clear_vaddr) > 0) {
            /* Make sure the end of the segment is writable */
            if ((data_prot & PROT_WRITE) == 0 &&
              mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE) == -1) {
                _rtld_error("%s: mprotect failed: %s", path,
                  strerror(errno));
                return NULL;
            }

            memset(clear_addr, 0, nclear);

            /* Reset the data protection back */
            if ((data_prot & PROT_WRITE) == 0)
                mprotect(clear_page, PAGE_SIZE, data_prot);
        }

        /* Overlay the BSS segment onto the proper region. */
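        /*
         * The file-backed mapping established above ends at data_vlimit;
         * any whole pages from there up to the end of p_memsz must read
         * as zeros, so they are remapped anonymously (and therefore
         * zero-filled) below.
         */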
        bss_vaddr = data_vlimit;
        bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
        bss_addr = mapbase + (bss_vaddr - base_vaddr);
        if (bss_vlimit > bss_vaddr) {   /* There is something to do */
            if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
              MAP_PRIVATE|MAP_FIXED|MAP_ANON, -1, 0) == (caddr_t) -1) {
                _rtld_error("%s: mmap of bss failed: %s", path,
                  strerror(errno));
                return NULL;
            }
        }
    }

    obj = obj_new();
    if (sb != NULL) {
        obj->dev = sb->st_dev;
        obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (u.hdr.e_entry != 0)
        obj->entry = (caddr_t) (obj->relocbase + u.hdr.e_entry);
    if (phphdr != NULL) {
        obj->phdr = (const Elf_Phdr *) (obj->relocbase + phphdr->p_vaddr);
        obj->phsize = phphdr->p_memsz;
    }
    if (phinterp != NULL)
        obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);

    return obj;
}

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    free(obj->path);
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    free(obj->priv);
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for mmap.
 */
static int
convert_prot(int elfflags)
{
    int prot = 0;

    if (elfflags & PF_R)
        prot |= PROT_READ;
    if (elfflags & PF_W)
        prot |= PROT_WRITE;
    if (elfflags & PF_X)
        prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE;    /* All mappings are private */

    /*
     * Readonly mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
        flags |= MAP_NOCORE;
    return flags;
}
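
/*
 * Example (illustrative only): a typical read-only text segment has
 * p_flags PF_R|PF_X, which convert_prot() maps to PROT_READ|PROT_EXEC
 * and convert_flags() maps to MAP_PRIVATE|MAP_NOCORE, since a segment
 * that is never written can be reconstructed from the object file by a
 * debugger instead of being written to a core dump.
 */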