/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *, const struct stat *);
static int convert_prot(int);   /* Elf flags -> mmap protection */
static int convert_flags(int);  /* Elf flags -> mmap flags */

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    int base_flags;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;
    char *note_map;
    size_t note_map_len;

    hdr = get_elf_header(fd, path, sb);
    if (hdr == NULL)
	return (NULL);

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    note_map = NULL;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    while (phdr < phlimit) {
	switch (phdr->p_type) {

	case PT_INTERP:
	    phinterp = phdr;
	    break;

	case PT_LOAD:
	    segs[++nsegs] = phdr;
	    if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
		_rtld_error("%s: PT_LOAD segment %d not page-aligned",
		    path, nsegs);
		goto error;
	    }
	    break;

	case PT_PHDR:
	    phdr_vaddr = phdr->p_vaddr;
	    phsize = phdr->p_memsz;
	    break;

	case PT_DYNAMIC:
	    phdyn = phdr;
	    break;

	case PT_TLS:
	    phtls = phdr;
	    break;

	case PT_GNU_STACK:
	    stack_flags = phdr->p_flags;
	    break;

	case PT_GNU_RELRO:
	    relro_page = phdr->p_vaddr;
	    relro_size = phdr->p_memsz;
	    break;

	case PT_NOTE:
	    if (phdr->p_offset > PAGE_SIZE ||
	      phdr->p_offset + phdr->p_filesz > PAGE_SIZE) {
		note_map_len = round_page(phdr->p_offset +
		  phdr->p_filesz) - trunc_page(phdr->p_offset);
		note_map = mmap(NULL, note_map_len, PROT_READ,
		  MAP_PRIVATE, fd, trunc_page(phdr->p_offset));
		if (note_map == MAP_FAILED) {
		    _rtld_error("%s: error mapping PT_NOTE (%d)", path, errno);
		    goto error;
		}
		note_start = (Elf_Addr)(note_map + phdr->p_offset -
		  trunc_page(phdr->p_offset));
	    } else {
		note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
	    }
	    note_end = note_start + phdr->p_filesz;
	    break;
	}

	++phdr;
    }
    if (phdyn == NULL) {
	_rtld_error("%s: object is not dynamically-linked", path);
	goto error;
    }

    if (nsegs < 0) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t) base_vaddr;
    base_flags = MAP_PRIVATE | MAP_ANON | MAP_NOCORE;
    if (npagesizes > 1 && round_page(segs[0]->p_filesz) >= pagesizes[1])
	base_flags |= MAP_ALIGNED_SUPER;

    mapbase = mmap(base_addr, mapsize, PROT_NONE, base_flags, -1, 0);
    if (mapbase == (caddr_t) -1) {
	_rtld_error("%s: mmap of entire address space failed: %s",
	  path, rtld_strerror(errno));
	goto error;
    }
    if (base_addr != NULL && mapbase != base_addr) {
	_rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
	  path, base_addr, mapbase);
	goto error1;
    }
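
    /*
     * Illustrative note (editorial addition, hypothetical numbers): with
     * 4 KiB pages, an object whose first PT_LOAD segment starts at
     * p_vaddr 0x0 and whose last PT_LOAD segment ends at
     * p_vaddr + p_memsz == 0x5678 is reserved as
     *
     *     base_vaddr  = trunc_page(0x0)    = 0x0
     *     base_vlimit = round_page(0x5678) = 0x6000
     *     mapsize     = 0x6000
     *
     * The PROT_NONE mapping above only stakes out that contiguous range;
     * the loop below overlays each segment into it with MAP_FIXED.
     */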
    for (i = 0; i <= nsegs; i++) {
	/* Overlay the segment onto the proper region. */
	data_offset = trunc_page(segs[i]->p_offset);
	data_vaddr = trunc_page(segs[i]->p_vaddr);
	data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
	data_addr = mapbase + (data_vaddr - base_vaddr);
	data_prot = convert_prot(segs[i]->p_flags);
	data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
	if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
	  data_flags | MAP_PREFAULT_READ, fd, data_offset) == (caddr_t) -1) {
	    _rtld_error("%s: mmap of data failed: %s", path,
		rtld_strerror(errno));
	    goto error1;
	}

	/* Do BSS setup */
	if (segs[i]->p_filesz != segs[i]->p_memsz) {

	    /* Clear any BSS in the last page of the segment. */
	    clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
	    clear_addr = mapbase + (clear_vaddr - base_vaddr);
	    clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

	    if ((nclear = data_vlimit - clear_vaddr) > 0) {
		/* Make sure the end of the segment is writable */
		if ((data_prot & PROT_WRITE) == 0 && -1 ==
		  mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
		    _rtld_error("%s: mprotect failed: %s", path,
		      rtld_strerror(errno));
		    goto error1;
		}

		memset(clear_addr, 0, nclear);

		/* Reset the data protection back */
		if ((data_prot & PROT_WRITE) == 0)
		    mprotect(clear_page, PAGE_SIZE, data_prot);
	    }

	    /* Overlay the BSS segment onto the proper region. */
	    bss_vaddr = data_vlimit;
	    bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
	    bss_addr = mapbase + (bss_vaddr - base_vaddr);
	    if (bss_vlimit > bss_vaddr) {	/* There is something to do */
		if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
		  data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
		    _rtld_error("%s: mmap of bss failed: %s", path,
		      rtld_strerror(errno));
		    goto error1;
		}
	    }
	}

	if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
	  (data_vlimit - data_vaddr + data_offset) >=
	  (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
	    phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
	}
    }

    obj = obj_new();
    if (sb != NULL) {
	obj->dev = sb->st_dev;
	obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
	obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
	obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
	obj->phdr = malloc(phsize);
	if (obj->phdr == NULL) {
	    obj_free(obj);
	    _rtld_error("%s: cannot allocate program header", path);
	    goto error1;
	}
	memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
	obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
	obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
	tls_dtv_generation++;
	obj->tlsindex = ++tls_max_index;
	obj->tlssize = phtls->p_memsz;
	obj->tlsalign = phtls->p_align;
	obj->tlsinitsize = phtls->p_filesz;
	obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    obj->relro_page = obj->relocbase + trunc_page(relro_page);
    obj->relro_size = round_page(relro_size);
    if (note_start < note_end)
	digest_notes(obj, note_start, note_end);
    if (note_map != NULL)
	munmap(note_map, note_map_len);
    munmap(hdr, PAGE_SIZE);
    return (obj);

error1:
    munmap(mapbase, mapsize);
error:
    if (note_map != NULL && note_map != MAP_FAILED)
	munmap(note_map, note_map_len);
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}

static Elf_Ehdr *
get_elf_header(int fd, const char *path, const struct stat *sbp)
{
    Elf_Ehdr *hdr;

    /* Make sure the file has enough data for the ELF header */
    if (sbp != NULL && sbp->st_size < sizeof(Elf_Ehdr)) {
	_rtld_error("%s: invalid file format", path);
	return (NULL);
    }

    hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ,
      fd, 0);
    if (hdr == (Elf_Ehdr *)MAP_FAILED) {
	_rtld_error("%s: read error: %s", path, rtld_strerror(errno));
	return (NULL);
    }

    /* Make sure the file is valid */
    if (!IS_ELF(*hdr)) {
	_rtld_error("%s: invalid file format", path);
	goto error;
    }
    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
      hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
	_rtld_error("%s: unsupported file layout", path);
	goto error;
    }
    if (hdr->e_ident[EI_VERSION] != EV_CURRENT ||
      hdr->e_version != EV_CURRENT) {
	_rtld_error("%s: unsupported file version", path);
	goto error;
    }
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
	_rtld_error("%s: unsupported file type", path);
	goto error;
    }
    if (hdr->e_machine != ELF_TARG_MACH) {
	_rtld_error("%s: unsupported machine", path);
	goto error;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And, it simplifies things considerably.
     */
    if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
	_rtld_error(
	    "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
	goto error;
    }
    if (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) >
      (size_t)PAGE_SIZE) {
	_rtld_error("%s: program header too large", path);
	goto error;
    }
    return (hdr);

error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
	free_tls_offset(obj);
    while (obj->needed != NULL) {
	Needed_Entry *needed = obj->needed;
	obj->needed = needed->next;
	free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
	Name_Entry *entry = STAILQ_FIRST(&obj->names);
	STAILQ_REMOVE_HEAD(&obj->names, link);
	free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
	elm = STAILQ_FIRST(&obj->dldags);
	STAILQ_REMOVE_HEAD(&obj->dldags, link);
	free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
	elm = STAILQ_FIRST(&obj->dagmembers);
	STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
	free(elm);
    }
    if (obj->vertab)
	free(obj->vertab);
    if (obj->origin_path)
	free(obj->origin_path);
    if (obj->z_origin)
	free(obj->rpath);
    if (obj->priv)
	free(obj->priv);
    if (obj->path)
	free(obj->path);
    if (obj->phdr_alloc)
	free((void *)obj->phdr);
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
static int
convert_prot(int elfflags)
{
    int prot = 0;
    if (elfflags & PF_R)
	prot |= PROT_READ;
    if (elfflags & PF_W)
	prot |= PROT_WRITE;
    if (elfflags & PF_X)
	prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE; /* All mappings are private */

    /*
     * Readonly mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
	flags |= MAP_NOCORE;
    return flags;
}
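
/*
 * Illustrative note (editorial addition, hypothetical values): for a
 * typical read/execute text segment with p_flags == (PF_R | PF_X),
 * convert_prot() returns PROT_READ | PROT_EXEC and convert_flags()
 * returns MAP_PRIVATE | MAP_NOCORE, since the segment is not writable
 * and so never needs to appear in a core dump.  A writable data
 * segment with p_flags == (PF_R | PF_W) instead yields
 * PROT_READ | PROT_WRITE with plain MAP_PRIVATE.
 */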