/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define OLD_EI_BRAND    8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));
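
/*
 * Debug knobs: "trace" adds an AT_DEBUG entry to the aux vector (see
 * __elfN(freebsd_fixup)() below); "legacy_coredump" restores the old
 * core dump policy of dumping only read-write segments.
 */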
static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
        int i;

        for (i = 0; i < MAX_BRANDS; i++) {
                if (elf_brand_list[i] == NULL) {
                        elf_brand_list[i] = entry;
                        break;
                }
        }
        if (i == MAX_BRANDS)
                return (-1);
        return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
        int i;

        for (i = 0; i < MAX_BRANDS; i++) {
                if (elf_brand_list[i] == entry) {
                        elf_brand_list[i] = NULL;
                        break;
                }
        }
        if (i == MAX_BRANDS)
                return (-1);
        return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
        struct proc *p;
        int rval = FALSE;

        sx_slock(&allproc_lock);
        LIST_FOREACH(p, &allproc, p_list) {
                if (p->p_sysent == entry->sysvec) {
                        rval = TRUE;
                        break;
                }
        }
        sx_sunlock(&allproc_lock);

        return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
        Elf_Brandinfo *bi;
        int i;

        /*
         * We support three types of branding -- (1) the ELF EI_OSABI field
         * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
         * branding within the ELF header, and (3) the path of the ELF
         * interpreter, matched against each brand's `interp_path' field.
         * We should also look for an ".note.ABI-tag" ELF section, now present
         * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
         */

        /* If the executable has a brand, search for it in the brand list. */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi != NULL && hdr->e_machine == bi->machine &&
                    (hdr->e_ident[EI_OSABI] == bi->brand ||
                    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
                    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
                        return (bi);
        }

        /* Lacking a known brand, search for a recognized interpreter. */
        if (interp != NULL) {
                for (i = 0; i < MAX_BRANDS; i++) {
                        bi = elf_brand_list[i];
                        if (bi != NULL && hdr->e_machine == bi->machine &&
                            strcmp(interp, bi->interp_path) == 0)
                                return (bi);
                }
        }

        /* Lacking a recognized interpreter, try the default brand. */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi != NULL && hdr->e_machine == bi->machine &&
                    __elfN(fallback_brand) == bi->brand)
                        return (bi);
        }
        return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
        Elf_Brandinfo *bi;
        int i;

        if (!IS_ELF(*hdr) ||
            hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
            hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
            hdr->e_ident[EI_VERSION] != EV_CURRENT ||
            hdr->e_phentsize != sizeof(Elf_Phdr) ||
            hdr->e_version != ELF_TARG_VER)
                return (ENOEXEC);

        /*
         * Make sure we have at least one brand for this machine.
         */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi != NULL && bi->machine == hdr->e_machine)
                        break;
        }
        if (i == MAX_BRANDS)
                return (ENOEXEC);

        return (0);
}
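
/*
 * Map the sub-page piece of a segment that does not begin or end on a
 * page boundary: make sure a destination page exists in the target map,
 * temporarily map the backing page into exec_map, and copy the bytes
 * out by hand.
 */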
static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max)
{
        int error, rv;
        vm_offset_t off;
        vm_offset_t data_buf = 0;

        /*
         * Create the page if it doesn't exist yet.  Ignore errors.
         */
        vm_map_lock(map);
        vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
            max, 0);
        vm_map_unlock(map);

        /*
         * Find the page from the underlying object.
         */
        if (object) {
                vm_object_reference(object);
                rv = vm_map_find(exec_map,
                                 object,
                                 trunc_page(offset),
                                 &data_buf,
                                 PAGE_SIZE,
                                 TRUE,
                                 VM_PROT_READ,
                                 VM_PROT_ALL,
                                 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (rv);
                }

                off = offset - trunc_page(offset);
                error = copyout((caddr_t)data_buf + off, (caddr_t)start,
                    end - start);
                vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
                if (error) {
                        return (KERN_FAILURE);
                }
        }

        return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max, int cow)
{
        vm_offset_t data_buf, off;
        vm_size_t sz;
        int error, rv;

        if (start != trunc_page(start)) {
                rv = __elfN(map_partial)(map, object, offset, start,
                    round_page(start), prot, max);
                if (rv)
                        return (rv);
                offset += round_page(start) - start;
                start = round_page(start);
        }
        if (end != round_page(end)) {
                rv = __elfN(map_partial)(map, object, offset +
                    trunc_page(end) - start, trunc_page(end), end, prot, max);
                if (rv)
                        return (rv);
                end = trunc_page(end);
        }
        if (end > start) {
                if (offset & PAGE_MASK) {
                        /*
                         * The mapping is not page aligned.  This means we
                         * have to copy the data.  Sigh.
                         */
                        rv = vm_map_find(map, 0, 0, &start, end - start,
                            FALSE, prot, max, 0);
                        if (rv)
                                return (rv);
                        data_buf = 0;
                        while (start < end) {
                                vm_object_reference(object);
                                rv = vm_map_find(exec_map,
                                                 object,
                                                 trunc_page(offset),
                                                 &data_buf,
                                                 2 * PAGE_SIZE,
                                                 TRUE,
                                                 VM_PROT_READ,
                                                 VM_PROT_ALL,
                                                 (MAP_COPY_ON_WRITE
                                                  | MAP_PREFAULT_PARTIAL));
                                if (rv != KERN_SUCCESS) {
                                        vm_object_deallocate(object);
                                        return (rv);
                                }
                                off = offset - trunc_page(offset);
                                sz = end - start;
                                if (sz > PAGE_SIZE)
                                        sz = PAGE_SIZE;
                                error = copyout((caddr_t)data_buf + off,
                                    (caddr_t)start, sz);
                                vm_map_remove(exec_map, data_buf,
                                    data_buf + 2 * PAGE_SIZE);
                                if (error) {
                                        return (KERN_FAILURE);
                                }
                                start += sz;
                        }
                        rv = KERN_SUCCESS;
                } else {
                        vm_map_lock(map);
                        rv = vm_map_insert(map, object, offset, start, end,
                            prot, max, cow);
                        vm_map_unlock(map);
                }
                return (rv);
        } else {
                return (KERN_SUCCESS);
        }
}
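
/*
 * Map a loadable ELF segment into the process address space: the
 * file-backed portion is mapped copy-on-write, and any part of memsz
 * beyond filsz is backed by anonymous zero-fill memory.
 */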
static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
    struct vnode *vp, vm_object_t object, vm_offset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
    size_t pagesize)
{
        size_t map_len;
        vm_offset_t map_addr;
        int error, rv, cow;
        size_t copy_len;
        vm_offset_t file_addr;
        vm_offset_t data_buf = 0;

        error = 0;

        /*
         * It's necessary to fail if the filsz + offset taken from the
         * header is greater than the actual file pager object's size.
         * If we were to allow this, then the vm_map_find() below would
         * walk right off the end of the file object and into the ether.
         *
         * While I'm here, might as well check for something else that
         * is invalid: filsz cannot be greater than memsz.
         */
        if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
            filsz > memsz) {
                uprintf("elf_load_section: truncated ELF file\n");
                return (ENOEXEC);
        }

#define trunc_page_ps(va, ps)   ((va) & ~(ps - 1))
#define round_page_ps(va, ps)   (((va) + (ps - 1)) & ~(ps - 1))

        map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
        file_addr = trunc_page_ps(offset, pagesize);

        /*
         * We have two choices.  We can either clear the data in the last page
         * of an oversized mapping, or we can start the anon mapping a page
         * early and copy the initialized data into that first page.  We
         * choose the second.
         */
        if (memsz > filsz)
                map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
        else
                map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

        if (map_len != 0) {
                vm_object_reference(object);

                /* cow flags: don't dump readonly sections in core */
                cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
                    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

                rv = __elfN(map_insert)(&vmspace->vm_map,
                                        object,
                                        file_addr,          /* file offset */
                                        map_addr,           /* virtual start */
                                        map_addr + map_len, /* virtual end */
                                        prot,
                                        VM_PROT_ALL,
                                        cow);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (EINVAL);
                }

                /* we can stop now if we've covered it all */
                if (memsz == filsz) {
                        return (0);
                }
        }

        /*
         * We have to get the remaining bit of the file into the first part
         * of the oversized map segment.  This is normally because the .data
         * segment in the file is extended to provide bss.  It's a neat idea
         * to try to save a page, but it's a pain in the behind to implement.
         */
        copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
        map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
        map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
            map_addr;

        /* This had damn well better be true! */
        if (map_len != 0) {
                rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
                    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
                if (rv != KERN_SUCCESS) {
                        return (EINVAL);
                }
        }

        if (copy_len != 0) {
                vm_offset_t off;

                vm_object_reference(object);
                rv = vm_map_find(exec_map,
                                 object,
                                 trunc_page(offset + filsz),
                                 &data_buf,
                                 PAGE_SIZE,
                                 TRUE,
                                 VM_PROT_READ,
                                 VM_PROT_ALL,
                                 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (EINVAL);
                }

                /* send the page fragment to user space */
                off = trunc_page_ps(offset + filsz, pagesize) -
                    trunc_page(offset + filsz);
                error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
                    copy_len);
                vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
                if (error) {
                        return (error);
                }
        }

        /*
         * Set it to the specified protection.
         * XXX had better undo the damage from pasting over the cracks here!
         */
        vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
            round_page(map_addr + map_len), prot, FALSE);

        return (error);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize)
{
        struct {
                struct nameidata nd;
                struct vattr attr;
                struct image_params image_params;
        } *tempdata;
        const Elf_Ehdr *hdr = NULL;
        const Elf_Phdr *phdr = NULL;
        struct nameidata *nd;
        struct vmspace *vmspace = p->p_vmspace;
        struct vattr *attr;
        struct image_params *imgp;
        vm_prot_t prot;
        u_long rbase;
        u_long base_addr = 0;
        int error, i, numsegs;

        if (curthread->td_proc != p)
                panic("elf_load_file - thread");        /* XXXKSE DIAGNOSTIC */

        tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
        nd = &tempdata->nd;
        attr = &tempdata->attr;
        imgp = &tempdata->image_params;

        /*
         * Initialize part of the common data.
         */
        imgp->proc = p;
        imgp->attr = attr;
        imgp->firstpage = NULL;
        imgp->image_header = NULL;
        imgp->object = NULL;
        imgp->execlabel = NULL;

        /* XXXKSE */
        NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);

        if ((error = namei(nd)) != 0) {
                nd->ni_vp = NULL;
                goto fail;
        }
        NDFREE(nd, NDF_ONLY_PNBUF);
        imgp->vp = nd->ni_vp;

        /*
         * Check permissions, modes, uid, etc. on the file, and "open" it.
         */
        error = exec_check_permissions(imgp);
        if (error) {
                VOP_UNLOCK(nd->ni_vp, 0, curthread);    /* XXXKSE */
                goto fail;
        }

        error = exec_map_first_page(imgp);
        /*
         * Also make certain that the interpreter stays the same, so set
         * its VV_TEXT flag, too.
         */
        if (error == 0)
                nd->ni_vp->v_vflag |= VV_TEXT;

        imgp->object = nd->ni_vp->v_object;
        vm_object_reference(imgp->object);

        VOP_UNLOCK(nd->ni_vp, 0, curthread);    /* XXXKSE */
        if (error)
                goto fail;

        hdr = (const Elf_Ehdr *)imgp->image_header;
        if ((error = __elfN(check_header)(hdr)) != 0)
                goto fail;
        if (hdr->e_type == ET_DYN)
                rbase = *addr;
        else if (hdr->e_type == ET_EXEC)
                rbase = 0;
        else {
                error = ENOEXEC;
                goto fail;
        }

        /* Only support headers that fit within first page for now */
        /* (multiplication of two Elf_Half fields will not overflow) */
        if ((hdr->e_phoff > PAGE_SIZE) ||
            (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
                error = ENOEXEC;
                goto fail;
        }

        phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

        for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
                if (phdr[i].p_type == PT_LOAD) {        /* Loadable segment */
                        prot = 0;
                        if (phdr[i].p_flags & PF_X)
                                prot |= VM_PROT_EXECUTE;
                        if (phdr[i].p_flags & PF_W)
                                prot |= VM_PROT_WRITE;
                        if (phdr[i].p_flags & PF_R)
                                prot |= VM_PROT_READ;

                        if ((error = __elfN(load_section)(p, vmspace,
                            nd->ni_vp, imgp->object, phdr[i].p_offset,
                            (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
                            phdr[i].p_memsz, phdr[i].p_filesz, prot,
                            pagesize)) != 0)
                                goto fail;
                        /*
                         * Establish the base address if this is the
                         * first segment.
                         */
                        if (numsegs == 0)
                                base_addr = trunc_page(phdr[i].p_vaddr +
                                    rbase);
                        numsegs++;
                }
        }
        *addr = base_addr;
        *entry = (unsigned long)hdr->e_entry + rbase;

fail:
        if (imgp->firstpage)
                exec_unmap_first_page(imgp);
        if (imgp->object)
                vm_object_deallocate(imgp->object);

        if (nd->ni_vp)
                vrele(nd->ni_vp);

        free(tempdata, M_TEMP);

        return (error);
}
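
/*
 * The ELF image activator proper: validate the header, select a brand,
 * create a fresh address space, map the PT_LOAD segments, load the
 * interpreter if one is requested, and record the arguments needed to
 * build the process's aux vector.
 */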
static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
        const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
        const Elf_Phdr *phdr;
        Elf_Auxargs *elf_auxargs = NULL;
        struct vmspace *vmspace;
        vm_prot_t prot;
        u_long text_size = 0, data_size = 0, total_size = 0;
        u_long text_addr = 0, data_addr = 0;
        u_long seg_size, seg_addr;
        u_long addr, entry = 0, proghdr = 0;
        int error = 0, i;
        const char *interp = NULL;
        Elf_Brandinfo *brand_info;
        char *path;
        struct thread *td = curthread;
        struct sysentvec *sv;

        /*
         * Do we have a valid ELF header?
         */
        if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
                return (-1);

        /*
         * From here on down, we return an errno, not -1, as we've
         * detected an ELF file.
         */

        if ((hdr->e_phoff > PAGE_SIZE) ||
            (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
                /* Only support headers in first page for now */
                return (ENOEXEC);
        }
        phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

        /*
         * From this point on, we may have resources that need to be freed.
         */

        VOP_UNLOCK(imgp->vp, 0, td);

        for (i = 0; i < hdr->e_phnum; i++) {
                switch (phdr[i].p_type) {
                case PT_INTERP: /* Path to interpreter */
                        if (phdr[i].p_filesz > MAXPATHLEN ||
                            phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
                                error = ENOEXEC;
                                goto fail;
                        }
                        interp = imgp->image_header + phdr[i].p_offset;
                        break;
                default:
                        break;
                }
        }

        brand_info = __elfN(get_brandinfo)(hdr, interp);
        if (brand_info == NULL) {
                uprintf("ELF binary type \"%u\" not known.\n",
                    hdr->e_ident[EI_OSABI]);
                error = ENOEXEC;
                goto fail;
        }
        sv = brand_info->sysvec;
        if (interp != NULL && brand_info->interp_newpath != NULL)
                interp = brand_info->interp_newpath;

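        /*
         * exec_new_vmspace() discards the old program's address space, so
         * from here on a failure aborts the exec instead of returning to
         * the original image.
         */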
        exec_new_vmspace(imgp, sv);

        vmspace = imgp->proc->p_vmspace;

        for (i = 0; i < hdr->e_phnum; i++) {
                switch (phdr[i].p_type) {
                case PT_LOAD:   /* Loadable segment */
                        prot = 0;
                        if (phdr[i].p_flags & PF_X)
                                prot |= VM_PROT_EXECUTE;
                        if (phdr[i].p_flags & PF_W)
                                prot |= VM_PROT_WRITE;
                        if (phdr[i].p_flags & PF_R)
                                prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
                        /*
                         * Some x86 binaries assume read == executable,
                         * notably the M3 runtime and therefore cvsup.
                         */
                        if (prot & VM_PROT_READ)
                                prot |= VM_PROT_EXECUTE;
#endif

                        if ((error = __elfN(load_section)(imgp->proc, vmspace,
                            imgp->vp, imgp->object, phdr[i].p_offset,
                            (caddr_t)(uintptr_t)phdr[i].p_vaddr,
                            phdr[i].p_memsz, phdr[i].p_filesz, prot,
                            sv->sv_pagesize)) != 0)
                                goto fail;

                        /*
                         * If this segment contains the program headers,
                         * remember their virtual address for the AT_PHDR
                         * aux entry.  Static binaries don't usually include
                         * a PT_PHDR entry.
                         */
                        if (phdr[i].p_offset == 0 &&
                            hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
                                <= phdr[i].p_filesz)
                                proghdr = phdr[i].p_vaddr + hdr->e_phoff;

                        seg_addr = trunc_page(phdr[i].p_vaddr);
                        seg_size = round_page(phdr[i].p_memsz +
                            phdr[i].p_vaddr - seg_addr);

                        /*
                         * Is this .text or .data?  We can't use
                         * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
                         * alpha terribly and possibly does other bad
                         * things so we stick to the old way of figuring
                         * it out:  If the segment contains the program
                         * entry point, it's a text segment, otherwise it
                         * is a data segment.
                         *
                         * Note that obreak() assumes that data_addr +
                         * data_size == end of data load area, and the ELF
                         * file format expects segments to be sorted by
                         * address.  If multiple data segments exist, the
                         * last one will be used.
                         */
                        if (hdr->e_entry >= phdr[i].p_vaddr &&
                            hdr->e_entry < (phdr[i].p_vaddr +
                            phdr[i].p_memsz)) {
                                text_size = seg_size;
                                text_addr = seg_addr;
                                entry = (u_long)hdr->e_entry;
                        } else {
                                data_size = seg_size;
                                data_addr = seg_addr;
                        }
                        total_size += seg_size;
                        break;
                case PT_PHDR:   /* Program header table info */
                        proghdr = phdr[i].p_vaddr;
                        break;
                default:
                        break;
                }
        }
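
        /*
         * If no segment was classified as data (e.g. a binary whose only
         * loadable segment holds the entry point), account for the text
         * segment as data too, since obreak() keys off data_addr and
         * data_size.
         */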
        if (data_addr == 0 && data_size == 0) {
                data_addr = text_addr;
                data_size = text_size;
        }

        /*
         * Check limits.  It should be safe to check the
         * limits after loading the segments since we do
         * not actually fault in all the segments pages.
         */
        PROC_LOCK(imgp->proc);
        if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
            text_size > maxtsiz ||
            total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
                PROC_UNLOCK(imgp->proc);
                error = ENOMEM;
                goto fail;
        }

        vmspace->vm_tsize = text_size >> PAGE_SHIFT;
        vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
        vmspace->vm_dsize = data_size >> PAGE_SHIFT;
        vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

        /*
         * We load the dynamic linker where a userland call
         * to mmap(0, ...) would put it.  The rationale behind this
         * calculation is that it leaves room for the heap to grow to
         * its maximum allowed size.
         */
        addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
            lim_max(imgp->proc, RLIMIT_DATA));
        PROC_UNLOCK(imgp->proc);

        imgp->entry_addr = entry;

        imgp->proc->p_sysent = sv;
        if (interp != NULL && brand_info->emul_path != NULL &&
            brand_info->emul_path[0] != '\0') {
                path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
                snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
                    interp);
                error = __elfN(load_file)(imgp->proc, path, &addr,
                    &imgp->entry_addr, sv->sv_pagesize);
                free(path, M_TEMP);
                if (error == 0)
                        interp = NULL;
        }
        if (interp != NULL) {
                error = __elfN(load_file)(imgp->proc, interp, &addr,
                    &imgp->entry_addr, sv->sv_pagesize);
                if (error != 0) {
                        uprintf("ELF interpreter %s not found\n", interp);
                        goto fail;
                }
        }

        /*
         * Construct auxargs table (used by the fixup routine)
         */
        elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
        elf_auxargs->execfd = -1;
        elf_auxargs->phdr = proghdr;
        elf_auxargs->phent = hdr->e_phentsize;
        elf_auxargs->phnum = hdr->e_phnum;
        elf_auxargs->pagesz = PAGE_SIZE;
        elf_auxargs->base = addr;
        elf_auxargs->flags = 0;
        elf_auxargs->entry = entry;
        elf_auxargs->trace = elf_trace;

        imgp->auxargs = elf_auxargs;
        imgp->interpreted = 0;

fail:
        vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
        return (error);
}

#define suword __CONCAT(suword, __ELF_WORD_SIZE)
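
/*
 * Place the AT_* aux vector entries just past the argv/envp pointer
 * arrays on the new process's stack, using the Elf_Auxargs recorded by
 * the image activator, then push argc below the stack base.
 */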
int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
        Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
        Elf_Addr *base;
        Elf_Addr *pos;

        base = (Elf_Addr *)*stack_base;
        pos = base + (imgp->args->argc + imgp->args->envc + 2);

        if (args->trace) {
                AUXARGS_ENTRY(pos, AT_DEBUG, 1);
        }
        if (args->execfd != -1) {
                AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
        }
        AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
        AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
        AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
        AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
        AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
        AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
        AUXARGS_ENTRY(pos, AT_BASE, args->base);
        AUXARGS_ENTRY(pos, AT_NULL, 0);

        free(imgp->auxargs, M_TEMP);
        imgp->auxargs = NULL;

        base--;
        suword(base, (long)imgp->args->argc);
        *stack_base = (register_t *)base;
        return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
        Elf_Phdr *phdr;         /* Program header to fill in */
        Elf_Off offset;         /* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
        int count;              /* Count of writable segments. */
        size_t size;            /* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

int
__elfN(coredump)(td, vp, limit)
        struct thread *td;
        struct vnode *vp;
        off_t limit;
{
        struct ucred *cred = td->td_ucred;
        int error = 0;
        struct sseg_closure seginfo;
        void *hdr;
        size_t hdrsize;

        /* Size the program segments. */
        seginfo.count = 0;
        seginfo.size = 0;
        each_writable_segment(td, cb_size_segment, &seginfo);

        /*
         * Calculate the size of the core file header area by making
         * a dry run of generating it.  Nothing is written, but the
         * size is calculated.
         */
        hdrsize = 0;
        __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

        if (hdrsize + seginfo.size >= limit)
                return (EFAULT);

        /*
         * Allocate memory for building the header, fill it up,
         * and write it out.
         */
        hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
        if (hdr == NULL) {
                return (EINVAL);
        }
        error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

        /* Write the contents of all of the writable segments. */
        if (error == 0) {
                Elf_Phdr *php;
                off_t offset;
                int i;

                php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
                offset = hdrsize;
                for (i = 0; i < seginfo.count; i++) {
                        error = vn_rdwr_inchunks(UIO_WRITE, vp,
                            (caddr_t)(uintptr_t)php->p_vaddr,
                            php->p_filesz, offset, UIO_USERSPACE,
                            IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
                            curthread);                 /* XXXKSE */
                        if (error != 0)
                                break;
                        offset += php->p_filesz;
                        php++;
                }
        }
        free(hdr, M_TEMP);

        return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
        vm_map_entry_t entry;
        void *closure;
{
        struct phdr_closure *phc = (struct phdr_closure *)closure;
        Elf_Phdr *phdr = phc->phdr;

        phc->offset = round_page(phc->offset);

        phdr->p_type = PT_LOAD;
        phdr->p_offset = phc->offset;
        phdr->p_vaddr = entry->start;
        phdr->p_paddr = 0;
        phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
        phdr->p_align = PAGE_SIZE;
        phdr->p_flags = 0;
        if (entry->protection & VM_PROT_READ)
                phdr->p_flags |= PF_R;
        if (entry->protection & VM_PROT_WRITE)
                phdr->p_flags |= PF_W;
        if (entry->protection & VM_PROT_EXECUTE)
                phdr->p_flags |= PF_X;

        phc->offset += phdr->p_filesz;
        phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
        vm_map_entry_t entry;
        void *closure;
{
        struct sseg_closure *ssc = (struct sseg_closure *)closure;

        ssc->count++;
        ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
        struct thread *td;
        segment_callback func;
        void *closure;
{
        struct proc *p = td->td_proc;
        vm_map_t map = &p->p_vmspace->vm_map;
        vm_map_entry_t entry;

        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                vm_object_t obj;

                /*
                 * Don't dump inaccessible mappings, deal with legacy
                 * coredump mode.
                 *
                 * Note that read-only segments related to the elf binary
                 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
                 * need to arbitrarily ignore such segments.
                 */
                if (elf_legacy_coredump) {
                        if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
                                continue;
                } else {
                        if ((entry->protection & VM_PROT_ALL) == 0)
                                continue;
                }

                /*
                 * Don't include memory segments in the coredump if
                 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
                 * madvise(2).  Do not dump submaps (i.e. parts of the
                 * kernel map).
                 */
                if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
                        continue;

                if ((obj = entry->object.vm_object) == NULL)
                        continue;

                /* Find the deepest backing object. */
                while (obj->backing_object != NULL)
                        obj = obj->backing_object;

                /* Ignore memory-mapped devices and such things. */
                if (obj->type != OBJT_DEFAULT &&
                    obj->type != OBJT_SWAP &&
                    obj->type != OBJT_VNODE)
                        continue;

                (*func)(entry, closure);
        }
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
        struct thread *td;
        struct vnode *vp;
        struct ucred *cred;
        int numsegs;
        size_t hdrsize;
        void *hdr;
{
        size_t off;

        /* Fill in the header. */
        bzero(hdr, hdrsize);
        off = 0;
        __elfN(puthdr)(td, hdr, &off, numsegs);

        /* Write it to the core file. */
        return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
            UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
            td));       /* XXXKSE */
}
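
/*
 * Generate the core file header area: the ELF header, numsegs + 1
 * program headers, and the note segment.  When "dst" is NULL nothing is
 * written and only *off is advanced, which is how __elfN(coredump)()
 * sizes the header.
 */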
static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
        struct {
                prstatus_t status;
                prfpregset_t fpregset;
                prpsinfo_t psinfo;
        } *tempdata;
        prstatus_t *status;
        prfpregset_t *fpregset;
        prpsinfo_t *psinfo;
        struct proc *p;
        struct thread *thr;
        size_t ehoff, noteoff, notesz, phoff;

        p = td->td_proc;

        ehoff = *off;
        *off += sizeof(Elf_Ehdr);

        phoff = *off;
        *off += (numsegs + 1) * sizeof(Elf_Phdr);

        noteoff = *off;
        /*
         * Don't allocate space for the notes if we're just calculating
         * the size of the header.  We also don't collect the data.
         */
        if (dst != NULL) {
                tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
                status = &tempdata->status;
                fpregset = &tempdata->fpregset;
                psinfo = &tempdata->psinfo;
        } else {
                tempdata = NULL;
                status = NULL;
                fpregset = NULL;
                psinfo = NULL;
        }

        if (dst != NULL) {
                psinfo->pr_version = PRPSINFO_VERSION;
                psinfo->pr_psinfosz = sizeof(prpsinfo_t);
                strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
                /*
                 * XXX - We don't fill in the command line arguments properly
                 * yet.
                 */
                strlcpy(psinfo->pr_psargs, p->p_comm,
                    sizeof(psinfo->pr_psargs));
        }
        __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
            sizeof *psinfo);

        /*
         * To have the debugger select the right thread (LWP) as the initial
         * thread, we dump the state of the thread passed to us in td first.
         * This is the thread that causes the core dump and thus likely to
         * be the right thread one wants to have selected in the debugger.
         */
        thr = td;
        while (thr != NULL) {
                if (dst != NULL) {
                        status->pr_version = PRSTATUS_VERSION;
                        status->pr_statussz = sizeof(prstatus_t);
                        status->pr_gregsetsz = sizeof(gregset_t);
                        status->pr_fpregsetsz = sizeof(fpregset_t);
                        status->pr_osreldate = osreldate;
                        status->pr_cursig = p->p_sig;
                        status->pr_pid = thr->td_tid;
                        fill_regs(thr, &status->pr_reg);
                        fill_fpregs(thr, fpregset);
                }
                __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
                    sizeof *status);
                __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
                    sizeof *fpregset);
                /*
                 * Allow for MD specific notes, as well as any MD
                 * specific preparations for writing MI notes.
                 */
                __elfN(dump_thread)(thr, dst, off);

                thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
                    TAILQ_NEXT(thr, td_plist);
                if (thr == td)
                        thr = TAILQ_NEXT(thr, td_plist);
        }

        notesz = *off - noteoff;

        if (dst != NULL)
                free(tempdata, M_TEMP);

        /* Align up to a page boundary for the program segments. */
        *off = round_page(*off);

        if (dst != NULL) {
                Elf_Ehdr *ehdr;
                Elf_Phdr *phdr;
                struct phdr_closure phc;

                /*
                 * Fill in the ELF header.
                 */
                ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
                ehdr->e_ident[EI_MAG0] = ELFMAG0;
                ehdr->e_ident[EI_MAG1] = ELFMAG1;
                ehdr->e_ident[EI_MAG2] = ELFMAG2;
                ehdr->e_ident[EI_MAG3] = ELFMAG3;
                ehdr->e_ident[EI_CLASS] = ELF_CLASS;
                ehdr->e_ident[EI_DATA] = ELF_DATA;
                ehdr->e_ident[EI_VERSION] = EV_CURRENT;
                ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
                ehdr->e_ident[EI_ABIVERSION] = 0;
                ehdr->e_ident[EI_PAD] = 0;
                ehdr->e_type = ET_CORE;
                ehdr->e_machine = ELF_ARCH;
                ehdr->e_version = EV_CURRENT;
                ehdr->e_entry = 0;
                ehdr->e_phoff = phoff;
                ehdr->e_flags = 0;
                ehdr->e_ehsize = sizeof(Elf_Ehdr);
                ehdr->e_phentsize = sizeof(Elf_Phdr);
                ehdr->e_phnum = numsegs + 1;
                ehdr->e_shentsize = sizeof(Elf_Shdr);
                ehdr->e_shnum = 0;
                ehdr->e_shstrndx = SHN_UNDEF;

                /*
                 * Fill in the program header entries.
                 */
                phdr = (Elf_Phdr *)((char *)dst + phoff);

                /* The note segment. */
                phdr->p_type = PT_NOTE;
                phdr->p_offset = noteoff;
                phdr->p_vaddr = 0;
                phdr->p_paddr = 0;
                phdr->p_filesz = notesz;
                phdr->p_memsz = 0;
                phdr->p_flags = 0;
                phdr->p_align = 0;
                phdr++;

                /* All the writable segments from the program. */
                phc.phdr = phdr;
                phc.offset = *off;
                each_writable_segment(td, cb_put_phdr, &phc);
        }
}
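
/*
 * Emit one ELF note at *off: the note header followed by the name and
 * descriptor, each padded to Elf_Size alignment.  With a NULL "dst",
 * only the offset is advanced.
 */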
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
        Elf_Note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = descsz;
        note.n_type = type;
        if (dst != NULL)
                bcopy(&note, (char *)dst + *off, sizeof note);
        *off += sizeof note;
        if (dst != NULL)
                bcopy(name, (char *)dst + *off, note.n_namesz);
        *off += roundup2(note.n_namesz, sizeof(Elf_Size));
        if (dst != NULL)
                bcopy(desc, (char *)dst + *off, note.n_descsz);
        *off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
        __CONCAT(exec_, __elfN(imgact)),
        __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));