/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define OLD_EI_BRAND    8

__ElfType(Brandinfo);
__ElfType(Auxargs);

static int elf_check_header __P((const Elf_Ehdr *hdr));
static int elf_freebsd_fixup __P((register_t **stack_base,
    struct image_params *imgp));
static int elf_load_file __P((struct proc *p, const char *file, u_long *addr,
    u_long *entry));
static int elf_load_section __P((struct proc *p,
    struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot));
static int exec_elf_imgact __P((struct image_params *imgp));

static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, elf_trace, CTLFLAG_RW, &elf_trace, 0, "");

struct sysentvec elf_freebsd_sysvec = {
        SYS_MAXSYSCALL,
        sysent,
        0,
        0,
        0,
        0,
        0,
        0,
        elf_freebsd_fixup,
        sendsig,
        sigcode,
        &szsigcode,
        0,
        "FreeBSD ELF",
        elf_coredump,
        NULL,
        MINSIGSTKSZ
};

static Elf_Brandinfo freebsd_brand_info = {
        ELFOSABI_FREEBSD,
        "FreeBSD",
        "",
        "/usr/libexec/ld-elf.so.1",
        &elf_freebsd_sysvec
};
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS] = {
        &freebsd_brand_info,
        NULL, NULL, NULL,
        NULL, NULL, NULL, NULL
};

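/*
 * Register a new ELF brand so that its binaries can be recognized by the
 * image activator.  Returns 0 on success or -1 if the brand table is full.
 */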
int
elf_insert_brand_entry(Elf_Brandinfo *entry)
{
        int i;

        for (i=1; i<MAX_BRANDS; i++) {
                if (elf_brand_list[i] == NULL) {
                        elf_brand_list[i] = entry;
                        break;
                }
        }
        if (i == MAX_BRANDS)
                return -1;
        return 0;
}

int
elf_remove_brand_entry(Elf_Brandinfo *entry)
{
        int i;

        for (i=1; i<MAX_BRANDS; i++) {
                if (elf_brand_list[i] == entry) {
                        elf_brand_list[i] = NULL;
                        break;
                }
        }
        if (i == MAX_BRANDS)
                return -1;
        return 0;
}

int
elf_brand_inuse(Elf_Brandinfo *entry)
{
        struct proc *p;
        int rval = FALSE;

        sx_slock(&allproc_lock);
        LIST_FOREACH(p, &allproc, p_list) {
                if (p->p_sysent == entry->sysvec) {
                        rval = TRUE;
                        break;
                }
        }
        sx_sunlock(&allproc_lock);

        return (rval);
}

static int
elf_check_header(const Elf_Ehdr *hdr)
{
        if (!IS_ELF(*hdr) ||
            hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
            hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
            hdr->e_ident[EI_VERSION] != EV_CURRENT)
                return ENOEXEC;

        if (!ELF_MACHINE_OK(hdr->e_machine))
                return ENOEXEC;

        if (hdr->e_version != ELF_TARG_VER)
                return ENOEXEC;

        return 0;
}

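/*
 * Map a single PT_LOAD segment from the vnode into the process's address
 * space: a copy-on-write file mapping for the initialized part, zero-fill
 * pages for any bss, and finally the requested protection on the result.
 */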
static int
elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
        size_t map_len;
        vm_offset_t map_addr;
        int error, rv;
        size_t copy_len;
        vm_object_t object;
        vm_offset_t file_addr;
        vm_offset_t data_buf = 0;

        VOP_GETVOBJECT(vp, &object);
        error = 0;

        /*
         * It's necessary to fail if the filsz + offset taken from the
         * header is greater than the actual file pager object's size.
         * If we were to allow this, then the vm_map_find() below would
         * walk right off the end of the file object and into the ether.
         *
         * While I'm here, might as well check for something else that
         * is invalid: filsz cannot be greater than memsz.
         */
        if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
            filsz > memsz) {
                uprintf("elf_load_section: truncated ELF file\n");
                return (ENOEXEC);
        }

        map_addr = trunc_page((vm_offset_t)vmaddr);
        file_addr = trunc_page(offset);

        /*
         * We have two choices. We can either clear the data in the last page
         * of an oversized mapping, or we can start the anon mapping a page
         * early and copy the initialized data into that first page. We
         * choose the second..
         */
        if (memsz > filsz)
                map_len = trunc_page(offset+filsz) - file_addr;
        else
                map_len = round_page(offset+filsz) - file_addr;

        mtx_lock(&vm_mtx);
        if (map_len != 0) {
                vm_object_reference(object);
                vm_map_lock(&vmspace->vm_map);
                rv = vm_map_insert(&vmspace->vm_map,
                                      object,
                                      file_addr,        /* file offset */
                                      map_addr,         /* virtual start */
                                      map_addr + map_len,/* virtual end */
                                      prot,
                                      VM_PROT_ALL,
                                      MAP_COPY_ON_WRITE | MAP_PREFAULT);
                vm_map_unlock(&vmspace->vm_map);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        mtx_unlock(&vm_mtx);
                        return EINVAL;
                }

                /* we can stop now if we've covered it all */
                if (memsz == filsz) {
                        mtx_unlock(&vm_mtx);
                        return 0;
                }
        }


        /*
         * We have to get the remaining bit of the file into the first part
         * of the oversized map segment. This is normally because the .data
         * segment in the file is extended to provide bss. It's a neat idea
         * to try and save a page, but it's a pain in the behind to implement.
         */
        copy_len = (offset + filsz) - trunc_page(offset + filsz);
        map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
        map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

        /* This had damn well better be true! */
        if (map_len != 0) {
                vm_map_lock(&vmspace->vm_map);
                rv = vm_map_insert(&vmspace->vm_map, NULL, 0,
                                        map_addr, map_addr + map_len,
                                        VM_PROT_ALL, VM_PROT_ALL, 0);
                vm_map_unlock(&vmspace->vm_map);
                if (rv != KERN_SUCCESS) {
                        mtx_unlock(&vm_mtx);
                        return EINVAL;
                }
        }

        if (copy_len != 0) {
                vm_object_reference(object);
                rv = vm_map_find(exec_map,
                                 object,
                                 trunc_page(offset + filsz),
                                 &data_buf,
                                 PAGE_SIZE,
                                 TRUE,
                                 VM_PROT_READ,
                                 VM_PROT_ALL,
                                 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        mtx_unlock(&vm_mtx);
                        return EINVAL;
                }

                /* send the page fragment to user space */
                mtx_unlock(&vm_mtx);
                error = copyout((caddr_t)data_buf, (caddr_t)map_addr, copy_len);
                mtx_lock(&vm_mtx);
                vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
                if (error) {
                        mtx_unlock(&vm_mtx);
                        return (error);
                }
        }

        /*
         * set it to the specified protection
         */
        vm_map_protect(&vmspace->vm_map, map_addr, map_addr + map_len, prot,
            FALSE);

        mtx_unlock(&vm_mtx);
        return error;
}

/*
 * Load the file "file" into memory. It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out. On entry, it specifies
 * the address where a shared object should be loaded. If the file is
 * an executable, this value is ignored. On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only. On exit, it specifies
 * the entry point for the loaded file.
 */
static int
elf_load_file(struct proc *p, const char *file, u_long *addr, u_long *entry)
{
        const Elf_Ehdr *hdr = NULL;
        const Elf_Phdr *phdr = NULL;
        struct nameidata nd;
        struct vmspace *vmspace = p->p_vmspace;
        struct vattr attr;
        struct image_params image_params, *imgp;
        vm_prot_t prot;
        u_long rbase;
        u_long base_addr = 0;
        int error, i, numsegs;

        imgp = &image_params;
        /*
         * Initialize part of the common data
         */
        imgp->proc = p;
        imgp->uap = NULL;
        imgp->attr = &attr;
        imgp->firstpage = NULL;
        imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);

        if (imgp->image_header == NULL) {
                nd.ni_vp = NULL;
                error = ENOMEM;
                goto fail;
        }

        NDINIT(&nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, p);

        if ((error = namei(&nd)) != 0) {
                nd.ni_vp = NULL;
                goto fail;
        }
        NDFREE(&nd, NDF_ONLY_PNBUF);
        imgp->vp = nd.ni_vp;

        /*
         * Check permissions, modes, uid, etc on the file, and "open" it.
         */
        error = exec_check_permissions(imgp);
        if (error) {
                VOP_UNLOCK(nd.ni_vp, 0, p);
                goto fail;
        }

        error = exec_map_first_page(imgp);
        /*
         * Also make certain that the interpreter stays the same, so set
         * its VTEXT flag, too.
         */
        if (error == 0)
                nd.ni_vp->v_flag |= VTEXT;
        VOP_UNLOCK(nd.ni_vp, 0, p);
        if (error)
                goto fail;

        hdr = (const Elf_Ehdr *)imgp->image_header;
        if ((error = elf_check_header(hdr)) != 0)
                goto fail;
        if (hdr->e_type == ET_DYN)
                rbase = *addr;
        else if (hdr->e_type == ET_EXEC)
                rbase = 0;
        else {
                error = ENOEXEC;
                goto fail;
        }

        /* Only support headers that fit within first page for now */
        if ((hdr->e_phoff > PAGE_SIZE) ||
            (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
                error = ENOEXEC;
                goto fail;
        }

        phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

        for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
                if (phdr[i].p_type == PT_LOAD) {        /* Loadable segment */
                        prot = 0;
                        if (phdr[i].p_flags & PF_X)
                                prot |= VM_PROT_EXECUTE;
                        if (phdr[i].p_flags & PF_W)
                                prot |= VM_PROT_WRITE;
                        if (phdr[i].p_flags & PF_R)
                                prot |= VM_PROT_READ;

                        if ((error = elf_load_section(p, vmspace, nd.ni_vp,
                                                      phdr[i].p_offset,
                                                      (caddr_t)phdr[i].p_vaddr +
                                                      rbase,
                                                      phdr[i].p_memsz,
                                                      phdr[i].p_filesz, prot)) != 0)
                                goto fail;
                        /*
                         * Establish the base address if this is the
                         * first segment.
                         */
                        if (numsegs == 0)
                                base_addr = trunc_page(phdr[i].p_vaddr + rbase);
                        numsegs++;
                }
        }
        *addr = base_addr;
        *entry=(unsigned long)hdr->e_entry + rbase;

fail:
        if (imgp->firstpage)
                exec_unmap_first_page(imgp);
        if (imgp->image_header)
                kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
                    PAGE_SIZE);
        if (nd.ni_vp)
                vrele(nd.ni_vp);

        return error;
}

/*
 * non static, as it can be overridden by start_init()
 */
int fallback_elf_brand = -1;
SYSCTL_INT(_kern, OID_AUTO, fallback_elf_brand, CTLFLAG_RW,
    &fallback_elf_brand, -1,
    "ELF brand of last resort");

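/*
 * The ELF image activator: validate the header, map in the PT_LOAD
 * segments, record the interpreter and program header locations, pick a
 * brand, and build the auxiliary arguments for elf_freebsd_fixup().
 */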
static int
exec_elf_imgact(struct image_params *imgp)
{
        const Elf_Ehdr *hdr = (const Elf_Ehdr *) imgp->image_header;
        const Elf_Phdr *phdr;
        Elf_Auxargs *elf_auxargs = NULL;
        struct vmspace *vmspace;
        vm_prot_t prot;
        u_long text_size = 0, data_size = 0;
        u_long text_addr = 0, data_addr = 0;
        u_long addr, entry = 0, proghdr = 0;
        int error, i;
        const char *interp = NULL;
        Elf_Brandinfo *brand_info;
        char path[MAXPATHLEN];

        /*
         * Do we have a valid ELF header ?
         */
        if (elf_check_header(hdr) != 0 || hdr->e_type != ET_EXEC)
                return -1;

        /*
         * From here on down, we return an errno, not -1, as we've
         * detected an ELF file.
         */

        if ((hdr->e_phoff > PAGE_SIZE) ||
            (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
                /* Only support headers in first page for now */
                return ENOEXEC;
        }
        phdr = (const Elf_Phdr*)(imgp->image_header + hdr->e_phoff);

        /*
         * From this point on, we may have resources that need to be freed.
         */

        /*
         * Yeah, I'm paranoid. There is every reason in the world to get
         * VTEXT now since from here on out, there are places we can have
         * a context switch. Better safe than sorry; I really don't want
         * the file to change while it's being loaded.
         */
        mtx_lock(&imgp->vp->v_interlock);
        imgp->vp->v_flag |= VTEXT;
        mtx_unlock(&imgp->vp->v_interlock);

        if ((error = exec_extract_strings(imgp)) != 0)
                goto fail;

        mtx_lock(&vm_mtx);
        exec_new_vmspace(imgp);

        vmspace = imgp->proc->p_vmspace;
        mtx_unlock(&vm_mtx);

        for (i = 0; i < hdr->e_phnum; i++) {
                switch(phdr[i].p_type) {

                case PT_LOAD:   /* Loadable segment */
                        prot = 0;
                        if (phdr[i].p_flags & PF_X)
                                prot |= VM_PROT_EXECUTE;
                        if (phdr[i].p_flags & PF_W)
                                prot |= VM_PROT_WRITE;
                        if (phdr[i].p_flags & PF_R)
                                prot |= VM_PROT_READ;

                        if ((error = elf_load_section(imgp->proc,
                                                      vmspace, imgp->vp,
                                                      phdr[i].p_offset,
                                                      (caddr_t)phdr[i].p_vaddr,
                                                      phdr[i].p_memsz,
                                                      phdr[i].p_filesz, prot)) != 0)
                                goto fail;

                        /*
                         * Is this .text or .data ??
                         *
                         * We only handle one each of those yet XXX
                         */
                        if (hdr->e_entry >= phdr[i].p_vaddr &&
                            hdr->e_entry <(phdr[i].p_vaddr+phdr[i].p_memsz)) {
                                text_addr = trunc_page(phdr[i].p_vaddr);
                                text_size = round_page(phdr[i].p_memsz +
                                                       phdr[i].p_vaddr -
                                                       text_addr);
                                entry = (u_long)hdr->e_entry;
                        } else {
                                data_addr = trunc_page(phdr[i].p_vaddr);
                                data_size = round_page(phdr[i].p_memsz +
                                                       phdr[i].p_vaddr -
                                                       data_addr);
                        }
                        break;
                case PT_INTERP: /* Path to interpreter */
                        if (phdr[i].p_filesz > MAXPATHLEN ||
                            phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
                                error = ENOEXEC;
                                goto fail;
                        }
                        interp = imgp->image_header + phdr[i].p_offset;
                        break;
                case PT_PHDR:   /* Program header table info */
                        proghdr = phdr[i].p_vaddr;
                        break;
                default:
                        break;
                }
        }

        mtx_lock(&vm_mtx);
        vmspace->vm_tsize = text_size >> PAGE_SHIFT;
        vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
        vmspace->vm_dsize = data_size >> PAGE_SHIFT;
        vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
        mtx_unlock(&vm_mtx);

        addr = ELF_RTLD_ADDR(vmspace);

        imgp->entry_addr = entry;

        brand_info = NULL;

        /* We support three types of branding -- (1) the ELF EI_OSABI field
         * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
         * branding w/in the ELF header, and (3) path of the `interp_path'
         * field. We should also look for an ".note.ABI-tag" ELF section now
         * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
         */

        /* If the executable has a brand, search for it in the brand list. */
        if (brand_info == NULL) {
                for (i = 0; i < MAX_BRANDS; i++) {
                        Elf_Brandinfo *bi = elf_brand_list[i];

                        if (bi != NULL &&
                            (hdr->e_ident[EI_OSABI] == bi->brand
                            || 0 ==
                            strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
                            bi->compat_3_brand, strlen(bi->compat_3_brand)))) {
                                brand_info = bi;
                                break;
                        }
                }
        }

        /* Lacking a known brand, search for a recognized interpreter. */
        if (brand_info == NULL && interp != NULL) {
                for (i = 0; i < MAX_BRANDS; i++) {
                        Elf_Brandinfo *bi = elf_brand_list[i];

                        if (bi != NULL &&
                            strcmp(interp, bi->interp_path) == 0) {
                                brand_info = bi;
                                break;
                        }
                }
        }

        /* Lacking a recognized interpreter, try the default brand */
        if (brand_info == NULL) {
                for (i = 0; i < MAX_BRANDS; i++) {
                        Elf_Brandinfo *bi = elf_brand_list[i];

                        if (bi != NULL && fallback_elf_brand == bi->brand) {
                                brand_info = bi;
                                break;
                        }
                }
        }

        if (brand_info == NULL) {
                uprintf("ELF binary type \"%u\" not known.\n",
                    hdr->e_ident[EI_OSABI]);
                error = ENOEXEC;
                goto fail;
        }

        imgp->proc->p_sysent = brand_info->sysvec;
        if (interp != NULL) {
                snprintf(path, sizeof(path), "%s%s",
                         brand_info->emul_path, interp);
                if ((error = elf_load_file(imgp->proc, path, &addr,
                                           &imgp->entry_addr)) != 0) {
                        if ((error = elf_load_file(imgp->proc, interp, &addr,
                                                   &imgp->entry_addr)) != 0) {
                                uprintf("ELF interpreter %s not found\n", path);
                                goto fail;
                        }
                }
        }

        /*
         * Construct auxargs table (used by the fixup routine)
         */
        elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
        elf_auxargs->execfd = -1;
        elf_auxargs->phdr = proghdr;
        elf_auxargs->phent = hdr->e_phentsize;
        elf_auxargs->phnum = hdr->e_phnum;
        elf_auxargs->pagesz = PAGE_SIZE;
        elf_auxargs->base = addr;
        elf_auxargs->flags = 0;
        elf_auxargs->entry = entry;
        elf_auxargs->trace = elf_trace;

        imgp->auxargs = elf_auxargs;
        imgp->interpreted = 0;

fail:
        return error;
}

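/*
 * Place the ELF auxiliary argument vector on the new process's stack,
 * just past the argument and environment pointers, and push argc.
 */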
static int
elf_freebsd_fixup(register_t **stack_base, struct image_params *imgp)
{
        Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
        register_t *pos;

        pos = *stack_base + (imgp->argc + imgp->envc + 2);

        if (args->trace) {
                AUXARGS_ENTRY(pos, AT_DEBUG, 1);
        }
        if (args->execfd != -1) {
                AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
        }
        AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
        AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
        AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
        AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
        AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
        AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
        AUXARGS_ENTRY(pos, AT_BASE, args->base);
        AUXARGS_ENTRY(pos, AT_NULL, 0);

        free(imgp->auxargs, M_TEMP);
        imgp->auxargs = NULL;

        (*stack_base)--;
        suword(*stack_base, (long) imgp->argc);
        return 0;
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback) __P((vm_map_entry_t, void *));

/* Closure for cb_put_phdr(). */
struct phdr_closure {
        Elf_Phdr *phdr;         /* Program header to fill in */
        Elf_Off offset;         /* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
        int count;              /* Count of writable segments. */
        size_t size;            /* Total size of all writable segments. */
};

static void cb_put_phdr __P((vm_map_entry_t, void *));
static void cb_size_segment __P((vm_map_entry_t, void *));
static void each_writable_segment __P((struct proc *, segment_callback,
    void *));
static int elf_corehdr __P((struct proc *, struct vnode *, struct ucred *,
    int, void *, size_t));
static void elf_puthdr __P((struct proc *, void *, size_t *,
    const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int));
static void elf_putnote __P((void *, size_t *, const char *, int,
    const void *, size_t));

extern int osreldate;

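/*
 * Write an ELF core dump for process "p" to vnode "vp", refusing to write
 * more than "limit" bytes.  The header area is sized and built in memory
 * first; the writable segments are then written straight from user space.
 */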
int
elf_coredump(p, vp, limit)
        register struct proc *p;
        register struct vnode *vp;
        off_t limit;
{
        register struct ucred *cred = p->p_ucred;
        int error = 0;
        struct sseg_closure seginfo;
        void *hdr;
        size_t hdrsize;

        /* Size the program segments. */
        seginfo.count = 0;
        seginfo.size = 0;
        each_writable_segment(p, cb_size_segment, &seginfo);

        /*
         * Calculate the size of the core file header area by making
         * a dry run of generating it. Nothing is written, but the
         * size is calculated.
         */
        hdrsize = 0;
        elf_puthdr((struct proc *)NULL, (void *)NULL, &hdrsize,
            (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
            (const prpsinfo_t *)NULL, seginfo.count);

        if (hdrsize + seginfo.size >= limit)
                return (EFAULT);

        /*
         * Allocate memory for building the header, fill it up,
         * and write it out.
         */
        hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
        if (hdr == NULL) {
                return EINVAL;
        }
        error = elf_corehdr(p, vp, cred, seginfo.count, hdr, hdrsize);

        /* Write the contents of all of the writable segments. */
        if (error == 0) {
                Elf_Phdr *php;
                off_t offset;
                int i;

                php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
                offset = hdrsize;
                for (i = 0; i < seginfo.count; i++) {
                        error = vn_rdwr(UIO_WRITE, vp, (caddr_t)php->p_vaddr,
                            php->p_filesz, offset, UIO_USERSPACE,
                            IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p);
                        if (error != 0)
                                break;
                        offset += php->p_filesz;
                        php++;
                }
        }
        free(hdr, M_TEMP);

        return error;
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
        vm_map_entry_t entry;
        void *closure;
{
        struct phdr_closure *phc = (struct phdr_closure *)closure;
        Elf_Phdr *phdr = phc->phdr;

        phc->offset = round_page(phc->offset);

        phdr->p_type = PT_LOAD;
        phdr->p_offset = phc->offset;
        phdr->p_vaddr = entry->start;
        phdr->p_paddr = 0;
        phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
        phdr->p_align = PAGE_SIZE;
        phdr->p_flags = 0;
        if (entry->protection & VM_PROT_READ)
                phdr->p_flags |= PF_R;
        if (entry->protection & VM_PROT_WRITE)
                phdr->p_flags |= PF_W;
        if (entry->protection & VM_PROT_EXECUTE)
                phdr->p_flags |= PF_X;

        phc->offset += phdr->p_filesz;
        phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
837 */ 838 static void 839 cb_size_segment(entry, closure) 840 vm_map_entry_t entry; 841 void *closure; 842 { 843 struct sseg_closure *ssc = (struct sseg_closure *)closure; 844 845 ssc->count++; 846 ssc->size += entry->end - entry->start; 847 } 848 849 /* 850 * For each writable segment in the process's memory map, call the given 851 * function with a pointer to the map entry and some arbitrary 852 * caller-supplied data. 853 */ 854 static void 855 each_writable_segment(p, func, closure) 856 struct proc *p; 857 segment_callback func; 858 void *closure; 859 { 860 vm_map_t map = &p->p_vmspace->vm_map; 861 vm_map_entry_t entry; 862 863 for (entry = map->header.next; entry != &map->header; 864 entry = entry->next) { 865 vm_object_t obj; 866 867 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) || 868 (entry->protection & (VM_PROT_READ|VM_PROT_WRITE)) != 869 (VM_PROT_READ|VM_PROT_WRITE)) 870 continue; 871 872 /* 873 ** Dont include memory segment in the coredump if 874 ** MAP_NOCORE is set in mmap(2) or MADV_NOCORE in 875 ** madvise(2). 876 */ 877 if (entry->eflags & MAP_ENTRY_NOCOREDUMP) 878 continue; 879 880 if ((obj = entry->object.vm_object) == NULL) 881 continue; 882 883 /* Find the deepest backing object. */ 884 while (obj->backing_object != NULL) 885 obj = obj->backing_object; 886 887 /* Ignore memory-mapped devices and such things. */ 888 if (obj->type != OBJT_DEFAULT && 889 obj->type != OBJT_SWAP && 890 obj->type != OBJT_VNODE) 891 continue; 892 893 (*func)(entry, closure); 894 } 895 } 896 897 /* 898 * Write the core file header to the file, including padding up to 899 * the page boundary. 900 */ 901 static int 902 elf_corehdr(p, vp, cred, numsegs, hdr, hdrsize) 903 struct proc *p; 904 struct vnode *vp; 905 struct ucred *cred; 906 int numsegs; 907 size_t hdrsize; 908 void *hdr; 909 { 910 size_t off; 911 prstatus_t status; 912 prfpregset_t fpregset; 913 prpsinfo_t psinfo; 914 915 /* Gather the information for the header. */ 916 bzero(&status, sizeof status); 917 status.pr_version = PRSTATUS_VERSION; 918 status.pr_statussz = sizeof(prstatus_t); 919 status.pr_gregsetsz = sizeof(gregset_t); 920 status.pr_fpregsetsz = sizeof(fpregset_t); 921 status.pr_osreldate = osreldate; 922 status.pr_cursig = p->p_sig; 923 status.pr_pid = p->p_pid; 924 fill_regs(p, &status.pr_reg); 925 926 fill_fpregs(p, &fpregset); 927 928 bzero(&psinfo, sizeof psinfo); 929 psinfo.pr_version = PRPSINFO_VERSION; 930 psinfo.pr_psinfosz = sizeof(prpsinfo_t); 931 strncpy(psinfo.pr_fname, p->p_comm, MAXCOMLEN); 932 /* XXX - We don't fill in the command line arguments properly yet. */ 933 strncpy(psinfo.pr_psargs, p->p_comm, PRARGSZ); 934 935 /* Fill in the header. */ 936 bzero(hdr, hdrsize); 937 off = 0; 938 elf_puthdr(p, hdr, &off, &status, &fpregset, &psinfo, numsegs); 939 940 /* Write it to the core file. 
static void
elf_puthdr(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
    const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
{
        size_t ehoff;
        size_t phoff;
        size_t noteoff;
        size_t notesz;

        ehoff = *off;
        *off += sizeof(Elf_Ehdr);

        phoff = *off;
        *off += (numsegs + 1) * sizeof(Elf_Phdr);

        noteoff = *off;
        elf_putnote(dst, off, "FreeBSD", NT_PRSTATUS, status,
            sizeof *status);
        elf_putnote(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
            sizeof *fpregset);
        elf_putnote(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
            sizeof *psinfo);
        notesz = *off - noteoff;

        /* Align up to a page boundary for the program segments. */
        *off = round_page(*off);

        if (dst != NULL) {
                Elf_Ehdr *ehdr;
                Elf_Phdr *phdr;
                struct phdr_closure phc;

                /*
                 * Fill in the ELF header.
                 */
                ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
                ehdr->e_ident[EI_MAG0] = ELFMAG0;
                ehdr->e_ident[EI_MAG1] = ELFMAG1;
                ehdr->e_ident[EI_MAG2] = ELFMAG2;
                ehdr->e_ident[EI_MAG3] = ELFMAG3;
                ehdr->e_ident[EI_CLASS] = ELF_CLASS;
                ehdr->e_ident[EI_DATA] = ELF_DATA;
                ehdr->e_ident[EI_VERSION] = EV_CURRENT;
                ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
                ehdr->e_ident[EI_ABIVERSION] = 0;
                ehdr->e_ident[EI_PAD] = 0;
                ehdr->e_type = ET_CORE;
                ehdr->e_machine = ELF_ARCH;
                ehdr->e_version = EV_CURRENT;
                ehdr->e_entry = 0;
                ehdr->e_phoff = phoff;
                ehdr->e_flags = 0;
                ehdr->e_ehsize = sizeof(Elf_Ehdr);
                ehdr->e_phentsize = sizeof(Elf_Phdr);
                ehdr->e_phnum = numsegs + 1;
                ehdr->e_shentsize = sizeof(Elf_Shdr);
                ehdr->e_shnum = 0;
                ehdr->e_shstrndx = SHN_UNDEF;

                /*
                 * Fill in the program header entries.
                 */
                phdr = (Elf_Phdr *)((char *)dst + phoff);

                /* The note segment. */
                phdr->p_type = PT_NOTE;
                phdr->p_offset = noteoff;
                phdr->p_vaddr = 0;
                phdr->p_paddr = 0;
                phdr->p_filesz = notesz;
                phdr->p_memsz = 0;
                phdr->p_flags = 0;
                phdr->p_align = 0;
                phdr++;

                /* All the writable segments from the program. */
                phc.phdr = phdr;
                phc.offset = *off;
                each_writable_segment(p, cb_put_phdr, &phc);
        }
}

static void
elf_putnote(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
        Elf_Note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = descsz;
        note.n_type = type;
        if (dst != NULL)
                bcopy(&note, (char *)dst + *off, sizeof note);
        *off += sizeof note;
        if (dst != NULL)
                bcopy(name, (char *)dst + *off, note.n_namesz);
        *off += roundup2(note.n_namesz, sizeof(Elf_Size));
        if (dst != NULL)
                bcopy(desc, (char *)dst + *off, note.n_descsz);
        *off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw elf_execsw = {exec_elf_imgact, "ELF"};
EXEC_SET(elf, elf_execsw);