/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define OLD_EI_BRAND	8

__ElfType(Brandinfo);
__ElfType(Auxargs);

static int elf_check_header __P((const Elf_Ehdr *hdr));
static int elf_freebsd_fixup __P((register_t **stack_base,
    struct image_params *imgp));
static int elf_load_file __P((struct proc *p, const char *file, u_long *addr,
    u_long *entry));
static int elf_load_section __P((struct proc *p,
    struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot));
static int exec_elf_imgact __P((struct image_params *imgp));

static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, elf_trace, CTLFLAG_RW, &elf_trace, 0, "");

struct sysentvec elf_freebsd_sysvec = {
	SYS_MAXSYSCALL,
	sysent,
	0,
	0,
	0,
	0,
	0,
	0,
	elf_freebsd_fixup,
	sendsig,
	sigcode,
	&szsigcode,
	0,
	"FreeBSD ELF",
	elf_coredump,
	NULL,
	MINSIGSTKSZ
};

static Elf_Brandinfo freebsd_brand_info = {
	ELFOSABI_FREEBSD,
	"FreeBSD",
	"",
	"/usr/libexec/ld-elf.so.1",
	&elf_freebsd_sysvec
};
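
/*
 * Table of known ELF brands.  Slot 0 is the statically compiled-in FreeBSD
 * brand; the remaining slots are filled and emptied at run time through
 * elf_insert_brand_entry() and elf_remove_brand_entry() (for example, by
 * ABI emulation modules registering their own brand).
 */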
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS] = {
	&freebsd_brand_info,
	NULL, NULL, NULL,
	NULL, NULL, NULL, NULL
};

int
elf_insert_brand_entry(Elf_Brandinfo *entry)
{
	int i;

	for (i = 1; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return -1;
	return 0;
}

int
elf_remove_brand_entry(Elf_Brandinfo *entry)
{
	int i;

	for (i = 1; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return -1;
	return 0;
}

int
elf_brand_inuse(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static int
elf_check_header(const Elf_Ehdr *hdr)
{
	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT)
		return ENOEXEC;

	if (!ELF_MACHINE_OK(hdr->e_machine))
		return ENOEXEC;

	if (hdr->e_version != ELF_TARG_VER)
		return ENOEXEC;

	return 0;
}
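
/*
 * Map a single loadable (PT_LOAD) segment into the process address space.
 * The file-backed portion is inserted copy-on-write; any bss beyond the end
 * of the file data is backed by zero-filled anonymous memory, and the
 * partial page where the two meet is copied in by hand via the exec_map.
 */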
static int
elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv;
	size_t copy_len;
	vm_object_t object;
	vm_offset_t file_addr;
	vm_offset_t data_buf = 0;

	VOP_GETVOBJECT(vp, &object);
	error = 0;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	map_addr = trunc_page((vm_offset_t)vmaddr);
	file_addr = trunc_page(offset);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page(offset + filsz) - file_addr;
	else
		map_len = round_page(offset + filsz) - file_addr;

	if (map_len != 0) {
		vm_object_reference(object);
		vm_map_lock(&vmspace->vm_map);
		rv = vm_map_insert(&vmspace->vm_map,
		    object,
		    file_addr,		/* file offset */
		    map_addr,		/* virtual start */
		    map_addr + map_len,	/* virtual end */
		    prot,
		    VM_PROT_ALL,
		    MAP_COPY_ON_WRITE | MAP_PREFAULT);
		vm_map_unlock(&vmspace->vm_map);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return EINVAL;
		}

		/* we can stop now if we've covered it all */
		if (memsz == filsz)
			return 0;
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		vm_map_lock(&vmspace->vm_map);
		rv = vm_map_insert(&vmspace->vm_map, NULL, 0,
		    map_addr, map_addr + map_len,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		vm_map_unlock(&vmspace->vm_map);
		if (rv != KERN_SUCCESS)
			return EINVAL;
	}

	if (copy_len != 0) {
		vm_object_reference(object);
		rv = vm_map_find(exec_map,
		    object,
		    trunc_page(offset + filsz),
		    &data_buf,
		    PAGE_SIZE,
		    TRUE,
		    VM_PROT_READ,
		    VM_PROT_ALL,
		    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return EINVAL;
		}

		/* send the page fragment to user space */
		error = copyout((caddr_t)data_buf, (caddr_t)map_addr, copy_len);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error)
			return (error);
	}

	/*
	 * set it to the specified protection
	 */
	vm_map_protect(&vmspace->vm_map, map_addr, map_addr + map_len, prot,
	    FALSE);

	return error;
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
elf_load_file(struct proc *p, const char *file, u_long *addr, u_long *entry)
{
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr attr;
	struct image_params image_params, *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int error, i, numsegs;

	imgp = &image_params;
	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->uap = NULL;
	imgp->attr = &attr;
	imgp->firstpage = NULL;
	imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);

	if (imgp->image_header == NULL) {
		nd.ni_vp = NULL;
		error = ENOMEM;
		goto fail;
	}

	NDINIT(&nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, p);

	if ((error = namei(&nd)) != 0) {
		nd.ni_vp = NULL;
		goto fail;
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	imgp->vp = nd.ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(nd.ni_vp, 0, p);
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VTEXT flag, too.
	 */
	if (error == 0)
		nd.ni_vp->v_flag |= VTEXT;
	VOP_UNLOCK(nd.ni_vp, 0, p);
	if (error)
		goto fail;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = elf_check_header(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = elf_load_section(p, vmspace, nd.ni_vp,
			    phdr[i].p_offset,
			    (caddr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz,
			    phdr[i].p_filesz, prot)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr + rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->image_header)
		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
		    PAGE_SIZE);
	if (nd.ni_vp)
		vrele(nd.ni_vp);

	return error;
}

/*
 * non static, as it can be overridden by start_init()
 */
int fallback_elf_brand = -1;
SYSCTL_INT(_kern, OID_AUTO, fallback_elf_brand, CTLFLAG_RW,
    &fallback_elf_brand, -1,
    "ELF brand of last resort");

static int
exec_elf_imgact(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long addr, entry = 0, proghdr = 0;
	int error, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char path[MAXPATHLEN];

	/*
	 * Do we have a valid ELF header ?
	 */
	if (elf_check_header(hdr) != 0 || hdr->e_type != ET_EXEC)
		return -1;

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return ENOEXEC;
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/*
	 * From this point on, we may have resources that need to be freed.
	 */

	/*
	 * Yeah, I'm paranoid.  There is every reason in the world to get
	 * VTEXT now since from here on out, there are places we can have
	 * a context switch.  Better safe than sorry; I really don't want
	 * the file to change while it's being loaded.
	 */
	mtx_lock(&imgp->vp->v_interlock);
	imgp->vp->v_flag |= VTEXT;
	mtx_unlock(&imgp->vp->v_interlock);

	if ((error = exec_extract_strings(imgp)) != 0)
		goto fail;

	exec_new_vmspace(imgp);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {

		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = elf_load_section(imgp->proc,
			    vmspace, imgp->vp,
			    phdr[i].p_offset,
			    (caddr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz,
			    phdr[i].p_filesz, prot)) != 0)
				goto fail;

			/*
			 * Is this .text or .data ??
			 *
			 * We only handle one each of those yet XXX
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr + phdr[i].p_memsz)) {
				text_addr = trunc_page(phdr[i].p_vaddr);
				text_size = round_page(phdr[i].p_memsz +
				    phdr[i].p_vaddr - text_addr);
				entry = (u_long)hdr->e_entry;
			} else {
				data_addr = trunc_page(phdr[i].p_vaddr);
				data_size = round_page(phdr[i].p_memsz +
				    phdr[i].p_vaddr - data_addr);
			}
			break;
		case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
				error = ENOEXEC;
				goto fail;
			}
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	addr = ELF_RTLD_ADDR(vmspace);

	imgp->entry_addr = entry;

	brand_info = NULL;

	/* We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding w/in the ELF header, and (3) path of the `interp_path'
	 * field.  We should also look for an ".note.ABI-tag" ELF section now
	 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
	 */

	/* If the executable has a brand, search for it in the brand list. */
	if (brand_info == NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			Elf_Brandinfo *bi = elf_brand_list[i];

			if (bi != NULL &&
			    (hdr->e_ident[EI_OSABI] == bi->brand ||
			    0 == strncmp(
			    (const char *)&hdr->e_ident[OLD_EI_BRAND],
			    bi->compat_3_brand, strlen(bi->compat_3_brand)))) {
				brand_info = bi;
				break;
			}
		}
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (brand_info == NULL && interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			Elf_Brandinfo *bi = elf_brand_list[i];

			if (bi != NULL &&
			    strcmp(interp, bi->interp_path) == 0) {
				brand_info = bi;
				break;
			}
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	if (brand_info == NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			Elf_Brandinfo *bi = elf_brand_list[i];

			if (bi != NULL && fallback_elf_brand == bi->brand) {
				brand_info = bi;
				break;
			}
		}
	}

	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto fail;
	}

	imgp->proc->p_sysent = brand_info->sysvec;
	if (interp != NULL) {
		snprintf(path, sizeof(path), "%s%s",
		    brand_info->emul_path, interp);
		if ((error = elf_load_file(imgp->proc, path, &addr,
		    &imgp->entry_addr)) != 0) {
			if ((error = elf_load_file(imgp->proc, interp, &addr,
			    &imgp->entry_addr)) != 0) {
				uprintf("ELF interpreter %s not found\n", path);
				goto fail;
			}
		}
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

fail:
	return error;
}

static int
elf_freebsd_fixup(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	register_t *pos;

	pos = *stack_base + (imgp->argc + imgp->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	(*stack_base)--;
	suword(*stack_base, (long)imgp->argc);
	return 0;
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback) __P((vm_map_entry_t, void *));

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr __P((vm_map_entry_t, void *));
static void cb_size_segment __P((vm_map_entry_t, void *));
static void each_writable_segment __P((struct proc *, segment_callback,
    void *));
static int elf_corehdr __P((struct proc *, struct vnode *, struct ucred *,
    int, void *, size_t));
static void elf_puthdr __P((struct proc *, void *, size_t *,
    const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int));
static void elf_putnote __P((void *, size_t *, const char *, int,
    const void *, size_t));

extern int osreldate;
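
/*
 * elf_coredump() writes the core file in two parts: a header area built in
 * memory by elf_corehdr()/elf_puthdr() (the ELF header, a program header
 * table with one PT_NOTE entry plus one PT_LOAD entry per writable segment,
 * and the note data), followed by the contents of each writable segment.
 */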
int
elf_coredump(p, vp, limit)
	register struct proc *p;
	register struct vnode *vp;
	off_t limit;
{
	register struct ucred *cred = p->p_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(p, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	elf_puthdr((struct proc *)NULL, (void *)NULL, &hdrsize,
	    (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
	    (const prpsinfo_t *)NULL, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return EINVAL;
	}
	error = elf_corehdr(p, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr(UIO_WRITE, vp, (caddr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p);
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return error;
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(p, func, closure)
	struct proc *p;
	segment_callback func;
	void *closure;
{
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    (entry->protection & (VM_PROT_READ|VM_PROT_WRITE)) !=
		    (VM_PROT_READ|VM_PROT_WRITE))
			continue;

		/*
		 * Don't include memory segments in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).
		 */
		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
elf_corehdr(p, vp, cred, numsegs, hdr, hdrsize)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	size_t off;
	prstatus_t status;
	prfpregset_t fpregset;
	prpsinfo_t psinfo;

	/* Gather the information for the header. */
	bzero(&status, sizeof status);
	status.pr_version = PRSTATUS_VERSION;
	status.pr_statussz = sizeof(prstatus_t);
	status.pr_gregsetsz = sizeof(gregset_t);
	status.pr_fpregsetsz = sizeof(fpregset_t);
	status.pr_osreldate = osreldate;
	status.pr_cursig = p->p_sig;
	status.pr_pid = p->p_pid;
	fill_regs(p, &status.pr_reg);

	fill_fpregs(p, &fpregset);

	bzero(&psinfo, sizeof psinfo);
	psinfo.pr_version = PRPSINFO_VERSION;
	psinfo.pr_psinfosz = sizeof(prpsinfo_t);
	strncpy(psinfo.pr_fname, p->p_comm, MAXCOMLEN);
	/* XXX - We don't fill in the command line arguments properly yet. */
	strncpy(psinfo.pr_psargs, p->p_comm, PRARGSZ);

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	elf_puthdr(p, hdr, &off, &status, &fpregset, &psinfo, numsegs);

	/* Write it to the core file. */
	return vn_rdwr(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, NULL, p);
}
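
/*
 * Lay out the core file header area at "dst", advancing *off past each piece
 * as it goes.  elf_coredump() calls this twice: first with dst == NULL as a
 * dry run to size the header, then again with a real buffer to fill it in.
 */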
static void
elf_puthdr(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
    const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
{
	size_t ehoff;
	size_t phoff;
	size_t noteoff;
	size_t notesz;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	elf_putnote(dst, off, "FreeBSD", NT_PRSTATUS, status,
	    sizeof *status);
	elf_putnote(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
	    sizeof *fpregset);
	elf_putnote(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);
	notesz = *off - noteoff;

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(p, cb_put_phdr, &phc);
	}
}

static void
elf_putnote(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw elf_execsw = {exec_elf_imgact, "ELF"};
EXEC_SET(elf, elf_execsw);