/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define OLD_EI_BRAND	8

__ElfType(Brandinfo);
__ElfType(Auxargs);

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

static int elf_trace = 0;
#if __ELF_WORD_SIZE == 32
SYSCTL_INT(_debug, OID_AUTO, elf32_trace, CTLFLAG_RW, &elf_trace, 0, "");
#else
SYSCTL_INT(_debug, OID_AUTO, elf64_trace, CTLFLAG_RW, &elf_trace, 0, "");
#endif

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
extern int fallback_elf_brand;

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

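	/*
	 * Take the first free slot.  The brand list is a small,
	 * fixed-size array, so a linear scan is all that is needed.
	 */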
	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
	Elf_Brandinfo *bi;
	int i;

	/*
	 * We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional
	 * string branding within the ELF header, and (3) the path of the
	 * `interp_path' field.  We should also look for an ".note.ABI-tag"
	 * ELF section, now in all Linux ELF binaries, FreeBSD 4.1+, and
	 * some NetBSD ones.
	 */

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi != NULL && hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    fallback_elf_brand == bi->brand)
			return (bi);
	}
	return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	if (hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max)
{
	int error, rv;
	vm_offset_t off;
	vm_offset_t data_buf = 0;

	/*
	 * Create the page if it doesn't exist yet.  Ignore errors.
	 */
	vm_map_lock(map);
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
	    max, 0);
	vm_map_unlock(map);

	/*
	 * Find the page from the underlying object.
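	 * We temporarily map the relevant fragment of the object into
	 * exec_map (read-only, copy-on-write) and copyout() just the
	 * sub-page range, since only part of a page needs to be written.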
	 */
	if (object) {
		vm_object_reference(object);
		rv = vm_map_find(exec_map, object, trunc_page(offset),
		    &data_buf, PAGE_SIZE, TRUE, VM_PROT_READ, VM_PROT_ALL,
		    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (rv);
		}

		off = offset - trunc_page(offset);
		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
		    end - start);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (KERN_FAILURE);
		}
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	int rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot, max);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot, max);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}
	if (end > start) {
		if (offset & PAGE_MASK) {
			vm_offset_t data_buf, off;
			vm_size_t sz;
			int error;

			/*
			 * The mapping is not page aligned.  This means we
			 * have to copy the data.  Sigh.
			 */
			rv = vm_map_find(map, 0, 0, &start, end - start,
			    FALSE, prot, max, 0);
			if (rv)
				return (rv);
			while (start < end) {
				vm_object_reference(object);
				rv = vm_map_find(exec_map, object,
				    trunc_page(offset), &data_buf,
				    2 * PAGE_SIZE, TRUE, VM_PROT_READ,
				    VM_PROT_ALL,
				    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
				if (rv != KERN_SUCCESS) {
					vm_object_deallocate(object);
					return (rv);
				}
				off = offset - trunc_page(offset);
				sz = end - start;
				if (sz > PAGE_SIZE)
					sz = PAGE_SIZE;
				error = copyout((caddr_t)data_buf + off,
				    (caddr_t)start, sz);
				vm_map_remove(exec_map, data_buf,
				    data_buf + 2 * PAGE_SIZE);
				if (error) {
					return (KERN_FAILURE);
				}
				start += sz;
				/*
				 * Advance the file offset in step with the
				 * destination; otherwise every iteration
				 * would recopy the first chunk of the file.
				 */
				offset += sz;
			}
			rv = KERN_SUCCESS;
		} else {
			vm_map_lock(map);
			rv = vm_map_insert(map, object, offset, start, end,
			    prot, max, cow);
			vm_map_unlock(map);
		}
		return (rv);
	} else {
		return (KERN_SUCCESS);
	}
}

static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
    struct vnode *vp, vm_object_t object, vm_offset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
    size_t pagesize)
{
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv;
	size_t copy_len;
	vm_offset_t file_addr;
	vm_offset_t data_buf = 0;

	GIANT_REQUIRED;

	error = 0;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
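	 * (memsz larger than filsz is fine: the remainder is bss to be
	 * zero filled.  The reverse has no sensible meaning.)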
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

#define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))

	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last
	 * page of an oversized mapping, or we can start the anon mapping
	 * a page early and copy the initialized data into that first page.
	 * We choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

	if (map_len != 0) {
		vm_object_reference(object);
		rv = __elfN(map_insert)(&vmspace->vm_map,
		    object,
		    file_addr,		/* file offset */
		    map_addr,		/* virtual start */
		    map_addr + map_len,	/* virtual end */
		    prot,
		    VM_PROT_ALL,
		    MAP_COPY_ON_WRITE | MAP_PREFAULT);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* We can stop now if we've covered it all. */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;

		vm_object_reference(object);
		rv = vm_map_find(exec_map, object,
		    trunc_page(offset + filsz), &data_buf, PAGE_SIZE, TRUE,
		    VM_PROT_READ, VM_PROT_ALL,
		    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* Send the page fragment to user space. */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
		    copy_len);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (error);
		}
	}

	/*
	 * Set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len), prot, FALSE);

	return (error);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.
 * On exit, it specifies the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int error, i, numsegs;

	if (curthread->td_proc != p)
		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data.
	 */
	imgp->proc = p;
	imgp->uap = NULL;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);
	imgp->object = NULL;

	if (imgp->image_header == NULL) {
		nd->ni_vp = NULL;
		error = ENOMEM;
		goto fail;
	}

	/* XXXKSE */
	NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);

	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(nd->ni_vp, 0, curthread);	/* XXXKSE */
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	if (error == 0)
		nd->ni_vp->v_vflag |= VV_TEXT;

	VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
	vm_object_reference(imgp->object);

	VOP_UNLOCK(nd->ni_vp, 0, curthread);	/* XXXKSE */
	if (error)
		goto fail;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within the first page for now. */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(p, vmspace,
			    nd->ni_vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
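			 * The ELF spec keeps PT_LOAD entries sorted in
			 * ascending p_vaddr order, so the first segment
			 * yields the lowest mapped address.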
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->image_header)
		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
		    PAGE_SIZE);
	if (imgp->object)
		vm_object_deallocate(imgp->object);

	if (nd->ni_vp)
		vrele(nd->ni_vp);

	free(tempdata, M_TEMP);

	return (error);
}

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct thread *td = curthread;
	struct sysentvec *sv;

	GIANT_REQUIRED;

	/*
	 * Do we have a valid ELF header?
	 */
	if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in the first page for now. */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/*
	 * From this point on, we may have resources that need to be freed.
	 */

	VOP_UNLOCK(imgp->vp, 0, td);

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
				error = ENOEXEC;
				goto fail;
			}
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		default:
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto fail;
	}
	sv = brand_info->sysvec;

	if ((error = exec_extract_strings(imgp)) != 0)
		goto fail;

	exec_new_vmspace(imgp, sv);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup.
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(imgp->proc, vmspace,
			    imgp->vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
				goto fail;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?
			 * We can't use VM_PROT_WRITE or VM_PROT_EXEC: it
			 * breaks the alpha terribly and possibly does other
			 * bad things, so we stick to the old way of figuring
			 * it out: if the segment contains the program entry
			 * point, it's a text segment; otherwise it is a
			 * data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;

			/*
			 * Check limits.  It should be safe to check the
			 * limits after loading the segment since we do
			 * not actually fault in all the segment's pages.
			 */
			if (data_size >
			    imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
			    text_size > maxtsiz ||
			    total_size >
			    imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
				error = ENOMEM;
				goto fail;
			}
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	addr = ELF_RTLD_ADDR(vmspace);

	imgp->entry_addr = entry;

	imgp->proc->p_sysent = sv;
	if (interp != NULL) {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
		    interp);
		if ((error = __elfN(load_file)(imgp->proc, path, &addr,
		    &imgp->entry_addr, sv->sv_pagesize)) != 0) {
			if ((error = __elfN(load_file)(imgp->proc, interp,
			    &addr, &imgp->entry_addr, sv->sv_pagesize)) != 0) {
				uprintf("ELF interpreter %s not found\n",
				    path);
				free(path, M_TEMP);
				goto fail;
			}
		}
		free(path, M_TEMP);
	}

	/*
	 * Construct auxargs table (used by the fixup routine).
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

fail:
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

#if __ELF_WORD_SIZE == 32
#define suword	suword32
#define stacktype u_int32_t
#else
#define suword	suword64
#define stacktype u_int64_t
#endif

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	stacktype *base;
	stacktype *pos;

	base = (stacktype *)*stack_base;
	pos = base + (imgp->argc + imgp->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->argc);
	*stack_base = (register_t *)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct proc *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct proc *, void *, size_t *,
    const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

int
__elfN(coredump)(td, vp, limit)
	struct thread *td;
	register struct vnode *vp;
	off_t limit;
{
	register struct proc *p = td->td_proc;
	register struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(p, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)((struct proc *)NULL, (void *)NULL, &hdrsize,
	    (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
	    (const prpsinfo_t *)NULL, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return (EINVAL);
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
			    curthread);	/* XXXKSE */
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
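 * Segment contents are page aligned within the core file (see the
 * round_page() below), which lets consumers map them directly.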
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(p, func, closure)
	struct proc *p;
	segment_callback func;
	void *closure;
{
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    (entry->protection & (VM_PROT_READ|VM_PROT_WRITE)) !=
		    (VM_PROT_READ|VM_PROT_WRITE))
			continue;

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).
		 */
		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	struct {
		prstatus_t status;
		prfpregset_t fpregset;
		prpsinfo_t psinfo;
	} *tempdata;
	struct proc *p = td->td_proc;
	size_t off;
	prstatus_t *status;
	prfpregset_t *fpregset;
	prpsinfo_t *psinfo;

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
	status = &tempdata->status;
	fpregset = &tempdata->fpregset;
	psinfo = &tempdata->psinfo;

	/*
	 * Gather the information for the header.
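	 * The prstatus note carries the process's register and signal
	 * state, the fpregset note its FPU state, and the psinfo note
	 * its name; these are what debuggers use to identify the
	 * process and recover its state from the core.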
	 */
	status->pr_version = PRSTATUS_VERSION;
	status->pr_statussz = sizeof(prstatus_t);
	status->pr_gregsetsz = sizeof(gregset_t);
	status->pr_fpregsetsz = sizeof(fpregset_t);
	status->pr_osreldate = osreldate;
	status->pr_cursig = p->p_sig;
	status->pr_pid = p->p_pid;
	fill_regs(td, &status->pr_reg);

	fill_fpregs(td, fpregset);

	psinfo->pr_version = PRPSINFO_VERSION;
	psinfo->pr_psinfosz = sizeof(prpsinfo_t);
	strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));

	/* XXX - We don't fill in the command line arguments properly yet. */
	strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs));

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(p, hdr, &off, status, fpregset, psinfo, numsegs);

	free(tempdata, M_TEMP);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td));	/* XXXKSE */
}

static void
__elfN(puthdr)(struct proc *p, void *dst, size_t *off,
    const prstatus_t *status, const prfpregset_t *fpregset,
    const prpsinfo_t *psinfo, int numsegs)
{
	size_t ehoff;
	size_t phoff;
	size_t noteoff;
	size_t notesz;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
	    sizeof *status);
	__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
	    sizeof *fpregset);
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);
	notesz = *off - noteoff;

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/*
		 * All the writable segments from the program.
		 */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(p, cb_put_phdr, &phc);
	}
}

static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
#if __ELF_WORD_SIZE == 32
static struct execsw elf_execsw = {exec_elf32_imgact, "ELF32"};
EXEC_SET(elf32, elf_execsw);
#else
static struct execsw elf_execsw = {exec_elf64_imgact, "ELF64"};
EXEC_SET(elf64, elf_execsw);
#endif