/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));
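/*
 * Example (illustrative): the fallback brand can be preset from
 * loader.conf(5) or changed at runtime, e.g. for the 64-bit activator:
 *
 *	kern.elf64.fallback_brand="9"		# loader.conf(5); 9 == ELFOSABI_FREEBSD
 *	sysctl kern.elf64.fallback_brand=9	# at runtime
 *
 * The default of -1 matches no registered brand, so an unbranded binary
 * with an unrecognized interpreter is rejected.
 */
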
static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
	Elf_Brandinfo *bi;
	int i;

	/*
	 * We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, and (3) the path in the `interp_path'
	 * field.  We should also look for an ".note.ABI-tag" ELF section now
	 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
	 */

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi != NULL && hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */
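	/*
	 * Example: a binary whose class, byte order and version all match
	 * but whose e_machine is for a foreign CPU passes the checks above,
	 * yet no entry in elf_brand_list can claim it, so the scan below
	 * rejects it with ENOEXEC before any brand-specific logic runs.
	 */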
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max)
{
	int error, rv;
	vm_offset_t off;
	vm_offset_t data_buf = 0;

	/*
	 * Create the page if it doesn't exist yet.  Ignore errors.
	 */
	vm_map_lock(map);
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
	    max, 0);
	vm_map_unlock(map);

	/*
	 * Find the page from the underlying object.
	 */
	if (object) {
		vm_object_reference(object);
		rv = vm_map_find(exec_map,
		    object,
		    trunc_page(offset),
		    &data_buf,
		    PAGE_SIZE,
		    TRUE,
		    VM_PROT_READ,
		    VM_PROT_ALL,
		    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (rv);
		}

		off = offset - trunc_page(offset);
		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
		    end - start);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (KERN_FAILURE);
		}
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t data_buf, off;
	vm_size_t sz;
	int error, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot, max);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot, max);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}
	if (end > start) {
		if (offset & PAGE_MASK) {
			/*
			 * The mapping is not page aligned.  This means we have
			 * to copy the data.  Sigh.
			 */
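			/*
			 * Worked example (PAGE_SIZE 4096): offset 0x12f00,
			 * start 0x8000, end 0xa000.  offset & PAGE_MASK is
			 * 0xf00, so file bytes sit at a different alignment
			 * than the destination; the loop below maps two file
			 * pages at a time read-only into exec_map and
			 * copyout()s at most one page per pass.
			 */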
			rv = vm_map_find(map, 0, 0, &start, end - start,
			    FALSE, prot, max, 0);
			if (rv)
				return (rv);
			data_buf = 0;
			while (start < end) {
				vm_object_reference(object);
				rv = vm_map_find(exec_map,
				    object,
				    trunc_page(offset),
				    &data_buf,
				    2 * PAGE_SIZE,
				    TRUE,
				    VM_PROT_READ,
				    VM_PROT_ALL,
				    (MAP_COPY_ON_WRITE
				    | MAP_PREFAULT_PARTIAL));
				if (rv != KERN_SUCCESS) {
					vm_object_deallocate(object);
					return (rv);
				}
				off = offset - trunc_page(offset);
				sz = end - start;
				if (sz > PAGE_SIZE)
					sz = PAGE_SIZE;
				error = copyout((caddr_t)data_buf + off,
				    (caddr_t)start, sz);
				vm_map_remove(exec_map, data_buf,
				    data_buf + 2 * PAGE_SIZE);
				if (error) {
					return (KERN_FAILURE);
				}
				start += sz;
				offset += sz;	/* advance the file offset in step with start */
			}
			rv = KERN_SUCCESS;
		} else {
			vm_map_lock(map);
			rv = vm_map_insert(map, object, offset, start, end,
			    prot, max, cow);
			vm_map_unlock(map);
		}
		return (rv);
	} else {
		return (KERN_SUCCESS);
	}
}

static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
    struct vnode *vp, vm_object_t object, vm_offset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
    size_t pagesize)
{
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_offset_t file_addr;
	vm_offset_t data_buf = 0;

	GIANT_REQUIRED;

	error = 0;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

#define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))

	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

	if (map_len != 0) {
		vm_object_reference(object);

		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(&vmspace->vm_map,
		    object,
		    file_addr,		/* file offset */
		    map_addr,		/* virtual start */
		    map_addr + map_len,	/* virtual end */
		    prot,
		    VM_PROT_ALL,
		    cow);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;

		vm_object_reference(object);
		rv = vm_map_find(exec_map,
		    object,
		    trunc_page(offset + filsz),
		    &data_buf,
		    PAGE_SIZE,
		    TRUE,
		    VM_PROT_READ,
		    VM_PROT_ALL,
		    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
		    copy_len);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (error);
		}
	}

	/*
	 * set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len), prot, FALSE);

	return (error);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int error, i, numsegs;

	if (curthread->td_proc != p)
		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->userspace_argv = NULL;
	imgp->userspace_envv = NULL;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	/* XXXKSE */
	NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);

	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(nd->ni_vp, 0, curthread);	/* XXXKSE */
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	if (error == 0)
		nd->ni_vp->v_vflag |= VV_TEXT;

	VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
	vm_object_reference(imgp->object);

	VOP_UNLOCK(nd->ni_vp, 0, curthread);	/* XXXKSE */
	if (error)
		goto fail;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	/* (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(p, vmspace,
			    nd->ni_vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->object)
		vm_object_deallocate(imgp->object);

	if (nd->ni_vp)
		vrele(nd->ni_vp);

	free(tempdata, M_TEMP);

	return (error);
}

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct thread *td = curthread;
	struct sysentvec *sv;

	GIANT_REQUIRED;

	/*
	 * Do we have a valid ELF header ?
	 */
	if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/*
	 * From this point on, we may have resources that need to be freed.
	 */
	VOP_UNLOCK(imgp->vp, 0, td);

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
				error = ENOEXEC;
				goto fail;
			}
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		default:
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto fail;
	}
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		interp = brand_info->interp_newpath;

	if ((error = exec_extract_strings(imgp)) != 0)
		goto fail;

	exec_new_vmspace(imgp, sv);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(imgp->proc, vmspace,
			    imgp->vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
				goto fail;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments' pages.
	 */
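	/*
	 * For example, an executable whose PT_LOAD data segment declares a
	 * 1 GB p_memsz is rejected here with ENOMEM when RLIMIT_DATA is at
	 * a typical 512 MB default, rather than failing obscurely later
	 * when its pages are actually touched.
	 */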
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
		error = ENOMEM;
		goto fail;
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);

	imgp->entry_addr = entry;

	imgp->proc->p_sysent = sv;
	if (interp != NULL && brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
		    interp);
		error = __elfN(load_file)(imgp->proc, path, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		free(path, M_TEMP);
		if (error == 0)
			interp = NULL;
	}
	if (interp != NULL) {
		error = __elfN(load_file)(imgp->proc, interp, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			goto fail;
		}
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

fail:
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

#define suword	__CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	pos = base + (imgp->argc + imgp->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->argc);
	*stack_base = (register_t *)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

int
__elfN(coredump)(td, vp, limit)
	struct thread *td;
	struct vnode *vp;
	off_t limit;
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return (EINVAL);
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
			    curthread);	/* XXXKSE */
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
	struct thread *td;
	segment_callback func;
	void *closure;
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include memory segments in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td));	/* XXXKSE */
}

static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		prstatus_t status;
		prfpregset_t fpregset;
		prpsinfo_t psinfo;
	} *tempdata;
	prstatus_t *status;
	prfpregset_t *fpregset;
	prpsinfo_t *psinfo;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header.  We also don't collect the data.
	 */
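	/*
	 * This routine runs in two passes: coredump() first calls it with
	 * dst == NULL purely to advance *off and learn the header size,
	 * then corehdr() calls it again with a real buffer to fill in the
	 * same layout.  The dst checks below keep the two passes in
	 * lockstep.
	 */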
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and is thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(prstatus_t);
			status->pr_gregsetsz = sizeof(gregset_t);
			status->pr_fpregsetsz = sizeof(fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);

		/* XXX allow for MD specific notes. */

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
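
/*
 * For reference, the core file produced by the code above is laid out as:
 *
 *	Elf_Ehdr
 *	(numsegs + 1) Elf_Phdrs: one PT_NOTE, then one PT_LOAD per
 *	    writable segment
 *	NT_PRPSINFO note, then NT_PRSTATUS and NT_FPREGSET notes per
 *	    thread, each name and desc padded to sizeof(Elf_Size)
 *	padding up to a page boundary
 *	the segment contents, written by __elfN(coredump)
 */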