/*
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/wait.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/sysent.h>
#include <sys/shm.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/reg.h>

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

static MALLOC_DEFINE(M_ATEXEC, "atexec", "atexec callback");

/*
 * callout list for things to do at exec time
 */
struct execlist {
	execlist_fn function;
	TAILQ_ENTRY(execlist) next;
};

TAILQ_HEAD(exec_list_head, execlist);
static struct exec_list_head exec_list = TAILQ_HEAD_INITIALIZER(exec_list);

static register_t *exec_copyout_strings(struct image_params *);

/* XXX This should be vm_size_t. */
static u_long ps_strings = PS_STRINGS;
SYSCTL_ULONG(_kern, KERN_PS_STRINGS, ps_strings, CTLFLAG_RD, &ps_strings, 0, "");

/* XXX This should be vm_size_t. */
static u_long usrstack = USRSTACK;
SYSCTL_ULONG(_kern, KERN_USRSTACK, usrstack, CTLFLAG_RD, &usrstack, 0, "");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0, "");

int ps_argsopen = 1;
SYSCTL_INT(_kern, OID_AUTO, ps_argsopen, CTLFLAG_RW, &ps_argsopen, 0, "");

#ifdef __ia64__
/* XXX HACK */
static int regstkpages = 256;
SYSCTL_INT(_machdep, OID_AUTO, regstkpages, CTLFLAG_RW, &regstkpages, 0, "");
#endif

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;
	char **argv;
	char **envv;
};
#endif

/*
 * execve() system call.
 *
 * MPSAFE
 */
int
execve(td, uap)
	struct thread *td;
	register struct execve_args *uap;
{
	struct proc *p = td->td_proc;
	struct nameidata nd, *ndp;
	struct ucred *newcred = NULL, *oldcred;
	struct uidinfo *euip;
	register_t *stack_base;
	int error, len, i;
	struct image_params image_params, *imgp;
	struct vattr attr;
	int (*img_first)(struct image_params *);
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct procsig *oldprocsig, *newprocsig;
#ifdef KTRACE
	struct vnode *tracevp = NULL;
#endif
	struct vnode *textvp = NULL;

	imgp = &image_params;

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	mtx_lock(&Giant);
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/* XXXKSE */
	/* !!!!!!!! we need to abort all the other threads of this process */
	/* before we proceed beyond this point! */

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->uap = uap;
	imgp->attr = &attr;
	imgp->argc = imgp->envc = 0;
	imgp->argv0 = NULL;
	imgp->entry_addr = 0;
	imgp->vmspace_destroyed = 0;
	imgp->interpreted = 0;
	imgp->interpreter_name[0] = '\0';
	imgp->auxargs = NULL;
	imgp->vp = NULL;
	imgp->firstpage = NULL;
	imgp->ps_strings = 0;
	imgp->auxarg_size = 0;

	/*
	 * Allocate temporary demand zeroed space for argument and
	 * environment strings
	 */
	imgp->stringbase = (char *)kmem_alloc_wait(exec_map, ARG_MAX + PAGE_SIZE);
	if (imgp->stringbase == NULL) {
		error = ENOMEM;
		goto exec_fail;
	}
	imgp->stringp = imgp->stringbase;
	imgp->stringspace = ARG_MAX;
	imgp->image_header = imgp->stringbase + ARG_MAX;

	/*
	 * Translate the file name.  namei() returns a vnode pointer
	 * in ni_vp among other things.
	 */
	ndp = &nd;
	NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
	    UIO_USERSPACE, uap->fname, td);

interpret:

	error = namei(ndp);
	if (error) {
		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
		    ARG_MAX + PAGE_SIZE);
		goto exec_fail;
	}

	imgp->vp = ndp->ni_vp;
	imgp->fname = uap->fname;

	/*
	 * Check file permissions (also 'opens' file)
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(imgp->vp, 0, td);
		goto exec_fail_dealloc;
	}

	error = exec_map_first_page(imgp);
	VOP_UNLOCK(imgp->vp, 0, td);
	if (error)
		goto exec_fail_dealloc;

	/*
	 * If the current process has a special image activator it
	 * wants to try first, call it.  For example, emulating shell
	 * scripts differently.
	 */
	error = -1;
	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
		error = img_first(imgp);

	/*
	 * Loop through the list of image activators, calling each one.
	 * An activator returns -1 if there is no match, 0 on success,
	 * and an error otherwise.
	 */
	for (i = 0; error == -1 && execsw[i]; ++i) {
		if (execsw[i]->ex_imgact == NULL ||
		    execsw[i]->ex_imgact == img_first) {
			continue;
		}
		error = (*execsw[i]->ex_imgact)(imgp);
	}

	if (error) {
		if (error == -1)
			error = ENOEXEC;
		goto exec_fail_dealloc;
	}

	/*
	 * Special interpreter operation, cleanup and loop up to try to
	 * activate the interpreter.
	 */
	if (imgp->interpreted) {
		exec_unmap_first_page(imgp);
		/* free name buffer and old vnode */
		NDFREE(ndp, NDF_ONLY_PNBUF);
		vrele(ndp->ni_vp);
		/* set new name to that of the interpreter */
		NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
		    UIO_SYSSPACE, imgp->interpreter_name, td);
		goto interpret;
	}

	/*
	 * Copy out strings (args and env) and initialize stack base
	 */
	stack_base = exec_copyout_strings(imgp);

	/*
	 * If custom stack fixup routine present for this process
	 * let it do the stack setup.
	 * Else stuff argument count as first item on stack
	 */
	if (p->p_sysent->sv_fixup)
		(*p->p_sysent->sv_fixup)(&stack_base, imgp);
	else
		suword(--stack_base, imgp->argc);

	/*
	 * For security and other reasons, the file descriptor table cannot
	 * be shared after an exec.
	 */
	FILEDESC_LOCK(p->p_fd);
	if (p->p_fd->fd_refcnt > 1) {
		struct filedesc *tmp;

		tmp = fdcopy(td);
		FILEDESC_UNLOCK(p->p_fd);
		fdfree(td);
		p->p_fd = tmp;
	} else
		FILEDESC_UNLOCK(p->p_fd);

	/*
	 * Malloc things before we need locks.
	 */
	newcred = crget();
	euip = uifind(attr.va_uid);
	i = imgp->endargs - imgp->stringbase;
	if (ps_arg_cache_limit >= i + sizeof(struct pargs))
		newargs = pargs_alloc(i);

	/* close files on exec */
	fdcloseexec(td);

	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec. The new process gets a copy of the old
	 * handlers.  In execsigs(), the new process will have its signals
	 * reset.
	 */
319 */ 320 PROC_LOCK(p); 321 mp_fixme("procsig needs a lock"); 322 if (p->p_procsig->ps_refcnt > 1) { 323 oldprocsig = p->p_procsig; 324 PROC_UNLOCK(p); 325 MALLOC(newprocsig, struct procsig *, sizeof(struct procsig), 326 M_SUBPROC, M_WAITOK); 327 bcopy(oldprocsig, newprocsig, sizeof(*newprocsig)); 328 newprocsig->ps_refcnt = 1; 329 oldprocsig->ps_refcnt--; 330 PROC_LOCK(p); 331 p->p_procsig = newprocsig; 332 if (p->p_sigacts == &p->p_uarea->u_sigacts) 333 panic("shared procsig but private sigacts?"); 334 335 p->p_uarea->u_sigacts = *p->p_sigacts; 336 p->p_sigacts = &p->p_uarea->u_sigacts; 337 } 338 /* Stop profiling */ 339 stopprofclock(p); 340 341 /* reset caught signals */ 342 execsigs(p); 343 344 /* name this process - nameiexec(p, ndp) */ 345 len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN); 346 bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len); 347 p->p_comm[len] = 0; 348 349 /* 350 * mark as execed, wakeup the process that vforked (if any) and tell 351 * it that it now has its own resources back 352 */ 353 p->p_flag |= P_EXEC; 354 if (p->p_pptr && (p->p_flag & P_PPWAIT)) { 355 p->p_flag &= ~P_PPWAIT; 356 wakeup((caddr_t)p->p_pptr); 357 } 358 359 /* 360 * Implement image setuid/setgid. 361 * 362 * Don't honor setuid/setgid if the filesystem prohibits it or if 363 * the process is being traced. 364 */ 365 oldcred = p->p_ucred; 366 if ((((attr.va_mode & VSUID) && oldcred->cr_uid != attr.va_uid) || 367 ((attr.va_mode & VSGID) && oldcred->cr_gid != attr.va_gid)) && 368 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && 369 (p->p_flag & P_TRACED) == 0) { 370 /* 371 * Turn off syscall tracing for set-id programs, except for 372 * root. Record any set-id flags first to make sure that 373 * we do not regain any tracing during a possible block. 374 */ 375 setsugid(p); 376 #ifdef KTRACE 377 if (p->p_tracep && suser_cred(oldcred, PRISON_ROOT)) { 378 mtx_lock(&ktrace_mtx); 379 p->p_traceflag = 0; 380 tracevp = p->p_tracep; 381 p->p_tracep = NULL; 382 mtx_unlock(&ktrace_mtx); 383 } 384 #endif 385 /* Make sure file descriptors 0..2 are in use. */ 386 error = fdcheckstd(td); 387 if (error != 0) { 388 oldcred = NULL; 389 goto done1; 390 } 391 /* 392 * Set the new credentials. 393 */ 394 crcopy(newcred, oldcred); 395 if (attr.va_mode & VSUID) 396 change_euid(newcred, euip); 397 if (attr.va_mode & VSGID) 398 change_egid(newcred, attr.va_gid); 399 setugidsafety(td); 400 /* 401 * Implement correct POSIX saved-id behavior. 402 */ 403 change_svuid(newcred, newcred->cr_uid); 404 change_svgid(newcred, newcred->cr_gid); 405 p->p_ucred = newcred; 406 newcred = NULL; 407 } else { 408 if (oldcred->cr_uid == oldcred->cr_ruid && 409 oldcred->cr_gid == oldcred->cr_rgid) 410 p->p_flag &= ~P_SUGID; 411 /* 412 * Implement correct POSIX saved-id behavior. 413 * 414 * XXX: It's not clear that the existing behavior is 415 * POSIX-compliant. A number of sources indicate that the 416 * saved uid/gid should only be updated if the new ruid is 417 * not equal to the old ruid, or the new euid is not equal 418 * to the old euid and the new euid is not equal to the old 419 * ruid. The FreeBSD code always updates the saved uid/gid. 420 * Also, this code uses the new (replaced) euid and egid as 421 * the source, which may or may not be the right ones to use. 
422 */ 423 if (oldcred->cr_svuid != oldcred->cr_uid || 424 oldcred->cr_svgid != oldcred->cr_gid) { 425 crcopy(newcred, oldcred); 426 change_svuid(newcred, newcred->cr_uid); 427 change_svgid(newcred, newcred->cr_gid); 428 p->p_ucred = newcred; 429 newcred = NULL; 430 } 431 } 432 433 /* 434 * Store the vp for use in procfs 435 */ 436 textvp = p->p_textvp; 437 VREF(ndp->ni_vp); 438 p->p_textvp = ndp->ni_vp; 439 440 /* 441 * Notify others that we exec'd, and clear the P_INEXEC flag 442 * as we're now a bona fide freshly-execed process. 443 */ 444 KNOTE(&p->p_klist, NOTE_EXEC); 445 p->p_flag &= ~P_INEXEC; 446 447 /* 448 * If tracing the process, trap to debugger so breakpoints 449 * can be set before the program executes. 450 */ 451 _STOPEVENT(p, S_EXEC, 0); 452 453 if (p->p_flag & P_TRACED) 454 psignal(p, SIGTRAP); 455 456 /* clear "fork but no exec" flag, as we _are_ execing */ 457 p->p_acflag &= ~AFORK; 458 459 /* Free any previous argument cache */ 460 oldargs = p->p_args; 461 p->p_args = NULL; 462 463 /* Set values passed into the program in registers. */ 464 setregs(td, imgp->entry_addr, (u_long)(uintptr_t)stack_base, 465 imgp->ps_strings); 466 467 /* Cache arguments if they fit inside our allowance */ 468 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { 469 bcopy(imgp->stringbase, newargs->ar_args, i); 470 p->p_args = newargs; 471 newargs = NULL; 472 } 473 done1: 474 PROC_UNLOCK(p); 475 476 /* 477 * Free any resources malloc'd earlier that we didn't use. 478 */ 479 uifree(euip); 480 if (newcred == NULL) 481 crfree(oldcred); 482 else 483 crfree(newcred); 484 /* 485 * Handle deferred decrement of ref counts. 486 */ 487 if (textvp != NULL) 488 vrele(textvp); 489 #ifdef KTRACE 490 if (tracevp != NULL) 491 vrele(tracevp); 492 #endif 493 if (oldargs != NULL) 494 pargs_drop(oldargs); 495 if (newargs != NULL) 496 pargs_drop(newargs); 497 498 exec_fail_dealloc: 499 500 /* 501 * free various allocated resources 502 */ 503 if (imgp->firstpage) 504 exec_unmap_first_page(imgp); 505 506 if (imgp->stringbase != NULL) 507 kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase, 508 ARG_MAX + PAGE_SIZE); 509 510 if (imgp->vp) { 511 NDFREE(ndp, NDF_ONLY_PNBUF); 512 vrele(imgp->vp); 513 } 514 515 if (error == 0) 516 goto done2; 517 518 exec_fail: 519 /* we're done here, clear P_INEXEC */ 520 PROC_LOCK(p); 521 p->p_flag &= ~P_INEXEC; 522 PROC_UNLOCK(p); 523 524 if (imgp->vmspace_destroyed) { 525 /* sorry, no more process anymore. 
		exit1(td, W_EXITCODE(0, SIGABRT));
		/* NOT REACHED */
		error = 0;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
exec_map_first_page(imgp)
	struct image_params *imgp;
{
	int rv, i;
	int initial_pagein;
	vm_page_t ma[VM_INITIAL_PAGEIN];
	vm_object_t object;

	GIANT_REQUIRED;

	if (imgp->firstpage) {
		exec_unmap_first_page(imgp);
	}

	VOP_GETVOBJECT(imgp->vp, &object);

	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

	if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
		initial_pagein = VM_INITIAL_PAGEIN;
		if (initial_pagein > object->size)
			initial_pagein = object->size;
		for (i = 1; i < initial_pagein; i++) {
			if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
				if ((ma[i]->flags & PG_BUSY) || ma[i]->busy)
					break;
				if (ma[i]->valid)
					break;
				vm_page_busy(ma[i]);
			} else {
				ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL);
				if (ma[i] == NULL)
					break;
			}
		}
		initial_pagein = i;

		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
		ma[0] = vm_page_lookup(object, 0);

		if ((rv != VM_PAGER_OK) || (ma[0] == NULL) || (ma[0]->valid == 0)) {
			if (ma[0]) {
				vm_page_protect(ma[0], VM_PROT_NONE);
				vm_page_free(ma[0]);
			}
			return EIO;
		}
	}

	vm_page_wire(ma[0]);
	vm_page_wakeup(ma[0]);

	pmap_qenter((vm_offset_t)imgp->image_header, ma, 1);
	imgp->firstpage = ma[0];

	return 0;
}

void
exec_unmap_first_page(imgp)
	struct image_params *imgp;
{
	GIANT_REQUIRED;

	if (imgp->firstpage) {
		pmap_qremove((vm_offset_t)imgp->image_header, 1);
		vm_page_unwire(imgp->firstpage, 1);
		imgp->firstpage = NULL;
	}
}

/*
 * Destroy old address space, and allocate a new stack
 * The new stack is only SGROWSIZ large because it is grown
 * automatically in trap.c.
 */
int
exec_new_vmspace(imgp)
	struct image_params *imgp;
{
	int error;
	struct execlist *ep;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	vm_offset_t stack_addr = USRSTACK - maxssiz;

	GIANT_REQUIRED;

	imgp->vmspace_destroyed = 1;

	/*
	 * Perform functions registered with at_exec().
	 */
	TAILQ_FOREACH(ep, &exec_list, next)
		(*ep->function)(p);

	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted
	 */
	if (vmspace->vm_refcnt == 1) {
		if (vmspace->vm_shm)
			shmexit(p);
		pmap_remove_pages(vmspace_pmap(vmspace), 0, VM_MAXUSER_ADDRESS);
		vm_map_remove(&vmspace->vm_map, 0, VM_MAXUSER_ADDRESS);
	} else {
		vmspace_exec(p);
		vmspace = p->p_vmspace;
	}

	/* Allocate a new stack */
	error = vm_map_stack(&vmspace->vm_map, stack_addr, (vm_size_t)maxssiz,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error)
		return (error);

#ifdef __ia64__
	{
		/*
		 * Allocate backing store.  We really need something
		 * similar to vm_map_stack which can allow the backing
		 * store to grow upwards.  This will do for now.
		 */
659 */ 660 vm_offset_t bsaddr; 661 bsaddr = USRSTACK - 2*maxssiz; 662 error = vm_map_find(&vmspace->vm_map, 0, 0, &bsaddr, 663 regstkpages * PAGE_SIZE, 0, 664 VM_PROT_ALL, VM_PROT_ALL, 0); 665 FIRST_THREAD_IN_PROC(p)->td_md.md_bspstore = bsaddr; 666 } 667 #endif 668 669 /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the 670 * VM_STACK case, but they are still used to monitor the size of the 671 * process stack so we can check the stack rlimit. 672 */ 673 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; 674 vmspace->vm_maxsaddr = (char *)USRSTACK - maxssiz; 675 676 return(0); 677 } 678 679 /* 680 * Copy out argument and environment strings from the old process 681 * address space into the temporary string buffer. 682 */ 683 int 684 exec_extract_strings(imgp) 685 struct image_params *imgp; 686 { 687 char **argv, **envv; 688 char *argp, *envp; 689 int error; 690 size_t length; 691 692 /* 693 * extract arguments first 694 */ 695 696 argv = imgp->uap->argv; 697 698 if (argv) { 699 argp = (caddr_t) (intptr_t) fuword(argv); 700 if (argp == (caddr_t) -1) 701 return (EFAULT); 702 if (argp) 703 argv++; 704 if (imgp->argv0) 705 argp = imgp->argv0; 706 if (argp) { 707 do { 708 if (argp == (caddr_t) -1) 709 return (EFAULT); 710 if ((error = copyinstr(argp, imgp->stringp, 711 imgp->stringspace, &length))) { 712 if (error == ENAMETOOLONG) 713 return(E2BIG); 714 return (error); 715 } 716 imgp->stringspace -= length; 717 imgp->stringp += length; 718 imgp->argc++; 719 } while ((argp = (caddr_t) (intptr_t) fuword(argv++))); 720 } 721 } 722 723 imgp->endargs = imgp->stringp; 724 725 /* 726 * extract environment strings 727 */ 728 729 envv = imgp->uap->envv; 730 731 if (envv) { 732 while ((envp = (caddr_t) (intptr_t) fuword(envv++))) { 733 if (envp == (caddr_t) -1) 734 return (EFAULT); 735 if ((error = copyinstr(envp, imgp->stringp, 736 imgp->stringspace, &length))) { 737 if (error == ENAMETOOLONG) 738 return(E2BIG); 739 return (error); 740 } 741 imgp->stringspace -= length; 742 imgp->stringp += length; 743 imgp->envc++; 744 } 745 } 746 747 return (0); 748 } 749 750 /* 751 * Copy strings out to the new process address space, constructing 752 * new arg and env vector tables. Return a pointer to the base 753 * so that it can be used as the initial stack pointer. 754 */ 755 register_t * 756 exec_copyout_strings(imgp) 757 struct image_params *imgp; 758 { 759 int argc, envc; 760 char **vectp; 761 char *stringp, *destp; 762 register_t *stack_base; 763 struct ps_strings *arginfo; 764 int szsigcode; 765 766 /* 767 * Calculate string base and vector table pointers. 768 * Also deal with signal trampoline code for this exec type. 769 */ 770 arginfo = (struct ps_strings *)PS_STRINGS; 771 szsigcode = *(imgp->proc->p_sysent->sv_szsigcode); 772 destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE - 773 roundup((ARG_MAX - imgp->stringspace), sizeof(char *)); 774 775 /* 776 * install sigcode 777 */ 778 if (szsigcode) 779 copyout(imgp->proc->p_sysent->sv_sigcode, 780 ((caddr_t)arginfo - szsigcode), szsigcode); 781 782 /* 783 * If we have a valid auxargs ptr, prepare some room 784 * on the stack. 785 */ 786 if (imgp->auxargs) { 787 /* 788 * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for 789 * lower compatibility. 790 */ 791 imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size 792 : (AT_COUNT * 2); 793 /* 794 * The '+ 2' is for the null pointers at the end of each of 795 * the arg and env vector sets,and imgp->auxarg_size is room 796 * for argument of Runtime loader. 
797 */ 798 vectp = (char **) (destp - (imgp->argc + imgp->envc + 2 + 799 imgp->auxarg_size) * sizeof(char *)); 800 801 } else 802 /* 803 * The '+ 2' is for the null pointers at the end of each of 804 * the arg and env vector sets 805 */ 806 vectp = (char **) 807 (destp - (imgp->argc + imgp->envc + 2) * sizeof(char *)); 808 809 /* 810 * vectp also becomes our initial stack base 811 */ 812 stack_base = (register_t *)vectp; 813 814 stringp = imgp->stringbase; 815 argc = imgp->argc; 816 envc = imgp->envc; 817 818 /* 819 * Copy out strings - arguments and environment. 820 */ 821 copyout(stringp, destp, ARG_MAX - imgp->stringspace); 822 823 /* 824 * Fill in "ps_strings" struct for ps, w, etc. 825 */ 826 suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp); 827 suword(&arginfo->ps_nargvstr, argc); 828 829 /* 830 * Fill in argument portion of vector table. 831 */ 832 for (; argc > 0; --argc) { 833 suword(vectp++, (long)(intptr_t)destp); 834 while (*stringp++ != 0) 835 destp++; 836 destp++; 837 } 838 839 /* a null vector table pointer separates the argp's from the envp's */ 840 suword(vectp++, 0); 841 842 suword(&arginfo->ps_envstr, (long)(intptr_t)vectp); 843 suword(&arginfo->ps_nenvstr, envc); 844 845 /* 846 * Fill in environment portion of vector table. 847 */ 848 for (; envc > 0; --envc) { 849 suword(vectp++, (long)(intptr_t)destp); 850 while (*stringp++ != 0) 851 destp++; 852 destp++; 853 } 854 855 /* end of vector table is a null pointer */ 856 suword(vectp, 0); 857 858 return (stack_base); 859 } 860 861 /* 862 * Check permissions of file to execute. 863 * Called with imgp->vp locked. 864 * Return 0 for success or error code on failure. 865 */ 866 int 867 exec_check_permissions(imgp) 868 struct image_params *imgp; 869 { 870 struct vnode *vp = imgp->vp; 871 struct vattr *attr = imgp->attr; 872 struct thread *td; 873 int error; 874 875 td = curthread; /* XXXKSE */ 876 /* Get file attributes */ 877 error = VOP_GETATTR(vp, attr, td->td_ucred, td); 878 if (error) 879 return (error); 880 881 /* 882 * 1) Check if file execution is disabled for the filesystem that this 883 * file resides on. 884 * 2) Insure that at least one execute bit is on - otherwise root 885 * will always succeed, and we don't want to happen unless the 886 * file really is executable. 887 * 3) Insure that the file is a regular file. 888 */ 889 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 890 ((attr->va_mode & 0111) == 0) || 891 (attr->va_type != VREG)) 892 return (EACCES); 893 894 /* 895 * Zero length files can't be exec'd 896 */ 897 if (attr->va_size == 0) 898 return (ENOEXEC); 899 900 /* 901 * Check for execute permission to file based on current credentials. 902 */ 903 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 904 if (error) 905 return (error); 906 907 /* 908 * Check number of open-for-writes on the file and deny execution 909 * if there are any. 910 */ 911 if (vp->v_writecount) 912 return (ETXTBSY); 913 914 /* 915 * Call filesystem specific open routine (which does nothing in the 916 * general case). 
917 */ 918 error = VOP_OPEN(vp, FREAD, td->td_ucred, td); 919 return (error); 920 } 921 922 /* 923 * Exec handler registration 924 */ 925 int 926 exec_register(execsw_arg) 927 const struct execsw *execsw_arg; 928 { 929 const struct execsw **es, **xs, **newexecsw; 930 int count = 2; /* New slot and trailing NULL */ 931 932 if (execsw) 933 for (es = execsw; *es; es++) 934 count++; 935 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); 936 if (newexecsw == NULL) 937 return ENOMEM; 938 xs = newexecsw; 939 if (execsw) 940 for (es = execsw; *es; es++) 941 *xs++ = *es; 942 *xs++ = execsw_arg; 943 *xs = NULL; 944 if (execsw) 945 free(execsw, M_TEMP); 946 execsw = newexecsw; 947 return 0; 948 } 949 950 int 951 exec_unregister(execsw_arg) 952 const struct execsw *execsw_arg; 953 { 954 const struct execsw **es, **xs, **newexecsw; 955 int count = 1; 956 957 if (execsw == NULL) 958 panic("unregister with no handlers left?\n"); 959 960 for (es = execsw; *es; es++) { 961 if (*es == execsw_arg) 962 break; 963 } 964 if (*es == NULL) 965 return ENOENT; 966 for (es = execsw; *es; es++) 967 if (*es != execsw_arg) 968 count++; 969 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); 970 if (newexecsw == NULL) 971 return ENOMEM; 972 xs = newexecsw; 973 for (es = execsw; *es; es++) 974 if (*es != execsw_arg) 975 *xs++ = *es; 976 *xs = NULL; 977 if (execsw) 978 free(execsw, M_TEMP); 979 execsw = newexecsw; 980 return 0; 981 } 982 983 int 984 at_exec(function) 985 execlist_fn function; 986 { 987 struct execlist *ep; 988 989 #ifdef INVARIANTS 990 /* Be noisy if the programmer has lost track of things */ 991 if (rm_at_exec(function)) 992 printf("WARNING: exec callout entry (%p) already present\n", 993 function); 994 #endif 995 ep = malloc(sizeof(*ep), M_ATEXEC, M_NOWAIT); 996 if (ep == NULL) 997 return (ENOMEM); 998 ep->function = function; 999 TAILQ_INSERT_TAIL(&exec_list, ep, next); 1000 return (0); 1001 } 1002 1003 /* 1004 * Scan the exec callout list for the given item and remove it. 1005 * Returns the number of items removed (0 or 1) 1006 */ 1007 int 1008 rm_at_exec(function) 1009 execlist_fn function; 1010 { 1011 struct execlist *ep; 1012 1013 TAILQ_FOREACH(ep, &exec_list, next) { 1014 if (ep->function == function) { 1015 TAILQ_REMOVE(&exec_list, ep, next); 1016 free(ep, M_ATEXEC); 1017 return(1); 1018 } 1019 } 1020 return (0); 1021 } 1022 1023