/*
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/wait.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/sysent.h>
#include <sys/shm.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/reg.h>

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

static MALLOC_DEFINE(M_ATEXEC, "atexec", "atexec callback");

/*
 * callout list for things to do at exec time
 */
struct execlist {
	execlist_fn function;
	TAILQ_ENTRY(execlist) next;
};

TAILQ_HEAD(exec_list_head, execlist);
static struct exec_list_head exec_list = TAILQ_HEAD_INITIALIZER(exec_list);

static register_t *exec_copyout_strings(struct image_params *);

/* XXX This should be vm_size_t. */
static u_long ps_strings = PS_STRINGS;
SYSCTL_ULONG(_kern, KERN_PS_STRINGS, ps_strings, CTLFLAG_RD, &ps_strings, 0, "");

/* XXX This should be vm_size_t. */
static u_long usrstack = USRSTACK;
SYSCTL_ULONG(_kern, KERN_USRSTACK, usrstack, CTLFLAG_RD, &usrstack, 0, "");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0, "");

int ps_argsopen = 1;
SYSCTL_INT(_kern, OID_AUTO, ps_argsopen, CTLFLAG_RW, &ps_argsopen, 0, "");

#ifdef __ia64__
/* XXX HACK */
static int regstkpages = 256;
SYSCTL_INT(_machdep, OID_AUTO, regstkpages, CTLFLAG_RW, &regstkpages, 0, "");
#endif

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;
	char **argv;
	char **envv;
};
#endif

/*
 * execve() system call.
 *
 * MPSAFE
 */
int
execve(td, uap)
	struct thread *td;
	register struct execve_args *uap;
{
	struct proc *p = td->td_proc;
	struct nameidata nd, *ndp;
	struct ucred *newcred = NULL, *oldcred;
	struct uidinfo *euip;
	register_t *stack_base;
	int error, len, i;
	struct image_params image_params, *imgp;
	struct vattr attr;
	int (*img_first)(struct image_params *);
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct procsig *oldprocsig, *newprocsig;
#ifdef KTRACE
	struct vnode *tracevp = NULL;
#endif
	struct vnode *textvp = NULL;
	int credential_changing;
	int textset;

	imgp = &image_params;

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	if ((p->p_flag & P_KSES) && thread_single(SNGLE_EXIT)) {
		PROC_UNLOCK(p);
		return (ERESTART);	/* Try again later. */
	}
	/* If we get here all other threads are dead. */
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->uap = uap;
	imgp->attr = &attr;
	imgp->argc = imgp->envc = 0;
	imgp->argv0 = NULL;
	imgp->entry_addr = 0;
	imgp->vmspace_destroyed = 0;
	imgp->interpreted = 0;
	imgp->interpreter_name[0] = '\0';
	imgp->auxargs = NULL;
	imgp->vp = NULL;
	imgp->object = NULL;
	imgp->firstpage = NULL;
	imgp->ps_strings = 0;
	imgp->auxarg_size = 0;

	/*
	 * Allocate temporary demand zeroed space for argument and
	 * environment strings
	 */
	imgp->stringbase = (char *)kmem_alloc_wait(exec_map, ARG_MAX + PAGE_SIZE);
	if (imgp->stringbase == NULL) {
		error = ENOMEM;
		mtx_lock(&Giant);
		goto exec_fail;
	}
	imgp->stringp = imgp->stringbase;
	imgp->stringspace = ARG_MAX;
	imgp->image_header = imgp->stringbase + ARG_MAX;

	/*
	 * Translate the file name.  namei() returns a vnode pointer
	 * in ni_vp among other things.
	 */
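	/*
	 * Note: SAVENAME keeps the last path component in the nameidata
	 * so it can later be copied into p->p_comm and then released
	 * with NDFREE(..., NDF_ONLY_PNBUF).
	 */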
209 */ 210 ndp = &nd; 211 NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME, 212 UIO_USERSPACE, uap->fname, td); 213 214 mtx_lock(&Giant); 215 interpret: 216 217 error = namei(ndp); 218 if (error) { 219 kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase, 220 ARG_MAX + PAGE_SIZE); 221 goto exec_fail; 222 } 223 224 imgp->vp = ndp->ni_vp; 225 imgp->fname = uap->fname; 226 227 /* 228 * Check file permissions (also 'opens' file) 229 */ 230 error = exec_check_permissions(imgp); 231 if (error) 232 goto exec_fail_dealloc; 233 234 if (VOP_GETVOBJECT(imgp->vp, &imgp->object) == 0) 235 vm_object_reference(imgp->object); 236 237 /* 238 * Set VV_TEXT now so no one can write to the executable while we're 239 * activating it. 240 * 241 * Remember if this was set before and unset it in case this is not 242 * actually an executable image. 243 */ 244 textset = imgp->vp->v_vflag & VV_TEXT; 245 imgp->vp->v_vflag |= VV_TEXT; 246 247 error = exec_map_first_page(imgp); 248 if (error) 249 goto exec_fail_dealloc; 250 251 /* 252 * If the current process has a special image activator it 253 * wants to try first, call it. For example, emulating shell 254 * scripts differently. 255 */ 256 error = -1; 257 if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL) 258 error = img_first(imgp); 259 260 /* 261 * Loop through the list of image activators, calling each one. 262 * An activator returns -1 if there is no match, 0 on success, 263 * and an error otherwise. 264 */ 265 for (i = 0; error == -1 && execsw[i]; ++i) { 266 if (execsw[i]->ex_imgact == NULL || 267 execsw[i]->ex_imgact == img_first) { 268 continue; 269 } 270 error = (*execsw[i]->ex_imgact)(imgp); 271 } 272 273 if (error) { 274 if (error == -1) { 275 if (textset == 0) 276 imgp->vp->v_vflag &= ~VV_TEXT; 277 error = ENOEXEC; 278 } 279 goto exec_fail_dealloc; 280 } 281 282 /* 283 * Special interpreter operation, cleanup and loop up to try to 284 * activate the interpreter. 285 */ 286 if (imgp->interpreted) { 287 exec_unmap_first_page(imgp); 288 /* 289 * VV_TEXT needs to be unset for scripts. There is a short 290 * period before we determine that something is a script where 291 * VV_TEXT will be set. The vnode lock is held over this 292 * entire period so nothing should illegitimately be blocked. 293 */ 294 imgp->vp->v_vflag &= ~VV_TEXT; 295 /* free name buffer and old vnode */ 296 NDFREE(ndp, NDF_ONLY_PNBUF); 297 vput(ndp->ni_vp); 298 vm_object_deallocate(imgp->object); 299 imgp->object = NULL; 300 /* set new name to that of the interpreter */ 301 NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME, 302 UIO_SYSSPACE, imgp->interpreter_name, td); 303 goto interpret; 304 } 305 306 /* 307 * Copy out strings (args and env) and initialize stack base 308 */ 309 if (p->p_sysent->sv_copyout_strings) 310 stack_base = (*p->p_sysent->sv_copyout_strings)(imgp); 311 else 312 stack_base = exec_copyout_strings(imgp); 313 314 /* 315 * If custom stack fixup routine present for this process 316 * let it do the stack setup. 317 * Else stuff argument count as first item on stack 318 */ 319 if (p->p_sysent->sv_fixup) 320 (*p->p_sysent->sv_fixup)(&stack_base, imgp); 321 else 322 suword(--stack_base, imgp->argc); 323 324 /* 325 * For security and other reasons, the file descriptor table cannot 326 * be shared after an exec. 
327 */ 328 FILEDESC_LOCK(p->p_fd); 329 if (p->p_fd->fd_refcnt > 1) { 330 struct filedesc *tmp; 331 332 tmp = fdcopy(td); 333 FILEDESC_UNLOCK(p->p_fd); 334 fdfree(td); 335 p->p_fd = tmp; 336 } else 337 FILEDESC_UNLOCK(p->p_fd); 338 339 /* 340 * Malloc things before we need locks. 341 */ 342 newcred = crget(); 343 euip = uifind(attr.va_uid); 344 i = imgp->endargs - imgp->stringbase; 345 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) 346 newargs = pargs_alloc(i); 347 348 /* close files on exec */ 349 fdcloseexec(td); 350 351 /* Get a reference to the vnode prior to locking the proc */ 352 VREF(ndp->ni_vp); 353 354 /* 355 * For security and other reasons, signal handlers cannot 356 * be shared after an exec. The new process gets a copy of the old 357 * handlers. In execsigs(), the new process will have its signals 358 * reset. 359 */ 360 PROC_LOCK(p); 361 mp_fixme("procsig needs a lock"); 362 if (p->p_procsig->ps_refcnt > 1) { 363 oldprocsig = p->p_procsig; 364 PROC_UNLOCK(p); 365 MALLOC(newprocsig, struct procsig *, sizeof(struct procsig), 366 M_SUBPROC, M_WAITOK); 367 bcopy(oldprocsig, newprocsig, sizeof(*newprocsig)); 368 newprocsig->ps_refcnt = 1; 369 oldprocsig->ps_refcnt--; 370 PROC_LOCK(p); 371 p->p_procsig = newprocsig; 372 if (p->p_sigacts == &p->p_uarea->u_sigacts) 373 panic("shared procsig but private sigacts?"); 374 375 p->p_uarea->u_sigacts = *p->p_sigacts; 376 p->p_sigacts = &p->p_uarea->u_sigacts; 377 } 378 /* Stop profiling */ 379 stopprofclock(p); 380 381 /* reset caught signals */ 382 execsigs(p); 383 384 /* name this process - nameiexec(p, ndp) */ 385 len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN); 386 bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len); 387 p->p_comm[len] = 0; 388 389 /* 390 * mark as execed, wakeup the process that vforked (if any) and tell 391 * it that it now has its own resources back 392 */ 393 p->p_flag |= P_EXEC; 394 if (p->p_pptr && (p->p_flag & P_PPWAIT)) { 395 p->p_flag &= ~P_PPWAIT; 396 wakeup(p->p_pptr); 397 } 398 399 /* 400 * Implement image setuid/setgid. 401 * 402 * Don't honor setuid/setgid if the filesystem prohibits it or if 403 * the process is being traced. 404 */ 405 oldcred = p->p_ucred; 406 credential_changing = 0; 407 credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid != 408 attr.va_uid; 409 credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid != 410 attr.va_gid; 411 412 if (credential_changing && 413 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && 414 (p->p_flag & P_TRACED) == 0) { 415 /* 416 * Turn off syscall tracing for set-id programs, except for 417 * root. Record any set-id flags first to make sure that 418 * we do not regain any tracing during a possible block. 419 */ 420 setsugid(p); 421 #ifdef KTRACE 422 if (p->p_tracep && suser_cred(oldcred, PRISON_ROOT)) { 423 mtx_lock(&ktrace_mtx); 424 p->p_traceflag = 0; 425 tracevp = p->p_tracep; 426 p->p_tracep = NULL; 427 mtx_unlock(&ktrace_mtx); 428 } 429 #endif 430 /* Close any file descriptors 0..2 that reference procfs */ 431 setugidsafety(td); 432 /* Make sure file descriptors 0..2 are in use. */ 433 error = fdcheckstd(td); 434 if (error != 0) 435 goto done1; 436 /* 437 * Set the new credentials. 438 */ 439 crcopy(newcred, oldcred); 440 if (attr.va_mode & VSUID) 441 change_euid(newcred, euip); 442 if (attr.va_mode & VSGID) 443 change_egid(newcred, attr.va_gid); 444 /* 445 * Implement correct POSIX saved-id behavior. 
446 */ 447 change_svuid(newcred, newcred->cr_uid); 448 change_svgid(newcred, newcred->cr_gid); 449 p->p_ucred = newcred; 450 newcred = NULL; 451 } else { 452 if (oldcred->cr_uid == oldcred->cr_ruid && 453 oldcred->cr_gid == oldcred->cr_rgid) 454 p->p_flag &= ~P_SUGID; 455 /* 456 * Implement correct POSIX saved-id behavior. 457 * 458 * XXX: It's not clear that the existing behavior is 459 * POSIX-compliant. A number of sources indicate that the 460 * saved uid/gid should only be updated if the new ruid is 461 * not equal to the old ruid, or the new euid is not equal 462 * to the old euid and the new euid is not equal to the old 463 * ruid. The FreeBSD code always updates the saved uid/gid. 464 * Also, this code uses the new (replaced) euid and egid as 465 * the source, which may or may not be the right ones to use. 466 */ 467 if (oldcred->cr_svuid != oldcred->cr_uid || 468 oldcred->cr_svgid != oldcred->cr_gid) { 469 crcopy(newcred, oldcred); 470 change_svuid(newcred, newcred->cr_uid); 471 change_svgid(newcred, newcred->cr_gid); 472 p->p_ucred = newcred; 473 newcred = NULL; 474 } 475 } 476 477 /* 478 * Store the vp for use in procfs. This vnode was referenced prior 479 * to locking the proc lock. 480 */ 481 textvp = p->p_textvp; 482 p->p_textvp = ndp->ni_vp; 483 484 /* 485 * Notify others that we exec'd, and clear the P_INEXEC flag 486 * as we're now a bona fide freshly-execed process. 487 */ 488 KNOTE(&p->p_klist, NOTE_EXEC); 489 p->p_flag &= ~P_INEXEC; 490 491 /* 492 * If tracing the process, trap to debugger so breakpoints 493 * can be set before the program executes. 494 */ 495 _STOPEVENT(p, S_EXEC, 0); 496 497 if (p->p_flag & P_TRACED) 498 psignal(p, SIGTRAP); 499 500 /* clear "fork but no exec" flag, as we _are_ execing */ 501 p->p_acflag &= ~AFORK; 502 503 /* Free any previous argument cache */ 504 oldargs = p->p_args; 505 p->p_args = NULL; 506 507 /* Set values passed into the program in registers. */ 508 if (p->p_sysent->sv_setregs) 509 (*p->p_sysent->sv_setregs)(td, imgp->entry_addr, 510 (u_long)(uintptr_t)stack_base, imgp->ps_strings); 511 else 512 setregs(td, imgp->entry_addr, (u_long)(uintptr_t)stack_base, 513 imgp->ps_strings); 514 515 /* Cache arguments if they fit inside our allowance */ 516 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { 517 bcopy(imgp->stringbase, newargs->ar_args, i); 518 p->p_args = newargs; 519 newargs = NULL; 520 } 521 done1: 522 PROC_UNLOCK(p); 523 524 525 /* 526 * Free any resources malloc'd earlier that we didn't use. 527 */ 528 uifree(euip); 529 if (newcred == NULL) 530 crfree(oldcred); 531 else 532 crfree(newcred); 533 /* 534 * Handle deferred decrement of ref counts. 
535 */ 536 if (textvp != NULL) 537 vrele(textvp); 538 if (ndp->ni_vp && error != 0) 539 vrele(ndp->ni_vp); 540 #ifdef KTRACE 541 if (tracevp != NULL) 542 vrele(tracevp); 543 #endif 544 if (oldargs != NULL) 545 pargs_drop(oldargs); 546 if (newargs != NULL) 547 pargs_drop(newargs); 548 549 exec_fail_dealloc: 550 551 /* 552 * free various allocated resources 553 */ 554 if (imgp->firstpage) 555 exec_unmap_first_page(imgp); 556 557 if (imgp->stringbase != NULL) 558 kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase, 559 ARG_MAX + PAGE_SIZE); 560 561 if (imgp->vp) { 562 NDFREE(ndp, NDF_ONLY_PNBUF); 563 vput(imgp->vp); 564 } 565 566 if (imgp->object) 567 vm_object_deallocate(imgp->object); 568 569 if (error == 0) 570 goto done2; 571 572 exec_fail: 573 /* we're done here, clear P_INEXEC */ 574 PROC_LOCK(p); 575 p->p_flag &= ~P_INEXEC; 576 PROC_UNLOCK(p); 577 578 if (imgp->vmspace_destroyed) { 579 /* sorry, no more process anymore. exit gracefully */ 580 exit1(td, W_EXITCODE(0, SIGABRT)); 581 /* NOT REACHED */ 582 error = 0; 583 } 584 done2: 585 mtx_unlock(&Giant); 586 return (error); 587 } 588 589 int 590 exec_map_first_page(imgp) 591 struct image_params *imgp; 592 { 593 int rv, i; 594 int initial_pagein; 595 vm_page_t ma[VM_INITIAL_PAGEIN]; 596 vm_object_t object; 597 598 GIANT_REQUIRED; 599 600 if (imgp->firstpage) { 601 exec_unmap_first_page(imgp); 602 } 603 604 VOP_GETVOBJECT(imgp->vp, &object); 605 606 ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 607 608 if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) { 609 initial_pagein = VM_INITIAL_PAGEIN; 610 if (initial_pagein > object->size) 611 initial_pagein = object->size; 612 for (i = 1; i < initial_pagein; i++) { 613 if ((ma[i] = vm_page_lookup(object, i)) != NULL) { 614 if ((ma[i]->flags & PG_BUSY) || ma[i]->busy) 615 break; 616 if (ma[i]->valid) 617 break; 618 vm_page_busy(ma[i]); 619 } else { 620 ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL); 621 if (ma[i] == NULL) 622 break; 623 } 624 } 625 initial_pagein = i; 626 627 rv = vm_pager_get_pages(object, ma, initial_pagein, 0); 628 ma[0] = vm_page_lookup(object, 0); 629 630 if ((rv != VM_PAGER_OK) || (ma[0] == NULL) || (ma[0]->valid == 0)) { 631 if (ma[0]) { 632 vm_page_lock_queues(); 633 vm_page_protect(ma[0], VM_PROT_NONE); 634 vm_page_free(ma[0]); 635 vm_page_unlock_queues(); 636 } 637 return EIO; 638 } 639 } 640 vm_page_lock_queues(); 641 vm_page_wire(ma[0]); 642 vm_page_wakeup(ma[0]); 643 vm_page_unlock_queues(); 644 645 pmap_qenter((vm_offset_t)imgp->image_header, ma, 1); 646 imgp->firstpage = ma[0]; 647 648 return 0; 649 } 650 651 void 652 exec_unmap_first_page(imgp) 653 struct image_params *imgp; 654 { 655 GIANT_REQUIRED; 656 657 if (imgp->firstpage) { 658 pmap_qremove((vm_offset_t)imgp->image_header, 1); 659 vm_page_lock_queues(); 660 vm_page_unwire(imgp->firstpage, 1); 661 vm_page_unlock_queues(); 662 imgp->firstpage = NULL; 663 } 664 } 665 666 /* 667 * Destroy old address space, and allocate a new stack 668 * The new stack is only SGROWSIZ large because it is grown 669 * automatically in trap.c. 670 */ 671 int 672 exec_new_vmspace(imgp, minuser, maxuser, stack_addr) 673 struct image_params *imgp; 674 vm_offset_t minuser, maxuser, stack_addr; 675 { 676 int error; 677 struct execlist *ep; 678 struct proc *p = imgp->proc; 679 struct vmspace *vmspace = p->p_vmspace; 680 681 GIANT_REQUIRED; 682 683 stack_addr = stack_addr - maxssiz; 684 685 imgp->vmspace_destroyed = 1; 686 687 /* 688 * Perform functions registered with at_exec(). 
689 */ 690 TAILQ_FOREACH(ep, &exec_list, next) 691 (*ep->function)(p); 692 693 /* 694 * Blow away entire process VM, if address space not shared, 695 * otherwise, create a new VM space so that other threads are 696 * not disrupted 697 */ 698 if (vmspace->vm_refcnt == 1 699 && vm_map_min(&vmspace->vm_map) == minuser 700 && vm_map_max(&vmspace->vm_map) == maxuser) { 701 if (vmspace->vm_shm) 702 shmexit(p); 703 pmap_remove_pages(vmspace_pmap(vmspace), minuser, maxuser); 704 vm_map_remove(&vmspace->vm_map, minuser, maxuser); 705 } else { 706 vmspace_exec(p, minuser, maxuser); 707 vmspace = p->p_vmspace; 708 } 709 710 /* Allocate a new stack */ 711 error = vm_map_stack(&vmspace->vm_map, stack_addr, (vm_size_t)maxssiz, 712 VM_PROT_ALL, VM_PROT_ALL, 0); 713 if (error) 714 return (error); 715 716 #ifdef __ia64__ 717 { 718 /* 719 * Allocate backing store. We really need something 720 * similar to vm_map_stack which can allow the backing 721 * store to grow upwards. This will do for now. 722 */ 723 vm_offset_t bsaddr; 724 bsaddr = USRSTACK - 2*maxssiz; 725 error = vm_map_find(&vmspace->vm_map, 0, 0, &bsaddr, 726 regstkpages * PAGE_SIZE, 0, 727 VM_PROT_ALL, VM_PROT_ALL, 0); 728 FIRST_THREAD_IN_PROC(p)->td_md.md_bspstore = bsaddr; 729 } 730 #endif 731 732 /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the 733 * VM_STACK case, but they are still used to monitor the size of the 734 * process stack so we can check the stack rlimit. 735 */ 736 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; 737 vmspace->vm_maxsaddr = (char *)USRSTACK - maxssiz; 738 739 return(0); 740 } 741 742 /* 743 * Copy out argument and environment strings from the old process 744 * address space into the temporary string buffer. 745 */ 746 int 747 exec_extract_strings(imgp) 748 struct image_params *imgp; 749 { 750 char **argv, **envv; 751 char *argp, *envp; 752 int error; 753 size_t length; 754 755 /* 756 * extract arguments first 757 */ 758 759 argv = imgp->uap->argv; 760 761 if (argv) { 762 argp = (caddr_t) (intptr_t) fuword(argv); 763 if (argp == (caddr_t) -1) 764 return (EFAULT); 765 if (argp) 766 argv++; 767 if (imgp->argv0) 768 argp = imgp->argv0; 769 if (argp) { 770 do { 771 if (argp == (caddr_t) -1) 772 return (EFAULT); 773 if ((error = copyinstr(argp, imgp->stringp, 774 imgp->stringspace, &length))) { 775 if (error == ENAMETOOLONG) 776 return(E2BIG); 777 return (error); 778 } 779 imgp->stringspace -= length; 780 imgp->stringp += length; 781 imgp->argc++; 782 } while ((argp = (caddr_t) (intptr_t) fuword(argv++))); 783 } 784 } 785 786 imgp->endargs = imgp->stringp; 787 788 /* 789 * extract environment strings 790 */ 791 792 envv = imgp->uap->envv; 793 794 if (envv) { 795 while ((envp = (caddr_t) (intptr_t) fuword(envv++))) { 796 if (envp == (caddr_t) -1) 797 return (EFAULT); 798 if ((error = copyinstr(envp, imgp->stringp, 799 imgp->stringspace, &length))) { 800 if (error == ENAMETOOLONG) 801 return(E2BIG); 802 return (error); 803 } 804 imgp->stringspace -= length; 805 imgp->stringp += length; 806 imgp->envc++; 807 } 808 } 809 810 return (0); 811 } 812 813 /* 814 * Copy strings out to the new process address space, constructing 815 * new arg and env vector tables. Return a pointer to the base 816 * so that it can be used as the initial stack pointer. 
817 */ 818 register_t * 819 exec_copyout_strings(imgp) 820 struct image_params *imgp; 821 { 822 int argc, envc; 823 char **vectp; 824 char *stringp, *destp; 825 register_t *stack_base; 826 struct ps_strings *arginfo; 827 int szsigcode; 828 829 /* 830 * Calculate string base and vector table pointers. 831 * Also deal with signal trampoline code for this exec type. 832 */ 833 arginfo = (struct ps_strings *)PS_STRINGS; 834 szsigcode = *(imgp->proc->p_sysent->sv_szsigcode); 835 destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE - 836 roundup((ARG_MAX - imgp->stringspace), sizeof(char *)); 837 838 /* 839 * install sigcode 840 */ 841 if (szsigcode) 842 copyout(imgp->proc->p_sysent->sv_sigcode, 843 ((caddr_t)arginfo - szsigcode), szsigcode); 844 845 /* 846 * If we have a valid auxargs ptr, prepare some room 847 * on the stack. 848 */ 849 if (imgp->auxargs) { 850 /* 851 * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for 852 * lower compatibility. 853 */ 854 imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size 855 : (AT_COUNT * 2); 856 /* 857 * The '+ 2' is for the null pointers at the end of each of 858 * the arg and env vector sets,and imgp->auxarg_size is room 859 * for argument of Runtime loader. 860 */ 861 vectp = (char **) (destp - (imgp->argc + imgp->envc + 2 + 862 imgp->auxarg_size) * sizeof(char *)); 863 864 } else 865 /* 866 * The '+ 2' is for the null pointers at the end of each of 867 * the arg and env vector sets 868 */ 869 vectp = (char **) 870 (destp - (imgp->argc + imgp->envc + 2) * sizeof(char *)); 871 872 /* 873 * vectp also becomes our initial stack base 874 */ 875 stack_base = (register_t *)vectp; 876 877 stringp = imgp->stringbase; 878 argc = imgp->argc; 879 envc = imgp->envc; 880 881 /* 882 * Copy out strings - arguments and environment. 883 */ 884 copyout(stringp, destp, ARG_MAX - imgp->stringspace); 885 886 /* 887 * Fill in "ps_strings" struct for ps, w, etc. 888 */ 889 suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp); 890 suword(&arginfo->ps_nargvstr, argc); 891 892 /* 893 * Fill in argument portion of vector table. 894 */ 895 for (; argc > 0; --argc) { 896 suword(vectp++, (long)(intptr_t)destp); 897 while (*stringp++ != 0) 898 destp++; 899 destp++; 900 } 901 902 /* a null vector table pointer separates the argp's from the envp's */ 903 suword(vectp++, 0); 904 905 suword(&arginfo->ps_envstr, (long)(intptr_t)vectp); 906 suword(&arginfo->ps_nenvstr, envc); 907 908 /* 909 * Fill in environment portion of vector table. 910 */ 911 for (; envc > 0; --envc) { 912 suword(vectp++, (long)(intptr_t)destp); 913 while (*stringp++ != 0) 914 destp++; 915 destp++; 916 } 917 918 /* end of vector table is a null pointer */ 919 suword(vectp, 0); 920 921 return (stack_base); 922 } 923 924 /* 925 * Check permissions of file to execute. 926 * Called with imgp->vp locked. 927 * Return 0 for success or error code on failure. 928 */ 929 int 930 exec_check_permissions(imgp) 931 struct image_params *imgp; 932 { 933 struct vnode *vp = imgp->vp; 934 struct vattr *attr = imgp->attr; 935 struct thread *td; 936 int error; 937 938 td = curthread; /* XXXKSE */ 939 940 #ifdef MAC 941 error = mac_check_vnode_exec(td->td_ucred, imgp->vp); 942 if (error) 943 return (error); 944 #endif 945 946 /* Get file attributes */ 947 error = VOP_GETATTR(vp, attr, td->td_ucred, td); 948 if (error) 949 return (error); 950 951 /* 952 * 1) Check if file execution is disabled for the filesystem that this 953 * file resides on. 
	 * 2) Ensure that at least one execute bit is on - otherwise root
	 *    will always succeed, and we don't want this to happen unless the
	 *    file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr->va_mode & 0111) == 0) ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 * Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 */
	if (vp->v_writecount)
		return (ETXTBSY);

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td);
	return (error);
}

/*
 * Exec handler registration
 */
int
exec_register(execsw_arg)
	const struct execsw *execsw_arg;
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 2;	/* New slot and trailing NULL */

	if (execsw)
		for (es = execsw; *es; es++)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	if (newexecsw == NULL)
		return ENOMEM;
	xs = newexecsw;
	if (execsw)
		for (es = execsw; *es; es++)
			*xs++ = *es;
	*xs++ = execsw_arg;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return 0;
}

int
exec_unregister(execsw_arg)
	const struct execsw *execsw_arg;
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 1;

	if (execsw == NULL)
		panic("unregister with no handlers left?\n");

	for (es = execsw; *es; es++) {
		if (*es == execsw_arg)
			break;
	}
	if (*es == NULL)
		return ENOENT;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	if (newexecsw == NULL)
		return ENOMEM;
	xs = newexecsw;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			*xs++ = *es;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return 0;
}

int
at_exec(function)
	execlist_fn function;
{
	struct execlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exec(function))
		printf("WARNING: exec callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATEXEC, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exec_list, ep, next);
	return (0);
}

/*
 * Scan the exec callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exec(function)
	execlist_fn function;
{
	struct execlist *ep;

	TAILQ_FOREACH(ep, &exec_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exec_list, ep, next);
			free(ep, M_ATEXEC);
			return (1);
		}
	}
	return (0);
}