/*
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/wait.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/sysent.h>
#include <sys/shm.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/reg.h>

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

static MALLOC_DEFINE(M_ATEXEC, "atexec", "atexec callback");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int kern_execve(struct thread *td, char *fname, char **argv,
    char **envv, struct mac *mac_p);

/*
 * callout list for things to do at exec time
 */
struct execlist {
        execlist_fn function;
        TAILQ_ENTRY(execlist) next;
};

TAILQ_HEAD(exec_list_head, execlist);
static struct exec_list_head exec_list = TAILQ_HEAD_INITIALIZER(exec_list);

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
    NULL, 0, sysctl_kern_ps_strings, "LU", "");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD,
    NULL, 0, sysctl_kern_usrstack, "LU", "");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_stackprot, "I", "");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0, "");

int ps_argsopen = 1;
SYSCTL_INT(_kern, OID_AUTO, ps_argsopen, CTLFLAG_RW, &ps_argsopen, 0, "");

#ifdef __ia64__
/* XXX HACK */
static int regstkpages = 256;
SYSCTL_INT(_machdep, OID_AUTO, regstkpages, CTLFLAG_RW, &regstkpages, 0, "");
#endif

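/*
 * These read-only sysctls report the ps_strings location, the top of the
 * user stack and the stack protection for the current process, taken from
 * its sysentvec, so emulated binaries see the values appropriate to their
 * ABI.
 */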
static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;

        p = curproc;
        return (SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
            sizeof(p->p_sysent->sv_psstrings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;

        p = curproc;
        return (SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
            sizeof(p->p_sysent->sv_usrstack)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;

        p = curproc;
        return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
            sizeof(p->p_sysent->sv_stackprot)));
}

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 *
 * MPSAFE
 */
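/*
 * The overall flow is: mark the process P_INEXEC, copy the argument and
 * environment strings into a temporary kernel buffer, look up the image
 * vnode and offer it to the image activators (looping back for an
 * interpreter if a script is detected), then build the new user stack,
 * replace the file descriptor table, signal state and credentials as
 * required, and finally set up the registers for the new image.  Errors
 * that occur after the old address space has been destroyed cause the
 * process to exit rather than return.
 */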
static int
kern_execve(td, fname, argv, envv, mac_p)
        struct thread *td;
        char *fname;
        char **argv;
        char **envv;
        struct mac *mac_p;
{
        struct proc *p = td->td_proc;
        struct nameidata nd, *ndp;
        struct ucred *newcred = NULL, *oldcred;
        struct uidinfo *euip;
        register_t *stack_base;
        int error, len, i;
        struct image_params image_params, *imgp;
        struct vattr attr;
        int (*img_first)(struct image_params *);
        struct pargs *oldargs = NULL, *newargs = NULL;
        struct procsig *oldprocsig, *newprocsig;
#ifdef KTRACE
        struct vnode *tracevp = NULL;
#endif
        struct vnode *textvp = NULL;
        int credential_changing;
        int textset;
#ifdef MAC
        struct label interplabel;       /* label of the interpreted vnode */
        struct label execlabel;         /* optional label argument */
        int will_transition, interplabelvalid = 0;
#endif

        imgp = &image_params;

        /*
         * Lock the process and set the P_INEXEC flag to indicate that
         * it should be left alone until we're done here.  This is
         * necessary to avoid race conditions - e.g. in ptrace() -
         * that might allow a local user to illicitly obtain elevated
         * privileges.
         */
        PROC_LOCK(p);
        KASSERT((p->p_flag & P_INEXEC) == 0,
            ("%s(): process already has P_INEXEC flag", __func__));
        if (p->p_flag & P_KSES) {
                if (thread_single(SINGLE_EXIT)) {
                        PROC_UNLOCK(p);
                        return (ERESTART);      /* Try again later. */
                }
                /*
                 * If we get here all other threads are dead,
                 * so unset the associated flags and lose KSE mode.
                 */
                p->p_flag &= ~P_KSES;
                td->td_mailbox = NULL;
                thread_single_end();
        }
        p->p_flag |= P_INEXEC;
        PROC_UNLOCK(p);

        /*
         * Initialize part of the common data
         */
        imgp->proc = p;
        imgp->userspace_argv = argv;
        imgp->userspace_envv = envv;
        imgp->execlabel = NULL;
        imgp->attr = &attr;
        imgp->argc = imgp->envc = 0;
        imgp->argv0 = NULL;
        imgp->entry_addr = 0;
        imgp->vmspace_destroyed = 0;
        imgp->interpreted = 0;
        imgp->interpreter_name[0] = '\0';
        imgp->auxargs = NULL;
        imgp->vp = NULL;
        imgp->object = NULL;
        imgp->firstpage = NULL;
        imgp->ps_strings = 0;
        imgp->auxarg_size = 0;

#ifdef MAC
        error = mac_execve_enter(imgp, mac_p, &execlabel);
        if (error) {
                mtx_lock(&Giant);
                goto exec_fail;
        }
#endif

        /*
         * Allocate temporary demand zeroed space for argument and
         * environment strings
         */
        imgp->stringbase = (char *)kmem_alloc_wait(exec_map, ARG_MAX +
            PAGE_SIZE);
        if (imgp->stringbase == NULL) {
                error = ENOMEM;
                mtx_lock(&Giant);
                goto exec_fail;
        }
        imgp->stringp = imgp->stringbase;
        imgp->stringspace = ARG_MAX;
        imgp->image_header = imgp->stringbase + ARG_MAX;

        /*
         * Translate the file name.  namei() returns a vnode pointer
         * in ni_vp among other things.
         */
        ndp = &nd;
        NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
            UIO_USERSPACE, fname, td);

        mtx_lock(&Giant);
interpret:

        error = namei(ndp);
        if (error) {
                kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
                    ARG_MAX + PAGE_SIZE);
                goto exec_fail;
        }

        imgp->vp = ndp->ni_vp;
        imgp->fname = fname;

        /*
         * Check file permissions (also 'opens' file)
         */
        error = exec_check_permissions(imgp);
        if (error)
                goto exec_fail_dealloc;

        if (VOP_GETVOBJECT(imgp->vp, &imgp->object) == 0)
                vm_object_reference(imgp->object);

        /*
         * Set VV_TEXT now so no one can write to the executable while we're
         * activating it.
         *
         * Remember if this was set before and unset it in case this is not
         * actually an executable image.
         */
        textset = imgp->vp->v_vflag & VV_TEXT;
        imgp->vp->v_vflag |= VV_TEXT;

        error = exec_map_first_page(imgp);
        if (error)
                goto exec_fail_dealloc;

        /*
         * If the current process has a special image activator it
         * wants to try first, call it.  For example, emulating shell
         * scripts differently.
         */
        error = -1;
        if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
                error = img_first(imgp);

        /*
         * Loop through the list of image activators, calling each one.
         * An activator returns -1 if there is no match, 0 on success,
         * and an error otherwise.
         */
        for (i = 0; error == -1 && execsw[i]; ++i) {
                if (execsw[i]->ex_imgact == NULL ||
                    execsw[i]->ex_imgact == img_first) {
                        continue;
                }
                error = (*execsw[i]->ex_imgact)(imgp);
        }

        if (error) {
                if (error == -1) {
                        if (textset == 0)
                                imgp->vp->v_vflag &= ~VV_TEXT;
                        error = ENOEXEC;
                }
                goto exec_fail_dealloc;
        }

        /*
         * Special interpreter operation, cleanup and loop up to try to
         * activate the interpreter.
         */
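        /*
         * A script activator (e.g. the "#!" shell activator) reports a
         * match by setting imgp->interpreted and storing the interpreter
         * path in imgp->interpreter_name; in that case we drop the script's
         * vnode and retry the lookup with the interpreter.  The retry uses
         * UIO_SYSSPACE because interpreter_name lives in kernel memory.
         */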
        if (imgp->interpreted) {
                exec_unmap_first_page(imgp);
                /*
                 * VV_TEXT needs to be unset for scripts.  There is a short
                 * period before we determine that something is a script
                 * where VV_TEXT will be set.  The vnode lock is held over
                 * this entire period so nothing should illegitimately be
                 * blocked.
                 */
                imgp->vp->v_vflag &= ~VV_TEXT;
                /* free name buffer and old vnode */
                NDFREE(ndp, NDF_ONLY_PNBUF);
#ifdef MAC
                mac_init_vnode_label(&interplabel);
                mac_copy_vnode_label(&ndp->ni_vp->v_label, &interplabel);
                interplabelvalid = 1;
#endif
                vput(ndp->ni_vp);
                vm_object_deallocate(imgp->object);
                imgp->object = NULL;
                /* set new name to that of the interpreter */
                NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
                    UIO_SYSSPACE, imgp->interpreter_name, td);
                goto interpret;
        }

        /*
         * Copy out strings (args and env) and initialize stack base
         */
        if (p->p_sysent->sv_copyout_strings)
                stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
        else
                stack_base = exec_copyout_strings(imgp);

        /*
         * If custom stack fixup routine present for this process
         * let it do the stack setup.
         * Else stuff argument count as first item on stack
         */
        if (p->p_sysent->sv_fixup)
                (*p->p_sysent->sv_fixup)(&stack_base, imgp);
        else
                suword(--stack_base, imgp->argc);

        /*
         * For security and other reasons, the file descriptor table cannot
         * be shared after an exec.
         */
        FILEDESC_LOCK(p->p_fd);
        if (p->p_fd->fd_refcnt > 1) {
                struct filedesc *tmp;

                tmp = fdcopy(td->td_proc->p_fd);
                FILEDESC_UNLOCK(p->p_fd);
                fdfree(td);
                p->p_fd = tmp;
        } else
                FILEDESC_UNLOCK(p->p_fd);

        /*
         * Malloc things before we need locks.
         */
        newcred = crget();
        euip = uifind(attr.va_uid);
        i = imgp->endargs - imgp->stringbase;
        if (ps_arg_cache_limit >= i + sizeof(struct pargs))
                newargs = pargs_alloc(i);

        /* close files on exec */
        fdcloseexec(td);

        /* Get a reference to the vnode prior to locking the proc */
        VREF(ndp->ni_vp);

        /*
         * For security and other reasons, signal handlers cannot
         * be shared after an exec.  The new process gets a copy of the old
         * handlers.  In execsigs(), the new process will have its signals
         * reset.
         */
        PROC_LOCK(p);
        mp_fixme("procsig needs a lock");
        if (p->p_procsig->ps_refcnt > 1) {
                oldprocsig = p->p_procsig;
                PROC_UNLOCK(p);
                MALLOC(newprocsig, struct procsig *, sizeof(struct procsig),
                    M_SUBPROC, 0);
                bcopy(oldprocsig, newprocsig, sizeof(*newprocsig));
                newprocsig->ps_refcnt = 1;
                oldprocsig->ps_refcnt--;
                PROC_LOCK(p);
                p->p_procsig = newprocsig;
                if (p->p_sigacts == &p->p_uarea->u_sigacts)
                        panic("shared procsig but private sigacts?");

                p->p_uarea->u_sigacts = *p->p_sigacts;
                p->p_sigacts = &p->p_uarea->u_sigacts;
        }
        /* Stop profiling */
        stopprofclock(p);

        /* reset caught signals */
        execsigs(p);

        /* name this process - nameiexec(p, ndp) */
        len = min(ndp->ni_cnd.cn_namelen, MAXCOMLEN);
        bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
        p->p_comm[len] = 0;

        /*
         * mark as execed, wakeup the process that vforked (if any) and tell
         * it that it now has its own resources back
         */
        p->p_flag |= P_EXEC;
        if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
                p->p_flag &= ~P_PPWAIT;
                wakeup(p->p_pptr);
        }

        /*
         * Implement image setuid/setgid.
         *
         * Don't honor setuid/setgid if the filesystem prohibits it or if
         * the process is being traced.
         *
         * XXXMAC: For the time being, use NOSUID to also prohibit
         * transitions on the file system.
         */
        oldcred = p->p_ucred;
        credential_changing = 0;
        credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
            attr.va_uid;
        credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
            attr.va_gid;
#ifdef MAC
        will_transition = mac_execve_will_transition(oldcred, imgp->vp,
            interplabelvalid ? &interplabel : NULL, imgp);
        credential_changing |= will_transition;
#endif

        if (credential_changing &&
            (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
            (p->p_flag & P_TRACED) == 0) {
                /*
                 * Turn off syscall tracing for set-id programs, except for
                 * root.  Record any set-id flags first to make sure that
                 * we do not regain any tracing during a possible block.
                 */
                setsugid(p);
#ifdef KTRACE
                if (p->p_tracep && suser_cred(oldcred, PRISON_ROOT)) {
                        mtx_lock(&ktrace_mtx);
                        p->p_traceflag = 0;
                        tracevp = p->p_tracep;
                        p->p_tracep = NULL;
                        mtx_unlock(&ktrace_mtx);
                }
#endif
                /*
                 * Close any file descriptors 0..2 that reference procfs,
                 * then make sure file descriptors 0..2 are in use.
                 *
                 * setugidsafety() may call closef() and then pfind()
                 * which may grab the process lock.
                 * fdcheckstd() may call falloc() which may block to
                 * allocate memory, so temporarily drop the process lock.
                 */
                PROC_UNLOCK(p);
                setugidsafety(td);
                error = fdcheckstd(td);
                if (error != 0)
                        goto done1;
                PROC_LOCK(p);
                /*
                 * Set the new credentials.
                 */
                crcopy(newcred, oldcred);
                if (attr.va_mode & VSUID)
                        change_euid(newcred, euip);
                if (attr.va_mode & VSGID)
                        change_egid(newcred, attr.va_gid);
#ifdef MAC
                if (will_transition) {
                        mac_execve_transition(oldcred, newcred, imgp->vp,
                            interplabelvalid ? &interplabel : NULL, imgp);
                }
#endif
                /*
                 * Implement correct POSIX saved-id behavior.
                 *
                 * XXXMAC: Note that the current logic will save the
                 * uid and gid if a MAC domain transition occurs, even
                 * though maybe it shouldn't.
                 */
                change_svuid(newcred, newcred->cr_uid);
                change_svgid(newcred, newcred->cr_gid);
                p->p_ucred = newcred;
                newcred = NULL;
        } else {
                if (oldcred->cr_uid == oldcred->cr_ruid &&
                    oldcred->cr_gid == oldcred->cr_rgid)
                        p->p_flag &= ~P_SUGID;
                /*
                 * Implement correct POSIX saved-id behavior.
                 *
                 * XXX: It's not clear that the existing behavior is
                 * POSIX-compliant.  A number of sources indicate that the
                 * saved uid/gid should only be updated if the new ruid is
                 * not equal to the old ruid, or the new euid is not equal
                 * to the old euid and the new euid is not equal to the old
                 * ruid.  The FreeBSD code always updates the saved uid/gid.
                 * Also, this code uses the new (replaced) euid and egid as
                 * the source, which may or may not be the right ones to use.
                 */
                if (oldcred->cr_svuid != oldcred->cr_uid ||
                    oldcred->cr_svgid != oldcred->cr_gid) {
                        crcopy(newcred, oldcred);
                        change_svuid(newcred, newcred->cr_uid);
                        change_svgid(newcred, newcred->cr_gid);
                        p->p_ucred = newcred;
                        newcred = NULL;
                }
        }

        /*
         * Store the vp for use in procfs.  This vnode was referenced prior
         * to locking the proc lock.
         */
        textvp = p->p_textvp;
        p->p_textvp = ndp->ni_vp;

        /*
         * Notify others that we exec'd, and clear the P_INEXEC flag
         * as we're now a bona fide freshly-execed process.
         */
        KNOTE(&p->p_klist, NOTE_EXEC);
        p->p_flag &= ~P_INEXEC;

        /*
         * If tracing the process, trap to debugger so breakpoints
         * can be set before the program executes.
         */
        if (p->p_flag & P_TRACED)
                psignal(p, SIGTRAP);

        /* clear "fork but no exec" flag, as we _are_ execing */
        p->p_acflag &= ~AFORK;

        /* Free any previous argument cache */
        oldargs = p->p_args;
        p->p_args = NULL;

        /* Cache arguments if they fit inside our allowance */
        if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
                bcopy(imgp->stringbase, newargs->ar_args, i);
                p->p_args = newargs;
                newargs = NULL;
        }
        PROC_UNLOCK(p);

        /* Set values passed into the program in registers. */
        if (p->p_sysent->sv_setregs)
                (*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
                    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
        else
                exec_setregs(td, imgp->entry_addr,
                    (u_long)(uintptr_t)stack_base, imgp->ps_strings);

done1:
        /*
         * Free any resources malloc'd earlier that we didn't use.
         */
        uifree(euip);
        if (newcred == NULL)
                crfree(oldcred);
        else
                crfree(newcred);
        /*
         * Handle deferred decrement of ref counts.
         */
        if (textvp != NULL)
                vrele(textvp);
        if (ndp->ni_vp && error != 0)
                vrele(ndp->ni_vp);
#ifdef KTRACE
        if (tracevp != NULL)
                vrele(tracevp);
#endif
        if (oldargs != NULL)
                pargs_drop(oldargs);
        if (newargs != NULL)
                pargs_drop(newargs);

exec_fail_dealloc:

        /*
         * free various allocated resources
         */
        if (imgp->firstpage)
                exec_unmap_first_page(imgp);

        if (imgp->vp) {
                NDFREE(ndp, NDF_ONLY_PNBUF);
                vput(imgp->vp);
        }

        if (imgp->stringbase != NULL)
                kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
                    ARG_MAX + PAGE_SIZE);

        if (imgp->object)
                vm_object_deallocate(imgp->object);

        if (error == 0) {
                /*
                 * Stop the process here if its stop event mask has
                 * the S_EXEC bit set.
                 */
                STOPEVENT(p, S_EXEC, 0);
                goto done2;
        }

exec_fail:
        /* we're done here, clear P_INEXEC */
        PROC_LOCK(p);
        p->p_flag &= ~P_INEXEC;
        PROC_UNLOCK(p);

        if (imgp->vmspace_destroyed) {
                /* sorry, no more process anymore. exit gracefully */
#ifdef MAC
                mac_execve_exit(imgp);
                if (interplabelvalid)
                        mac_destroy_vnode_label(&interplabel);
#endif
                exit1(td, W_EXITCODE(0, SIGABRT));
                /* NOT REACHED */
                error = 0;
        }
done2:
#ifdef MAC
        mac_execve_exit(imgp);
        if (interplabelvalid)
                mac_destroy_vnode_label(&interplabel);
#endif
        mtx_unlock(&Giant);
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
        char *fname;
        char **argv;
        char **envv;
};
#endif

/*
 * MPSAFE
 */
int
execve(td, uap)
        struct thread *td;
        struct execve_args /* {
                char *fname;
                char **argv;
                char **envv;
        } */ *uap;
{

        return (kern_execve(td, uap->fname, uap->argv, uap->envv, NULL));
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
        char *fname;
        char **argv;
        char **envv;
        struct mac *mac_p;
};
#endif

/*
 * MPSAFE
 */
int
__mac_execve(td, uap)
        struct thread *td;
        struct __mac_execve_args /* {
                char *fname;
                char **argv;
                char **envv;
                struct mac *mac_p;
        } */ *uap;
{

#ifdef MAC
        return (kern_execve(td, uap->fname, uap->argv, uap->envv,
            uap->mac_p));
#else
        return (ENOSYS);
#endif
}

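/*
 * Map the first page of the executable into the kernel so that image
 * activators can examine the header.  The page is wired and entered into
 * the kernel map at imgp->image_header; exec_unmap_first_page() undoes
 * this.  Up to VM_INITIAL_PAGEIN pages are requested from the pager in one
 * go as a simple read-ahead.
 */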
int
exec_map_first_page(imgp)
        struct image_params *imgp;
{
        int rv, i;
        int initial_pagein;
        vm_page_t ma[VM_INITIAL_PAGEIN];
        vm_object_t object;

        GIANT_REQUIRED;

        if (imgp->firstpage) {
                exec_unmap_first_page(imgp);
        }

        VOP_GETVOBJECT(imgp->vp, &object);

        ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

        if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
                initial_pagein = VM_INITIAL_PAGEIN;
                if (initial_pagein > object->size)
                        initial_pagein = object->size;
                for (i = 1; i < initial_pagein; i++) {
                        if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
                                if ((ma[i]->flags & PG_BUSY) || ma[i]->busy)
                                        break;
                                if (ma[i]->valid)
                                        break;
                                vm_page_lock_queues();
                                vm_page_busy(ma[i]);
                                vm_page_unlock_queues();
                        } else {
                                ma[i] = vm_page_alloc(object, i,
                                    VM_ALLOC_NORMAL);
                                if (ma[i] == NULL)
                                        break;
                        }
                }
                initial_pagein = i;

                rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
                ma[0] = vm_page_lookup(object, 0);

                if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
                    (ma[0]->valid == 0)) {
                        if (ma[0]) {
                                vm_page_lock_queues();
                                pmap_remove_all(ma[0]);
                                vm_page_free(ma[0]);
                                vm_page_unlock_queues();
                        }
                        return (EIO);
                }
        }
        vm_page_lock_queues();
        vm_page_wire(ma[0]);
        vm_page_wakeup(ma[0]);
        vm_page_unlock_queues();

        pmap_qenter((vm_offset_t)imgp->image_header, ma, 1);
        imgp->firstpage = ma[0];

        return (0);
}

void
exec_unmap_first_page(imgp)
        struct image_params *imgp;
{
        GIANT_REQUIRED;

        if (imgp->firstpage) {
                pmap_qremove((vm_offset_t)imgp->image_header, 1);
                vm_page_lock_queues();
                vm_page_unwire(imgp->firstpage, 1);
                vm_page_unlock_queues();
                imgp->firstpage = NULL;
        }
}

/*
 * Destroy old address space, and allocate a new stack
 *      The new stack is only SGROWSIZ large because it is grown
 *      automatically in trap.c.
 */
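/*
 * Called by image activators once they have committed to the new image.
 * After this point a failed exec cannot return to the old program, so
 * imgp->vmspace_destroyed is set and kern_execve() will exit the process
 * instead of returning an error to it.
 */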
int
exec_new_vmspace(imgp, sv)
        struct image_params *imgp;
        struct sysentvec *sv;
{
        int error;
        struct execlist *ep;
        struct proc *p = imgp->proc;
        struct vmspace *vmspace = p->p_vmspace;
        vm_offset_t stack_addr;
        vm_map_t map;

        GIANT_REQUIRED;

        stack_addr = sv->sv_usrstack - maxssiz;

        imgp->vmspace_destroyed = 1;

        /*
         * Perform functions registered with at_exec().
         */
        TAILQ_FOREACH(ep, &exec_list, next)
                (*ep->function)(p);

        /*
         * Blow away entire process VM, if address space not shared,
         * otherwise, create a new VM space so that other threads are
         * not disrupted
         */
        map = &vmspace->vm_map;
        if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
            vm_map_max(map) == sv->sv_maxuser) {
                shmexit(vmspace);
                vm_page_lock_queues();
                pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map),
                    vm_map_max(map));
                vm_page_unlock_queues();
                vm_map_remove(map, vm_map_min(map), vm_map_max(map));
        } else {
                vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
                vmspace = p->p_vmspace;
                map = &vmspace->vm_map;
        }

        /* Allocate a new stack */
        error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
            sv->sv_stackprot, VM_PROT_ALL, 0);
        if (error)
                return (error);

#ifdef __ia64__
        {
                /*
                 * Allocate backing store.  We really need something
                 * similar to vm_map_stack which can allow the backing
                 * store to grow upwards.  This will do for now.
                 */
                vm_offset_t bsaddr;
                bsaddr = p->p_sysent->sv_usrstack - 2 * maxssiz;
                error = vm_map_find(map, 0, 0, &bsaddr,
                    regstkpages * PAGE_SIZE, 0, VM_PROT_ALL, VM_PROT_ALL, 0);
                FIRST_THREAD_IN_PROC(p)->td_md.md_bspstore = bsaddr;
        }
#endif

        /*
         * vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
         * VM_STACK case, but they are still used to monitor the size of the
         * process stack so we can check the stack rlimit.
         */
        vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
        vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz;

        return (0);
}

/*
 * Copy out argument and environment strings from the old process
 * address space into the temporary string buffer.
 */
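/*
 * The string buffer set up in kern_execve() holds at most ARG_MAX bytes of
 * argument and environment strings combined; copyinstr() overflows are
 * reported as E2BIG, and a fault while following the user's argv/envv
 * arrays is reported as EFAULT.
 */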
int
exec_extract_strings(imgp)
        struct image_params *imgp;
{
        char **argv, **envv;
        char *argp, *envp;
        int error;
        size_t length;

        /*
         * extract arguments first
         */

        argv = imgp->userspace_argv;

        if (argv) {
                argp = (caddr_t)(intptr_t)fuword(argv);
                if (argp == (caddr_t)-1)
                        return (EFAULT);
                if (argp)
                        argv++;
                if (imgp->argv0)
                        argp = imgp->argv0;
                if (argp) {
                        do {
                                if (argp == (caddr_t)-1)
                                        return (EFAULT);
                                if ((error = copyinstr(argp, imgp->stringp,
                                    imgp->stringspace, &length))) {
                                        if (error == ENAMETOOLONG)
                                                return (E2BIG);
                                        return (error);
                                }
                                imgp->stringspace -= length;
                                imgp->stringp += length;
                                imgp->argc++;
                        } while ((argp = (caddr_t)(intptr_t)fuword(argv++)));
                }
        }

        imgp->endargs = imgp->stringp;

        /*
         * extract environment strings
         */

        envv = imgp->userspace_envv;

        if (envv) {
                while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
                        if (envp == (caddr_t)-1)
                                return (EFAULT);
                        if ((error = copyinstr(envp, imgp->stringp,
                            imgp->stringspace, &length))) {
                                if (error == ENAMETOOLONG)
                                        return (E2BIG);
                                return (error);
                        }
                        imgp->stringspace -= length;
                        imgp->stringp += length;
                        imgp->envc++;
                }
        }

        return (0);
}

/*
 * Copy strings out to the new process address space, constructing
 * new arg and env vector tables.  Return a pointer to the base
 * so that it can be used as the initial stack pointer.
 */
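/*
 * The new image's stack is laid out from sv_psstrings downward: the
 * ps_strings structure, the signal trampoline (if any), a spare gap, the
 * argument and environment strings themselves, and finally the argv and
 * envp pointer vectors.  The address of the vectors is returned as the
 * initial stack base.
 */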
register_t *
exec_copyout_strings(imgp)
        struct image_params *imgp;
{
        int argc, envc;
        char **vectp;
        char *stringp, *destp;
        register_t *stack_base;
        struct ps_strings *arginfo;
        struct proc *p;
        int szsigcode;

        /*
         * Calculate string base and vector table pointers.
         * Also deal with signal trampoline code for this exec type.
         */
        p = imgp->proc;
        szsigcode = 0;
        arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
        if (p->p_sysent->sv_szsigcode != NULL)
                szsigcode = *(p->p_sysent->sv_szsigcode);
        destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
            roundup((ARG_MAX - imgp->stringspace), sizeof(char *));

        /*
         * install sigcode
         */
        if (szsigcode)
                copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
                    szsigcode), szsigcode);

        /*
         * If we have a valid auxargs ptr, prepare some room
         * on the stack.
         */
        if (imgp->auxargs) {
                /*
                 * 'AT_COUNT*2' is size for the ELF Auxargs data.  This is for
                 * lower compatibility.
                 */
                imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
                    (AT_COUNT * 2);
                /*
                 * The '+ 2' is for the null pointers at the end of each of
                 * the arg and env vector sets, and imgp->auxarg_size is room
                 * for the arguments of the runtime loader.
                 */
                vectp = (char **)(destp - (imgp->argc + imgp->envc + 2 +
                    imgp->auxarg_size) * sizeof(char *));

        } else
                /*
                 * The '+ 2' is for the null pointers at the end of each of
                 * the arg and env vector sets
                 */
                vectp = (char **)(destp - (imgp->argc + imgp->envc + 2) *
                    sizeof(char *));

        /*
         * vectp also becomes our initial stack base
         */
        stack_base = (register_t *)vectp;

        stringp = imgp->stringbase;
        argc = imgp->argc;
        envc = imgp->envc;

        /*
         * Copy out strings - arguments and environment.
         */
        copyout(stringp, destp, ARG_MAX - imgp->stringspace);

        /*
         * Fill in "ps_strings" struct for ps, w, etc.
         */
        suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
        suword(&arginfo->ps_nargvstr, argc);

        /*
         * Fill in argument portion of vector table.
         */
        for (; argc > 0; --argc) {
                suword(vectp++, (long)(intptr_t)destp);
                while (*stringp++ != 0)
                        destp++;
                destp++;
        }

        /* a null vector table pointer separates the argp's from the envp's */
        suword(vectp++, 0);

        suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
        suword(&arginfo->ps_nenvstr, envc);

        /*
         * Fill in environment portion of vector table.
         */
        for (; envc > 0; --envc) {
                suword(vectp++, (long)(intptr_t)destp);
                while (*stringp++ != 0)
                        destp++;
                destp++;
        }

        /* end of vector table is a null pointer */
        suword(vectp, 0);

        return (stack_base);
}

/*
 * Check permissions of file to execute.
 *      Called with imgp->vp locked.
 *      Return 0 for success or error code on failure.
 */
int
exec_check_permissions(imgp)
        struct image_params *imgp;
{
        struct vnode *vp = imgp->vp;
        struct vattr *attr = imgp->attr;
        struct thread *td;
        int error;

        td = curthread;                 /* XXXKSE */

        /* Get file attributes */
        error = VOP_GETATTR(vp, attr, td->td_ucred, td);
        if (error)
                return (error);

#ifdef MAC
        error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp);
        if (error)
                return (error);
#endif

        /*
         * 1) Check if file execution is disabled for the filesystem that
         *    this file resides on.
         * 2) Ensure that at least one execute bit is on - otherwise root
         *    will always succeed, and we don't want that to happen unless
         *    the file really is executable.
         * 3) Ensure that the file is a regular file.
         */
        if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
            ((attr->va_mode & 0111) == 0) ||
            (attr->va_type != VREG))
                return (EACCES);

        /*
         * Zero length files can't be exec'd
         */
        if (attr->va_size == 0)
                return (ENOEXEC);

        /*
         * Check for execute permission to file based on current credentials.
         */
        error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
        if (error)
                return (error);

        /*
         * Check number of open-for-writes on the file and deny execution
         * if there are any.
         */
        if (vp->v_writecount)
                return (ETXTBSY);

        /*
         * Call filesystem specific open routine (which does nothing in the
         * general case).
         */
        error = VOP_OPEN(vp, FREAD, td->td_ucred, td);
        return (error);
}

/*
 * Exec handler registration
 */
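/*
 * execsw is a NULL-terminated array of pointers to the registered handlers.
 * Both registration and unregistration build a fresh copy of the array and
 * then swap it in, so the old array is never modified in place.
 */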
int
exec_register(execsw_arg)
        const struct execsw *execsw_arg;
{
        const struct execsw **es, **xs, **newexecsw;
        int count = 2;  /* New slot and trailing NULL */

        if (execsw)
                for (es = execsw; *es; es++)
                        count++;
        newexecsw = malloc(count * sizeof(*es), M_TEMP, 0);
        if (newexecsw == NULL)
                return (ENOMEM);
        xs = newexecsw;
        if (execsw)
                for (es = execsw; *es; es++)
                        *xs++ = *es;
        *xs++ = execsw_arg;
        *xs = NULL;
        if (execsw)
                free(execsw, M_TEMP);
        execsw = newexecsw;
        return (0);
}

int
exec_unregister(execsw_arg)
        const struct execsw *execsw_arg;
{
        const struct execsw **es, **xs, **newexecsw;
        int count = 1;

        if (execsw == NULL)
                panic("unregister with no handlers left?\n");

        for (es = execsw; *es; es++) {
                if (*es == execsw_arg)
                        break;
        }
        if (*es == NULL)
                return (ENOENT);
        for (es = execsw; *es; es++)
                if (*es != execsw_arg)
                        count++;
        newexecsw = malloc(count * sizeof(*es), M_TEMP, 0);
        if (newexecsw == NULL)
                return (ENOMEM);
        xs = newexecsw;
        for (es = execsw; *es; es++)
                if (*es != execsw_arg)
                        *xs++ = *es;
        *xs = NULL;
        if (execsw)
                free(execsw, M_TEMP);
        execsw = newexecsw;
        return (0);
}

int
at_exec(function)
        execlist_fn function;
{
        struct execlist *ep;

#ifdef INVARIANTS
        /* Be noisy if the programmer has lost track of things */
        if (rm_at_exec(function))
                printf("WARNING: exec callout entry (%p) already present\n",
                    function);
#endif
        ep = malloc(sizeof(*ep), M_ATEXEC, M_NOWAIT);
        if (ep == NULL)
                return (ENOMEM);
        ep->function = function;
        TAILQ_INSERT_TAIL(&exec_list, ep, next);
        return (0);
}

/*
 * Scan the exec callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exec(function)
        execlist_fn function;
{
        struct execlist *ep;

        TAILQ_FOREACH(ep, &exec_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&exec_list, ep, next);
                        free(ep, M_ATEXEC);
                        return (1);
                }
        }
        return (0);
}