/*
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/wait.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/sysent.h>
#include <sys/shm.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/reg.h>

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int kern_execve(struct thread *td, char *fname, char **argv,
    char **envv, struct mac *mac_p);

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
    NULL, 0, sysctl_kern_ps_strings, "LU", "");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD,
    NULL, 0, sysctl_kern_usrstack, "LU", "");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_stackprot, "I", "");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0, "");

int ps_argsopen = 1;
SYSCTL_INT(_kern, OID_AUTO, ps_argsopen, CTLFLAG_RW, &ps_argsopen, 0, "");

#ifdef __ia64__
/* XXX HACK */
static int regstkpages = 256;
SYSCTL_INT(_machdep, OID_AUTO, regstkpages, CTLFLAG_RW, &regstkpages, 0, "");
#endif
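/*
 * The three handlers below export per-ABI constants from the current
 * process's sysentvec: the location of the ps_strings structure, the top
 * of the user stack and the stack protection.  Userland can read them
 * with sysctl(3); illustrative sketch only:
 *
 *	u_long psstrings;
 *	size_t len = sizeof(psstrings);
 *	if (sysctlbyname("kern.ps_strings", &psstrings, &len, NULL, 0) == 0)
 *		... psstrings now holds sv_psstrings for this ABI ...
 */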
static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;

        p = curproc;
        return (SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
            sizeof(p->p_sysent->sv_psstrings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;

        p = curproc;
        return (SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
            sizeof(p->p_sysent->sv_usrstack)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;

        p = curproc;
        return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
            sizeof(p->p_sysent->sv_stackprot)));
}

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 *
 * MPSAFE
 */
static int
kern_execve(td, fname, argv, envv, mac_p)
        struct thread *td;
        char *fname;
        char **argv;
        char **envv;
        struct mac *mac_p;
{
        struct proc *p = td->td_proc;
        struct nameidata nd, *ndp;
        struct ucred *newcred = NULL, *oldcred;
        struct uidinfo *euip;
        register_t *stack_base;
        int error, len, i;
        struct image_params image_params, *imgp;
        struct vattr attr;
        int (*img_first)(struct image_params *);
        struct pargs *oldargs = NULL, *newargs = NULL;
        struct sigacts *oldsigacts, *newsigacts;
#ifdef KTRACE
        struct vnode *tracevp = NULL;
        struct ucred *tracecred = NULL;
#endif
        struct vnode *textvp = NULL;
        int credential_changing;
        int textset;
#ifdef MAC
        struct label interplabel;       /* label of the interpreted vnode */
        struct label execlabel;         /* optional label argument */
        int will_transition, interplabelvalid = 0;
#endif

        imgp = &image_params;

        /*
         * Lock the process and set the P_INEXEC flag to indicate that
         * it should be left alone until we're done here.  This is
         * necessary to avoid race conditions - e.g. in ptrace() -
         * that might allow a local user to illicitly obtain elevated
         * privileges.
         */
        PROC_LOCK(p);
        KASSERT((p->p_flag & P_INEXEC) == 0,
            ("%s(): process already has P_INEXEC flag", __func__));
        if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
                if (thread_single(SINGLE_EXIT)) {
                        PROC_UNLOCK(p);
                        return (ERESTART);      /* Try again later. */
                }
                /*
                 * If we get here all other threads are dead,
                 * so unset the associated flags and lose KSE mode.
                 */
                p->p_flag &= ~P_THREADED;
                td->td_mailbox = NULL;
                thread_single_end();
        }
        p->p_flag |= P_INEXEC;
        PROC_UNLOCK(p);

        /*
         * Initialize part of the common data
         */
        imgp->proc = p;
        imgp->userspace_argv = argv;
        imgp->userspace_envv = envv;
        imgp->execlabel = NULL;
        imgp->attr = &attr;
        imgp->argc = imgp->envc = 0;
        imgp->argv0 = NULL;
        imgp->entry_addr = 0;
        imgp->vmspace_destroyed = 0;
        imgp->interpreted = 0;
        imgp->interpreter_name[0] = '\0';
        imgp->auxargs = NULL;
        imgp->vp = NULL;
        imgp->object = NULL;
        imgp->firstpage = NULL;
        imgp->ps_strings = 0;
        imgp->auxarg_size = 0;

#ifdef MAC
        error = mac_execve_enter(imgp, mac_p, &execlabel);
        if (error) {
                mtx_lock(&Giant);
                goto exec_fail;
        }
#endif

        /*
         * Allocate temporary demand zeroed space for argument and
         * environment strings
         */
        imgp->stringbase = (char *)kmem_alloc_wait(exec_map, ARG_MAX +
            PAGE_SIZE);
        if (imgp->stringbase == NULL) {
                error = ENOMEM;
                mtx_lock(&Giant);
                goto exec_fail;
        }
        imgp->stringp = imgp->stringbase;
        imgp->stringspace = ARG_MAX;
        imgp->image_header = imgp->stringbase + ARG_MAX;

        /*
         * Translate the file name.  namei() returns a vnode pointer
         * in ni_vp among other things.
         */
        ndp = &nd;
        NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
            UIO_USERSPACE, fname, td);

        mtx_lock(&Giant);
interpret:

        error = namei(ndp);
        if (error) {
                kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
                    ARG_MAX + PAGE_SIZE);
                goto exec_fail;
        }

        imgp->vp = ndp->ni_vp;
        imgp->fname = fname;

        /*
         * Check file permissions (also 'opens' file)
         */
        error = exec_check_permissions(imgp);
        if (error)
                goto exec_fail_dealloc;

        if (VOP_GETVOBJECT(imgp->vp, &imgp->object) == 0)
                vm_object_reference(imgp->object);

        /*
         * Set VV_TEXT now so no one can write to the executable while we're
         * activating it.
         *
         * Remember if this was set before and unset it in case this is not
         * actually an executable image.
         */
        textset = imgp->vp->v_vflag & VV_TEXT;
        imgp->vp->v_vflag |= VV_TEXT;

        error = exec_map_first_page(imgp);
        if (error)
                goto exec_fail_dealloc;

        /*
         * If the current process has a special image activator it
         * wants to try first, call it.  For example, emulating shell
         * scripts differently.
         */
        error = -1;
        if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
                error = img_first(imgp);

        /*
         * Loop through the list of image activators, calling each one.
         * An activator returns -1 if there is no match, 0 on success,
         * and an error otherwise.
         */
        for (i = 0; error == -1 && execsw[i]; ++i) {
                if (execsw[i]->ex_imgact == NULL ||
                    execsw[i]->ex_imgact == img_first) {
                        continue;
                }
                error = (*execsw[i]->ex_imgact)(imgp);
        }

        if (error) {
                if (error == -1) {
                        if (textset == 0)
                                imgp->vp->v_vflag &= ~VV_TEXT;
                        error = ENOEXEC;
                }
                goto exec_fail_dealloc;
        }

        /*
         * Special interpreter operation, cleanup and loop up to try to
         * activate the interpreter.
         */
        if (imgp->interpreted) {
                exec_unmap_first_page(imgp);
                /*
                 * VV_TEXT needs to be unset for scripts.  There is a short
                 * period before we determine that something is a script where
                 * VV_TEXT will be set.
                 * The vnode lock is held over this entire period so nothing
                 * should illegitimately be blocked.
                 */
                imgp->vp->v_vflag &= ~VV_TEXT;
                /* free name buffer and old vnode */
                NDFREE(ndp, NDF_ONLY_PNBUF);
#ifdef MAC
                mac_init_vnode_label(&interplabel);
                mac_copy_vnode_label(&ndp->ni_vp->v_label, &interplabel);
                interplabelvalid = 1;
#endif
                vput(ndp->ni_vp);
                vm_object_deallocate(imgp->object);
                imgp->object = NULL;
                /* set new name to that of the interpreter */
                NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
                    UIO_SYSSPACE, imgp->interpreter_name, td);
                goto interpret;
        }

        /*
         * Copy out strings (args and env) and initialize stack base
         */
        if (p->p_sysent->sv_copyout_strings)
                stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
        else
                stack_base = exec_copyout_strings(imgp);

        /*
         * If custom stack fixup routine present for this process
         * let it do the stack setup.
         * Else stuff argument count as first item on stack
         */
        if (p->p_sysent->sv_fixup)
                (*p->p_sysent->sv_fixup)(&stack_base, imgp);
        else
                suword(--stack_base, imgp->argc);

        /*
         * For security and other reasons, the file descriptor table cannot
         * be shared after an exec.
         */
        FILEDESC_LOCK(p->p_fd);
        if (p->p_fd->fd_refcnt > 1) {
                struct filedesc *tmp;

                tmp = fdcopy(td->td_proc->p_fd);
                FILEDESC_UNLOCK(p->p_fd);
                fdfree(td);
                p->p_fd = tmp;
        } else
                FILEDESC_UNLOCK(p->p_fd);

        /*
         * Malloc things before we need locks.
         */
        newcred = crget();
        euip = uifind(attr.va_uid);
        i = imgp->endargs - imgp->stringbase;
        if (ps_arg_cache_limit >= i + sizeof(struct pargs))
                newargs = pargs_alloc(i);

        /* close files on exec */
        fdcloseexec(td);

        /* Get a reference to the vnode prior to locking the proc */
        VREF(ndp->ni_vp);

        /*
         * For security and other reasons, signal handlers cannot
         * be shared after an exec.  The new process gets a copy of the old
         * handlers.  In execsigs(), the new process will have its signals
         * reset.
         */
        PROC_LOCK(p);
        if (sigacts_shared(p->p_sigacts)) {
                oldsigacts = p->p_sigacts;
                PROC_UNLOCK(p);
                newsigacts = sigacts_alloc();
                sigacts_copy(newsigacts, oldsigacts);
                PROC_LOCK(p);
                p->p_sigacts = newsigacts;
        } else
                oldsigacts = NULL;

        /* Stop profiling */
        stopprofclock(p);

        /* reset caught signals */
        execsigs(p);

        /* name this process - nameiexec(p, ndp) */
        len = min(ndp->ni_cnd.cn_namelen, MAXCOMLEN);
        bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
        p->p_comm[len] = 0;

        /*
         * mark as execed, wakeup the process that vforked (if any) and tell
         * it that it now has its own resources back
         */
        p->p_flag |= P_EXEC;
        if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
                p->p_flag &= ~P_PPWAIT;
                wakeup(p->p_pptr);
        }

        /*
         * Implement image setuid/setgid.
         *
         * Don't honor setuid/setgid if the filesystem prohibits it or if
         * the process is being traced.
         *
         * XXXMAC: For the time being, use NOSUID to also prohibit
         * transitions on the file system.
         */
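        /*
         * For example (illustrative only): a mode-4755 binary owned by
         * uid 0 that is executed by an unprivileged user makes
         * credential_changing true below (VSUID is set and cr_uid differs
         * from va_uid), so the effective uid is switched to the file owner,
         * unless the mount is MNT_NOSUID or the process is being traced.
         */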
        oldcred = p->p_ucred;
        credential_changing = 0;
        credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
            attr.va_uid;
        credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
            attr.va_gid;
#ifdef MAC
        will_transition = mac_execve_will_transition(oldcred, imgp->vp,
            interplabelvalid ? &interplabel : NULL, imgp);
        credential_changing |= will_transition;
#endif

        if (credential_changing &&
            (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
            (p->p_flag & P_TRACED) == 0) {
                /*
                 * Turn off syscall tracing for set-id programs, except for
                 * root.  Record any set-id flags first to make sure that
                 * we do not regain any tracing during a possible block.
                 */
                setsugid(p);
#ifdef KTRACE
                if (p->p_tracevp != NULL && suser_cred(oldcred, PRISON_ROOT)) {
                        mtx_lock(&ktrace_mtx);
                        p->p_traceflag = 0;
                        tracevp = p->p_tracevp;
                        p->p_tracevp = NULL;
                        tracecred = p->p_tracecred;
                        p->p_tracecred = NULL;
                        mtx_unlock(&ktrace_mtx);
                }
#endif
                /*
                 * Close any file descriptors 0..2 that reference procfs,
                 * then make sure file descriptors 0..2 are in use.
                 *
                 * setugidsafety() may call closef() and then pfind()
                 * which may grab the process lock.
                 * fdcheckstd() may call falloc() which may block to
                 * allocate memory, so temporarily drop the process lock.
                 */
                PROC_UNLOCK(p);
                setugidsafety(td);
                error = fdcheckstd(td);
                if (error != 0)
                        goto done1;
                PROC_LOCK(p);
                /*
                 * Set the new credentials.
                 */
                crcopy(newcred, oldcred);
                if (attr.va_mode & VSUID)
                        change_euid(newcred, euip);
                if (attr.va_mode & VSGID)
                        change_egid(newcred, attr.va_gid);
#ifdef MAC
                if (will_transition) {
                        mac_execve_transition(oldcred, newcred, imgp->vp,
                            interplabelvalid ? &interplabel : NULL, imgp);
                }
#endif
                /*
                 * Implement correct POSIX saved-id behavior.
                 *
                 * XXXMAC: Note that the current logic will save the
                 * uid and gid if a MAC domain transition occurs, even
                 * though maybe it shouldn't.
                 */
                change_svuid(newcred, newcred->cr_uid);
                change_svgid(newcred, newcred->cr_gid);
                p->p_ucred = newcred;
                newcred = NULL;
        } else {
                if (oldcred->cr_uid == oldcred->cr_ruid &&
                    oldcred->cr_gid == oldcred->cr_rgid)
                        p->p_flag &= ~P_SUGID;
                /*
                 * Implement correct POSIX saved-id behavior.
                 *
                 * XXX: It's not clear that the existing behavior is
                 * POSIX-compliant.  A number of sources indicate that the
                 * saved uid/gid should only be updated if the new ruid is
                 * not equal to the old ruid, or the new euid is not equal
                 * to the old euid and the new euid is not equal to the old
                 * ruid.  The FreeBSD code always updates the saved uid/gid.
                 * Also, this code uses the new (replaced) euid and egid as
                 * the source, which may or may not be the right ones to use.
                 */
                if (oldcred->cr_svuid != oldcred->cr_uid ||
                    oldcred->cr_svgid != oldcred->cr_gid) {
                        crcopy(newcred, oldcred);
                        change_svuid(newcred, newcred->cr_uid);
                        change_svgid(newcred, newcred->cr_gid);
                        p->p_ucred = newcred;
                        newcred = NULL;
                }
        }

        /*
         * Store the vp for use in procfs.  This vnode was referenced prior
         * to locking the proc lock.
         */
        textvp = p->p_textvp;
        p->p_textvp = ndp->ni_vp;

        /*
         * Notify others that we exec'd, and clear the P_INEXEC flag
         * as we're now a bona fide freshly-execed process.
         */
        KNOTE(&p->p_klist, NOTE_EXEC);
        p->p_flag &= ~P_INEXEC;

        /*
         * If tracing the process, trap to debugger so breakpoints
         * can be set before the program executes.
         */
        if (p->p_flag & P_TRACED)
                psignal(p, SIGTRAP);

        /* clear "fork but no exec" flag, as we _are_ execing */
        p->p_acflag &= ~AFORK;

        /* Free any previous argument cache */
        oldargs = p->p_args;
        p->p_args = NULL;

        /* Cache arguments if they fit inside our allowance */
        if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
                bcopy(imgp->stringbase, newargs->ar_args, i);
                p->p_args = newargs;
                newargs = NULL;
        }
        PROC_UNLOCK(p);

        /* Set values passed into the program in registers. */
        if (p->p_sysent->sv_setregs)
                (*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
                    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
        else
                exec_setregs(td, imgp->entry_addr,
                    (u_long)(uintptr_t)stack_base, imgp->ps_strings);

done1:
        /*
         * Free any resources malloc'd earlier that we didn't use.
         */
        uifree(euip);
        if (newcred == NULL)
                crfree(oldcred);
        else
                crfree(newcred);
        /*
         * Handle deferred decrement of ref counts.
         */
        if (textvp != NULL)
                vrele(textvp);
        if (ndp->ni_vp && error != 0)
                vrele(ndp->ni_vp);
#ifdef KTRACE
        if (tracevp != NULL)
                vrele(tracevp);
        if (tracecred != NULL)
                crfree(tracecred);
#endif
        if (oldargs != NULL)
                pargs_drop(oldargs);
        if (newargs != NULL)
                pargs_drop(newargs);
        if (oldsigacts != NULL)
                sigacts_free(oldsigacts);

exec_fail_dealloc:

        /*
         * free various allocated resources
         */
        if (imgp->firstpage)
                exec_unmap_first_page(imgp);

        if (imgp->vp) {
                NDFREE(ndp, NDF_ONLY_PNBUF);
                vput(imgp->vp);
        }

        if (imgp->stringbase != NULL)
                kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
                    ARG_MAX + PAGE_SIZE);

        if (imgp->object)
                vm_object_deallocate(imgp->object);

        if (error == 0) {
                /*
                 * Stop the process here if its stop event mask has
                 * the S_EXEC bit set.
                 */
                STOPEVENT(p, S_EXEC, 0);
                goto done2;
        }

exec_fail:
        /* we're done here, clear P_INEXEC */
        PROC_LOCK(p);
        p->p_flag &= ~P_INEXEC;
        PROC_UNLOCK(p);

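        /*
         * If an image activator already called exec_new_vmspace(), the old
         * program's address space is gone, so there is no longer anything
         * to return an error to; the only safe option is to terminate the
         * process below.
         */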
        if (imgp->vmspace_destroyed) {
                /* sorry, no more process anymore. exit gracefully */
#ifdef MAC
                mac_execve_exit(imgp);
                if (interplabelvalid)
                        mac_destroy_vnode_label(&interplabel);
#endif
                exit1(td, W_EXITCODE(0, SIGABRT));
                /* NOT REACHED */
                error = 0;
        }
done2:
#ifdef MAC
        mac_execve_exit(imgp);
        if (interplabelvalid)
                mac_destroy_vnode_label(&interplabel);
#endif
        mtx_unlock(&Giant);
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
        char *fname;
        char **argv;
        char **envv;
};
#endif

/*
 * MPSAFE
 */
int
execve(td, uap)
        struct thread *td;
        struct execve_args /* {
                char *fname;
                char **argv;
                char **envv;
        } */ *uap;
{

        return (kern_execve(td, uap->fname, uap->argv, uap->envv, NULL));
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
        char *fname;
        char **argv;
        char **envv;
        struct mac *mac_p;
};
#endif

/*
 * MPSAFE
 */
int
__mac_execve(td, uap)
        struct thread *td;
        struct __mac_execve_args /* {
                char *fname;
                char **argv;
                char **envv;
                struct mac *mac_p;
        } */ *uap;
{

#ifdef MAC
        return (kern_execve(td, uap->fname, uap->argv, uap->envv,
            uap->mac_p));
#else
        return (ENOSYS);
#endif
}

int
exec_map_first_page(imgp)
        struct image_params *imgp;
{
        int rv, i;
        int initial_pagein;
        vm_page_t ma[VM_INITIAL_PAGEIN];
        vm_object_t object;

        GIANT_REQUIRED;

        if (imgp->firstpage) {
                exec_unmap_first_page(imgp);
        }

        VOP_GETVOBJECT(imgp->vp, &object);

        ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

        if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
                initial_pagein = VM_INITIAL_PAGEIN;
                if (initial_pagein > object->size)
                        initial_pagein = object->size;
                for (i = 1; i < initial_pagein; i++) {
                        if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
                                if ((ma[i]->flags & PG_BUSY) || ma[i]->busy)
                                        break;
                                if (ma[i]->valid)
                                        break;
                                vm_page_lock_queues();
                                vm_page_busy(ma[i]);
                                vm_page_unlock_queues();
                        } else {
                                ma[i] = vm_page_alloc(object, i,
                                    VM_ALLOC_NORMAL);
                                if (ma[i] == NULL)
                                        break;
                        }
                }
                initial_pagein = i;

                rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
                ma[0] = vm_page_lookup(object, 0);

                if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
                    (ma[0]->valid == 0)) {
                        if (ma[0]) {
                                vm_page_lock_queues();
                                pmap_remove_all(ma[0]);
                                vm_page_free(ma[0]);
                                vm_page_unlock_queues();
                        }
                        return (EIO);
                }
        }
        vm_page_lock_queues();
        vm_page_wire(ma[0]);
        vm_page_wakeup(ma[0]);
        vm_page_unlock_queues();

        pmap_qenter((vm_offset_t)imgp->image_header, ma, 1);
        imgp->firstpage = ma[0];

        return (0);
}

void
exec_unmap_first_page(imgp)
        struct image_params *imgp;
{
        GIANT_REQUIRED;

        if (imgp->firstpage) {
                pmap_qremove((vm_offset_t)imgp->image_header, 1);
                vm_page_lock_queues();
                vm_page_unwire(imgp->firstpage, 1);
                vm_page_unlock_queues();
                imgp->firstpage = NULL;
        }
}

/*
 * Destroy old address space, and allocate a new stack.
 *	The new stack is only SGROWSIZ large because it is grown
 *	automatically in trap.c.
 */
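/*
 * Rough picture of the user address space that exec_new_vmspace() leaves
 * behind (a sketch only; the actual values come from the sysentvec and the
 * maxssiz/sgrowsiz tunables):
 *
 *	sv_minuser ............. start of user VA; text, data, bss and heap
 *	                         are mapped here later by the image activator
 *	sv_usrstack - maxssiz .. bottom of the stack reservation created with
 *	                         vm_map_stack(), protection sv_stackprot
 *	sv_usrstack ............ top of the user stack; only sgrowsiz worth
 *	                         of it is accounted for initially, the rest
 *	                         is grown on fault in trap.c
 */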
int
exec_new_vmspace(imgp, sv)
        struct image_params *imgp;
        struct sysentvec *sv;
{
        int error;
        struct proc *p = imgp->proc;
        struct vmspace *vmspace = p->p_vmspace;
        vm_offset_t stack_addr;
        vm_map_t map;

        GIANT_REQUIRED;

        stack_addr = sv->sv_usrstack - maxssiz;

        imgp->vmspace_destroyed = 1;

        EVENTHANDLER_INVOKE(process_exec, p);

        /*
         * Blow away entire process VM, if address space not shared,
         * otherwise, create a new VM space so that other threads are
         * not disrupted
         */
        map = &vmspace->vm_map;
        if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
            vm_map_max(map) == sv->sv_maxuser) {
                shmexit(vmspace);
                vm_page_lock_queues();
                pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map),
                    vm_map_max(map));
                vm_page_unlock_queues();
                vm_map_remove(map, vm_map_min(map), vm_map_max(map));
        } else {
                vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
                vmspace = p->p_vmspace;
                map = &vmspace->vm_map;
        }

        /* Allocate a new stack */
        error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
            sv->sv_stackprot, VM_PROT_ALL, 0);
        if (error)
                return (error);

#ifdef __ia64__
        {
                /*
                 * Allocate backing store.  We really need something
                 * similar to vm_map_stack which can allow the backing
                 * store to grow upwards.  This will do for now.
                 */
                vm_offset_t bsaddr;
                bsaddr = p->p_sysent->sv_usrstack - 2 * maxssiz;
                error = vm_map_find(map, 0, 0, &bsaddr,
                    regstkpages * PAGE_SIZE, 0, VM_PROT_ALL, VM_PROT_ALL, 0);
                FIRST_THREAD_IN_PROC(p)->td_md.md_bspstore = bsaddr;
        }
#endif

        /*
         * vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
         * VM_STACK case, but they are still used to monitor the size of the
         * process stack so we can check the stack rlimit.
         */
        vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
        vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz;

        return (0);
}

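/*
 * The temporary buffer allocated in kern_execve() and filled in by
 * exec_extract_strings() below is laid out like this (a sketch; the sizes
 * come from the kmem_alloc_wait() call and the stringspace accounting):
 *
 *	stringbase                                stringbase + ARG_MAX
 *	| argv strings | env strings | free space | first page of image |
 *	                                            ^ image_header (PAGE_SIZE)
 *
 * exec_copyout_strings() later copies the used part of the buffer onto
 * the new process's stack.
 */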
/*
 * Copy out argument and environment strings from the old process
 * address space into the temporary string buffer.
 */
int
exec_extract_strings(imgp)
        struct image_params *imgp;
{
        char **argv, **envv;
        char *argp, *envp;
        int error;
        size_t length;

        /*
         * extract arguments first
         */

        argv = imgp->userspace_argv;

        if (argv) {
                argp = (caddr_t)(intptr_t)fuword(argv);
                if (argp == (caddr_t)-1)
                        return (EFAULT);
                if (argp)
                        argv++;
                if (imgp->argv0)
                        argp = imgp->argv0;
                if (argp) {
                        do {
                                if (argp == (caddr_t)-1)
                                        return (EFAULT);
                                if ((error = copyinstr(argp, imgp->stringp,
                                    imgp->stringspace, &length))) {
                                        if (error == ENAMETOOLONG)
                                                return (E2BIG);
                                        return (error);
                                }
                                imgp->stringspace -= length;
                                imgp->stringp += length;
                                imgp->argc++;
                        } while ((argp = (caddr_t)(intptr_t)fuword(argv++)));
                }
        }

        imgp->endargs = imgp->stringp;

        /*
         * extract environment strings
         */

        envv = imgp->userspace_envv;

        if (envv) {
                while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
                        if (envp == (caddr_t)-1)
                                return (EFAULT);
                        if ((error = copyinstr(envp, imgp->stringp,
                            imgp->stringspace, &length))) {
                                if (error == ENAMETOOLONG)
                                        return (E2BIG);
                                return (error);
                        }
                        imgp->stringspace -= length;
                        imgp->stringp += length;
                        imgp->envc++;
                }
        }

        return (0);
}

/*
 * Copy strings out to the new process address space, constructing
 * new arg and env vector tables.  Return a pointer to the base
 * so that it can be used as the initial stack pointer.
 */
register_t *
exec_copyout_strings(imgp)
        struct image_params *imgp;
{
        int argc, envc;
        char **vectp;
        char *stringp, *destp;
        register_t *stack_base;
        struct ps_strings *arginfo;
        struct proc *p;
        int szsigcode;

        /*
         * Calculate string base and vector table pointers.
         * Also deal with signal trampoline code for this exec type.
         */
        p = imgp->proc;
        szsigcode = 0;
        arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
        if (p->p_sysent->sv_szsigcode != NULL)
                szsigcode = *(p->p_sysent->sv_szsigcode);
        destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
            roundup((ARG_MAX - imgp->stringspace), sizeof(char *));

        /*
         * install sigcode
         */
        if (szsigcode)
                copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
                    szsigcode), szsigcode);

        /*
         * If we have a valid auxargs ptr, prepare some room
         * on the stack.
         */
        if (imgp->auxargs) {
                /*
                 * 'AT_COUNT*2' is size for the ELF Auxargs data.  This is for
                 * lower compatibility.
                 */
                imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
                    (AT_COUNT * 2);
                /*
                 * The '+ 2' is for the null pointers at the end of each of
                 * the arg and env vector sets, and imgp->auxarg_size is room
                 * for the arguments of the runtime loader.
                 */
                vectp = (char **)(destp - (imgp->argc + imgp->envc + 2 +
                    imgp->auxarg_size) * sizeof(char *));

        } else
                /*
                 * The '+ 2' is for the null pointers at the end of each of
                 * the arg and env vector sets
                 */
                vectp = (char **)(destp - (imgp->argc + imgp->envc + 2) *
                    sizeof(char *));

        /*
         * vectp also becomes our initial stack base
         */
        stack_base = (register_t *)vectp;

        stringp = imgp->stringbase;
        argc = imgp->argc;
        envc = imgp->envc;

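        /*
         * What ends up at the top of the new user stack, from high to low
         * addresses (a sketch derived from the pointer arithmetic above;
         * the auxargs slots exist only when imgp->auxargs is set and are
         * filled in later via the sysentvec sv_fixup hook):
         *
         *	arginfo:     struct ps_strings (filled in below)
         *	             signal trampoline (szsigcode bytes)
         *	             SPARE_USRSPACE
         *	destp:       argument and environment strings
         *	             auxargs slots (imgp->auxarg_size entries)
         *	             env pointers followed by a NULL
         *	stack_base:  arg pointers followed by a NULL
         */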
        /*
         * Copy out strings - arguments and environment.
         */
        copyout(stringp, destp, ARG_MAX - imgp->stringspace);

        /*
         * Fill in "ps_strings" struct for ps, w, etc.
         */
        suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
        suword(&arginfo->ps_nargvstr, argc);

        /*
         * Fill in argument portion of vector table.
         */
        for (; argc > 0; --argc) {
                suword(vectp++, (long)(intptr_t)destp);
                while (*stringp++ != 0)
                        destp++;
                destp++;
        }

        /* a null vector table pointer separates the argp's from the envp's */
        suword(vectp++, 0);

        suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
        suword(&arginfo->ps_nenvstr, envc);

        /*
         * Fill in environment portion of vector table.
         */
        for (; envc > 0; --envc) {
                suword(vectp++, (long)(intptr_t)destp);
                while (*stringp++ != 0)
                        destp++;
                destp++;
        }

        /* end of vector table is a null pointer */
        suword(vectp, 0);

        return (stack_base);
}

/*
 * Check permissions of file to execute.
 *	Called with imgp->vp locked.
 *	Return 0 for success or error code on failure.
 */
int
exec_check_permissions(imgp)
        struct image_params *imgp;
{
        struct vnode *vp = imgp->vp;
        struct vattr *attr = imgp->attr;
        struct thread *td;
        int error;

        td = curthread;                 /* XXXKSE */

        /* Get file attributes */
        error = VOP_GETATTR(vp, attr, td->td_ucred, td);
        if (error)
                return (error);

#ifdef MAC
        error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp);
        if (error)
                return (error);
#endif

        /*
         * 1) Check if file execution is disabled for the filesystem that
         *    this file resides on.
         * 2) Ensure that at least one execute bit is on - otherwise root
         *    will always succeed, and we don't want that to happen unless
         *    the file really is executable.
         * 3) Ensure that the file is a regular file.
         */
        if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
            ((attr->va_mode & 0111) == 0) ||
            (attr->va_type != VREG))
                return (EACCES);

        /*
         * Zero length files can't be exec'd
         */
        if (attr->va_size == 0)
                return (ENOEXEC);

        /*
         * Check for execute permission to file based on current credentials.
         */
        error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
        if (error)
                return (error);

        /*
         * Check number of open-for-writes on the file and deny execution
         * if there are any.
         */
        if (vp->v_writecount)
                return (ETXTBSY);

        /*
         * Call filesystem specific open routine (which does nothing in the
         * general case).
         */
        error = VOP_OPEN(vp, FREAD, td->td_ucred, td);
        return (error);
}

/*
 * Exec handler registration
 */
int
exec_register(execsw_arg)
        const struct execsw *execsw_arg;
{
        const struct execsw **es, **xs, **newexecsw;
        int count = 2;  /* New slot and trailing NULL */

        if (execsw)
                for (es = execsw; *es; es++)
                        count++;
        newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
        if (newexecsw == NULL)
                return (ENOMEM);
        xs = newexecsw;
        if (execsw)
                for (es = execsw; *es; es++)
                        *xs++ = *es;
        *xs++ = execsw_arg;
        *xs = NULL;
        if (execsw)
                free(execsw, M_TEMP);
        execsw = newexecsw;
        return (0);
}

int
exec_unregister(execsw_arg)
        const struct execsw *execsw_arg;
{
        const struct execsw **es, **xs, **newexecsw;
        int count = 1;

        if (execsw == NULL)
                panic("unregister with no handlers left?\n");

        for (es = execsw; *es; es++) {
                if (*es == execsw_arg)
                        break;
        }
        if (*es == NULL)
                return (ENOENT);
        for (es = execsw; *es; es++)
                if (*es != execsw_arg)
                        count++;
        newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
        if (newexecsw == NULL)
                return (ENOMEM);
        xs = newexecsw;
        for (es = execsw; *es; es++)
                if (*es != execsw_arg)
                        *xs++ = *es;
        *xs = NULL;
        if (execsw)
                free(execsw, M_TEMP);
        execsw = newexecsw;
        return (0);
}
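/*
 * Illustrative use of the registration interface above (a sketch only; the
 * names exec_foo_imgact/foo_execsw are hypothetical).  Real image activators
 * live in imgact_*.c and normally register through the EXEC_SET() module
 * glue (see <sys/imgact.h>) rather than calling exec_register() directly:
 *
 *	static int exec_foo_imgact(struct image_params *imgp);
 *	static struct execsw foo_execsw = { exec_foo_imgact, "foo" };
 *	EXEC_SET(foo, foo_execsw);
 *
 * The ex_imgact handler must return -1 if the image is not in its format,
 * 0 on success, or an errno value on failure, matching the activator loop
 * in kern_execve().
 */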