/*-
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/wait.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
#include <sys/sf_buf.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/shm.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/reg.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
    struct mac *mac_p);
static void exec_free_args(struct image_args *);

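/*
 * The read-only sysctl OIDs defined below export per-ABI constants from
 * the process's sysentvec.  As an illustration only (userland code, not
 * part of this file), they can be read with sysctl(3), e.g.:
 *
 *	unsigned long usrstack;
 *	size_t len = sizeof(usrstack);
 *	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) == 0)
 *		printf("stack top: %#lx\n", usrstack);
 */
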
/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
    NULL, 0, sysctl_kern_ps_strings, "LU", "");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD,
    NULL, 0, sysctl_kern_usrstack, "LU", "");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_stackprot, "I", "");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0, "");

static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	int error;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val;
		val = (unsigned int)p->p_sysent->sv_psstrings;
		error = SYSCTL_OUT(req, &val, sizeof(val));
	} else
#endif
		error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
		    sizeof(p->p_sysent->sv_psstrings));
	return error;
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	int error;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val;
		val = (unsigned int)p->p_sysent->sv_usrstack;
		error = SYSCTL_OUT(req, &val, sizeof(val));
	} else
#endif
		error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
		    sizeof(p->p_sysent->sv_usrstack));
	return error;
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;

	p = curproc;
	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
	    sizeof(p->p_sysent->sv_stackprot)));
}

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;
	char **argv;
	char **envv;
};
#endif

int
execve(td, uap)
	struct thread *td;
	struct execve_args /* {
		char *fname;
		char **argv;
		char **envv;
	} */ *uap;
{
	int error;
	struct image_args args;

	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, NULL);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
	char *fname;
	char **argv;
	char **envv;
	struct mac *mac_p;
};
#endif

int
__mac_execve(td, uap)
	struct thread *td;
	struct __mac_execve_args /* {
		char *fname;
		char **argv;
		char **envv;
		struct mac *mac_p;
	} */ *uap;
{
#ifdef MAC
	int error;
	struct image_args args;

	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, uap->mac_p);
	return (error);
#else
	return (ENOSYS);
#endif
}

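/*
 * Userland illustration (not part of this file): the execve() handler
 * above is the kernel side of the execve(2) system call, e.g.
 *
 *	char *argv[] = { "/bin/ls", "-l", NULL };
 *	char *envv[] = { "PATH=/bin:/usr/bin", NULL };
 *	execve("/bin/ls", argv, envv);
 *
 * which returns to the caller only on failure.
 */
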
/*
 * XXX: kern_execve has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
int
kern_execve(td, args, mac_p)
	struct thread *td;
	struct image_args *args;
	struct mac *mac_p;
{
	struct proc *p = td->td_proc;
	int error;

	AUDIT_ARG(argv, args->begin_argv, args->argc,
	    args->begin_envv - args->begin_argv);
	AUDIT_ARG(envv, args->begin_envv, args->envc,
	    args->endp - args->begin_envv);
	if (p->p_flag & P_HADTHREADS) {
		PROC_LOCK(p);
		if (thread_single(SINGLE_BOUNDARY)) {
			PROC_UNLOCK(p);
			exec_free_args(args);
			return (ERESTART);	/* Try again later. */
		}
		PROC_UNLOCK(p);
	}

	error = do_execve(td, args, mac_p);

	if (p->p_flag & P_HADTHREADS) {
		PROC_LOCK(p);
		/*
		 * On success, upgrade to SINGLE_EXIT state to force the
		 * other threads to exit.
		 */
		if (error == 0)
			thread_single(SINGLE_EXIT);
		else
			thread_single_end();
		PROC_UNLOCK(p);
	}

	return (error);
}

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 */
static int
do_execve(td, args, mac_p)
	struct thread *td;
	struct image_args *args;
	struct mac *mac_p;
{
	struct proc *p = td->td_proc;
	struct nameidata nd, *ndp;
	struct ucred *newcred = NULL, *oldcred;
	struct uidinfo *euip;
	register_t *stack_base;
	int error, len, i;
	struct image_params image_params, *imgp;
	struct vattr attr;
	int (*img_first)(struct image_params *);
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct sigacts *oldsigacts, *newsigacts;
#ifdef KTRACE
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;
#endif
	struct vnode *textvp = NULL;
	int credential_changing;
	int vfslocked;
	int textset;
#ifdef MAC
	struct label *interplabel = NULL;
	int will_transition;
#endif
#ifdef HWPMC_HOOKS
	struct pmckern_procexec pe;
#endif

	vfslocked = 0;
	imgp = &image_params;

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->execlabel = NULL;
	imgp->attr = &attr;
	imgp->entry_addr = 0;
	imgp->vmspace_destroyed = 0;
	imgp->interpreted = 0;
	imgp->interpreter_name = args->buf + PATH_MAX + ARG_MAX;
	imgp->auxargs = NULL;
	imgp->vp = NULL;
	imgp->object = NULL;
	imgp->firstpage = NULL;
	imgp->ps_strings = 0;
	imgp->auxarg_size = 0;
	imgp->args = args;

#ifdef MAC
	error = mac_execve_enter(imgp, mac_p);
	if (error)
		goto exec_fail;
#endif

	imgp->image_header = NULL;

	/*
	 * Translate the file name.  namei() returns a vnode pointer
	 * in ni_vp among other things.
	 *
	 * XXXAUDIT: It would be desirable to also audit the name of the
	 * interpreter if this is an interpreted binary.
	 */
	ndp = &nd;
	NDINIT(ndp, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME | MPSAFE |
	    AUDITVNODE1, UIO_SYSSPACE, args->fname, td);

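	/*
	 * We may pass this point more than once: when the image turns out
	 * to be an interpreted script (e.g. "#!/bin/sh"), the matching
	 * image activator records the interpreter's path in
	 * imgp->interpreter_name and sets imgp->interpreted, and we jump
	 * back here to look up and activate the interpreter itself.
	 */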
interpret:
	error = namei(ndp);
	if (error)
		goto exec_fail;

	vfslocked = NDHASGIANT(ndp);
	imgp->vp = ndp->ni_vp;

	/*
	 * Check file permissions (also 'opens' file)
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->object = imgp->vp->v_object;
	if (imgp->object != NULL)
		vm_object_reference(imgp->object);

	/*
	 * Set VV_TEXT now so no one can write to the executable while we're
	 * activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
	textset = imgp->vp->v_vflag & VV_TEXT;
	imgp->vp->v_vflag |= VV_TEXT;

	error = exec_map_first_page(imgp);
	if (error)
		goto exec_fail_dealloc;

	/*
	 * If the current process has a special image activator it
	 * wants to try first, call it.  For example, emulating shell
	 * scripts differently.
	 */
	error = -1;
	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
		error = img_first(imgp);

	/*
	 * Loop through the list of image activators, calling each one.
	 * An activator returns -1 if there is no match, 0 on success,
	 * and an error otherwise.
	 */
	for (i = 0; error == -1 && execsw[i]; ++i) {
		if (execsw[i]->ex_imgact == NULL ||
		    execsw[i]->ex_imgact == img_first) {
			continue;
		}
		error = (*execsw[i]->ex_imgact)(imgp);
	}

	if (error) {
		if (error == -1) {
			if (textset == 0)
				imgp->vp->v_vflag &= ~VV_TEXT;
			error = ENOEXEC;
		}
		goto exec_fail_dealloc;
	}

	/*
	 * Special interpreter operation: clean up and loop back to try to
	 * activate the interpreter.
	 */
	if (imgp->interpreted) {
		exec_unmap_first_page(imgp);
		/*
		 * VV_TEXT needs to be unset for scripts.  There is a short
		 * period before we determine that something is a script where
		 * VV_TEXT will be set. The vnode lock is held over this
		 * entire period so nothing should illegitimately be blocked.
		 */
		imgp->vp->v_vflag &= ~VV_TEXT;
		/* free name buffer and old vnode */
		NDFREE(ndp, NDF_ONLY_PNBUF);
#ifdef MAC
		interplabel = mac_vnode_label_alloc();
		mac_copy_vnode_label(ndp->ni_vp->v_label, interplabel);
#endif
		vput(ndp->ni_vp);
		vm_object_deallocate(imgp->object);
		imgp->object = NULL;
		VFS_UNLOCK_GIANT(vfslocked);
		vfslocked = 0;
		/* set new name to that of the interpreter */
		NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME | MPSAFE,
		    UIO_SYSSPACE, imgp->interpreter_name, td);
		goto interpret;
	}

	/*
	 * Copy out strings (args and env) and initialize stack base
	 */
	if (p->p_sysent->sv_copyout_strings)
		stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
	else
		stack_base = exec_copyout_strings(imgp);

	/*
	 * If a custom stack fixup routine is present for this process,
	 * let it do the stack setup; otherwise stuff the argument count
	 * in as the first item on the stack.
	 */
	if (p->p_sysent->sv_fixup != NULL)
		(*p->p_sysent->sv_fixup)(&stack_base, imgp);
	else
		suword(--stack_base, imgp->args->argc);

	/*
	 * For security and other reasons, the file descriptor table cannot
	 * be shared after an exec.
	 */
	fdunshare(p, td);

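	/*
	 * Descriptors that userland marked close-on-exec (for example with
	 * fcntl(fd, F_SETFD, FD_CLOEXEC)) are closed by the fdcloseexec()
	 * call a little further below.
	 */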
	/*
	 * Malloc things before we need locks.
	 */
	newcred = crget();
	euip = uifind(attr.va_uid);
	i = imgp->args->begin_envv - imgp->args->begin_argv;
	/* Cache arguments if they fit inside our allowance */
	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
		newargs = pargs_alloc(i);
		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
	}

	/* close files on exec */
	VOP_UNLOCK(imgp->vp, 0, td);
	fdcloseexec(td);
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* Get a reference to the vnode prior to locking the proc */
	VREF(ndp->ni_vp);

	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec. The new process gets a copy of the old
	 * handlers. In execsigs(), the new process will have its signals
	 * reset.
	 */
	PROC_LOCK(p);
	if (sigacts_shared(p->p_sigacts)) {
		oldsigacts = p->p_sigacts;
		PROC_UNLOCK(p);
		newsigacts = sigacts_alloc();
		sigacts_copy(newsigacts, oldsigacts);
		PROC_LOCK(p);
		p->p_sigacts = newsigacts;
	} else
		oldsigacts = NULL;

	/* Stop profiling */
	stopprofclock(p);

	/* reset caught signals */
	execsigs(p);

	/* name this process - nameiexec(p, ndp) */
	len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN);
	bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
	p->p_comm[len] = 0;

	/*
	 * mark as execed, wakeup the process that vforked (if any) and tell
	 * it that it now has its own resources back
	 */
	p->p_flag |= P_EXEC;
	if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
		p->p_flag &= ~P_PPWAIT;
		wakeup(p->p_pptr);
	}

	/*
	 * Implement image setuid/setgid.
	 *
	 * Don't honor setuid/setgid if the filesystem prohibits it or if
	 * the process is being traced.
	 *
	 * XXXMAC: For the time being, use NOSUID to also prohibit
	 * transitions on the file system.
	 */
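	/*
	 * For example, exec of a setuid-root binary such as /usr/bin/passwd
	 * by an ordinary user changes the effective and saved uids to 0
	 * below, while the real uid is left untouched; P_SUGID is set via
	 * setsugid() so the process is treated as credential-changed from
	 * here on.
	 */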
	oldcred = p->p_ucred;
	credential_changing = 0;
	credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
	    attr.va_uid;
	credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
	    attr.va_gid;
#ifdef MAC
	will_transition = mac_execve_will_transition(oldcred, imgp->vp,
	    interplabel, imgp);
	credential_changing |= will_transition;
#endif

	if (credential_changing &&
	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
	    (p->p_flag & P_TRACED) == 0) {
		/*
		 * Turn off syscall tracing for set-id programs, except for
		 * root.  Record any set-id flags first to make sure that
		 * we do not regain any tracing during a possible block.
		 */
		setsugid(p);

#ifdef KTRACE
		if (p->p_tracevp != NULL &&
		    priv_check_cred(oldcred, PRIV_DEBUG_DIFFCRED,
		    SUSER_ALLOWJAIL)) {
			mtx_lock(&ktrace_mtx);
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
		}
#endif
		/*
		 * Close any file descriptors 0..2 that reference procfs,
		 * then make sure file descriptors 0..2 are in use.
		 *
		 * setugidsafety() may call closef() and then pfind()
		 * which may grab the process lock.
		 * fdcheckstd() may call falloc() which may block to
		 * allocate memory, so temporarily drop the process lock.
		 */
		PROC_UNLOCK(p);
		setugidsafety(td);
		VOP_UNLOCK(imgp->vp, 0, td);
		error = fdcheckstd(td);
		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
		if (error != 0)
			goto done1;
		PROC_LOCK(p);
		/*
		 * Set the new credentials.
		 */
		crcopy(newcred, oldcred);
		if (attr.va_mode & VSUID)
			change_euid(newcred, euip);
		if (attr.va_mode & VSGID)
			change_egid(newcred, attr.va_gid);
#ifdef MAC
		if (will_transition) {
			mac_execve_transition(oldcred, newcred, imgp->vp,
			    interplabel, imgp);
		}
#endif
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXXMAC: Note that the current logic will save the
		 * uid and gid if a MAC domain transition occurs, even
		 * though maybe it shouldn't.
		 */
		change_svuid(newcred, newcred->cr_uid);
		change_svgid(newcred, newcred->cr_gid);
		p->p_ucred = newcred;
		newcred = NULL;
	} else {
		if (oldcred->cr_uid == oldcred->cr_ruid &&
		    oldcred->cr_gid == oldcred->cr_rgid)
			p->p_flag &= ~P_SUGID;
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXX: It's not clear that the existing behavior is
		 * POSIX-compliant.  A number of sources indicate that the
		 * saved uid/gid should only be updated if the new ruid is
		 * not equal to the old ruid, or the new euid is not equal
		 * to the old euid and the new euid is not equal to the old
		 * ruid.  The FreeBSD code always updates the saved uid/gid.
		 * Also, this code uses the new (replaced) euid and egid as
		 * the source, which may or may not be the right ones to use.
		 */
		if (oldcred->cr_svuid != oldcred->cr_uid ||
		    oldcred->cr_svgid != oldcred->cr_gid) {
			crcopy(newcred, oldcred);
			change_svuid(newcred, newcred->cr_uid);
			change_svgid(newcred, newcred->cr_gid);
			p->p_ucred = newcred;
			newcred = NULL;
		}
	}

	/*
	 * Store the vp for use in procfs.  This vnode was referenced prior
	 * to locking the proc lock.
	 */
	textvp = p->p_textvp;
	p->p_textvp = ndp->ni_vp;

	/*
	 * Notify others that we exec'd, and clear the P_INEXEC flag
	 * as we're now a bona fide freshly-execed process.
	 */
	KNOTE_LOCKED(&p->p_klist, NOTE_EXEC);
	p->p_flag &= ~P_INEXEC;

	/*
	 * If tracing the process, trap to the debugger so that breakpoints
	 * can be set before the program executes.  Use tdsignal() to
	 * deliver the signal to the current thread; psignal() might deliver
	 * it to another thread, which would be wrong because the other
	 * threads are about to exit once we enter single-threaded mode.
	 */
	if (p->p_flag & P_TRACED)
		tdsignal(p, td, SIGTRAP, NULL);

	/* clear "fork but no exec" flag, as we _are_ execing */
	p->p_acflag &= ~AFORK;

	/*
	 * Free any previous argument cache and replace it with
	 * the new argument cache, if any.
	 */
	oldargs = p->p_args;
	p->p_args = newargs;
	newargs = NULL;

#ifdef HWPMC_HOOKS
	/*
	 * Check if system-wide sampling is in effect or if the
	 * current process is using PMCs.  If so, do exec() time
	 * processing.  This processing needs to happen AFTER the
	 * P_INEXEC flag is cleared.
	 *
	 * The proc lock needs to be released before taking the PMC
	 * SX.
	 */
	if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
		PROC_UNLOCK(p);
		pe.pm_credentialschanged = credential_changing;
		pe.pm_entryaddr = imgp->entry_addr;

		PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
	} else
		PROC_UNLOCK(p);
#else  /* !HWPMC_HOOKS */
	PROC_UNLOCK(p);
#endif

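	/*
	 * The machine-dependent setregs hook below seeds the new image's
	 * initial register state; typically this means pointing the program
	 * counter at imgp->entry_addr and the user stack pointer at
	 * stack_base (the details are per-architecture).
	 */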
	/* Set values passed into the program in registers. */
	if (p->p_sysent->sv_setregs)
		(*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
	else
		exec_setregs(td, imgp->entry_addr,
		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);

	vfs_mark_atime(imgp->vp, td);

done1:
	/*
	 * Free any resources malloc'd earlier that we didn't use.
	 */
	uifree(euip);
	if (newcred == NULL)
		crfree(oldcred);
	else
		crfree(newcred);
	VOP_UNLOCK(imgp->vp, 0, td);
	/*
	 * Handle deferred decrement of ref counts.
	 */
	if (textvp != NULL) {
		int tvfslocked;

		tvfslocked = VFS_LOCK_GIANT(textvp->v_mount);
		vrele(textvp);
		VFS_UNLOCK_GIANT(tvfslocked);
	}
	if (ndp->ni_vp && error != 0)
		vrele(ndp->ni_vp);
#ifdef KTRACE
	if (tracevp != NULL)
		vrele(tracevp);
	if (tracecred != NULL)
		crfree(tracecred);
#endif
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	if (oldargs != NULL)
		pargs_drop(oldargs);
	if (newargs != NULL)
		pargs_drop(newargs);
	if (oldsigacts != NULL)
		sigacts_free(oldsigacts);

exec_fail_dealloc:

	/*
	 * free various allocated resources
	 */
	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	if (imgp->vp != NULL) {
		NDFREE(ndp, NDF_ONLY_PNBUF);
		vput(imgp->vp);
	}

	if (imgp->object != NULL)
		vm_object_deallocate(imgp->object);

	if (error == 0) {
		/*
		 * Stop the process here if its stop event mask has
		 * the S_EXEC bit set.
		 */
		STOPEVENT(p, S_EXEC, 0);
		goto done2;
	}

exec_fail:
	/* we're done here, clear P_INEXEC */
	PROC_LOCK(p);
	p->p_flag &= ~P_INEXEC;
	PROC_UNLOCK(p);

done2:
#ifdef MAC
	mac_execve_exit(imgp);
	if (interplabel != NULL)
		mac_vnode_label_free(interplabel);
#endif
	VFS_UNLOCK_GIANT(vfslocked);
	exec_free_args(args);

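	/*
	 * Once exec_new_vmspace() has destroyed the old address space
	 * (imgp->vmspace_destroyed), there is no process image left to
	 * return an error to, so a failure past that point has to end in
	 * exit1() rather than an error return.
	 */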
	if (error && imgp->vmspace_destroyed) {
		/* sorry, no more process anymore. exit gracefully */
		exit1(td, W_EXITCODE(0, SIGABRT));
		/* NOT REACHED */
	}
	return (error);
}

int
exec_map_first_page(imgp)
	struct image_params *imgp;
{
	int rv, i;
	int initial_pagein;
	vm_page_t ma[VM_INITIAL_PAGEIN];
	vm_object_t object;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
	VM_OBJECT_LOCK(object);
	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
		initial_pagein = VM_INITIAL_PAGEIN;
		if (initial_pagein > object->size)
			initial_pagein = object->size;
		for (i = 1; i < initial_pagein; i++) {
			if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
				if (ma[i]->valid)
					break;
				if ((ma[i]->oflags & VPO_BUSY) || ma[i]->busy)
					break;
				vm_page_busy(ma[i]);
			} else {
				ma[i] = vm_page_alloc(object, i,
				    VM_ALLOC_NORMAL);
				if (ma[i] == NULL)
					break;
			}
		}
		initial_pagein = i;
		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
		ma[0] = vm_page_lookup(object, 0);
		if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
		    (ma[0]->valid == 0)) {
			if (ma[0]) {
				vm_page_lock_queues();
				vm_page_free(ma[0]);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(object);
			return (EIO);
		}
	}
	vm_page_lock_queues();
	vm_page_hold(ma[0]);
	vm_page_unlock_queues();
	vm_page_wakeup(ma[0]);
	VM_OBJECT_UNLOCK(object);

	imgp->firstpage = sf_buf_alloc(ma[0], 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}

void
exec_unmap_first_page(imgp)
	struct image_params *imgp;
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();
	}
}

/*
 * Destroy the old address space and allocate a new stack.
 * The new stack is only SGROWSIZ large because it is grown
 * automatically in trap.c.
 */
int
exec_new_vmspace(imgp, sv)
	struct image_params *imgp;
	struct sysentvec *sv;
{
	int error;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	vm_offset_t stack_addr;
	vm_map_t map;

	imgp->vmspace_destroyed = 1;
	imgp->sysent = sv;

	/* May be called with Giant held */
	EVENTHANDLER_INVOKE(process_exec, p, imgp);

	/*
	 * Here is as good a place as any to do any resource limit cleanups.
	 * This is needed if a 64 bit binary exec's a 32 bit binary - the
	 * data size limit may need to be changed to a value that makes
	 * sense for the 32 bit binary.
	 */
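	/*
	 * For instance, on a 64-bit kernel the 32-bit compatibility
	 * sysentvec can provide an sv_fixlimits hook that clamps limits
	 * such as RLIMIT_DATA and RLIMIT_STACK to values representable in
	 * the 32-bit ABI.
	 */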
	if (sv->sv_fixlimits != NULL)
		sv->sv_fixlimits(p);

	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted
	 */
	map = &vmspace->vm_map;
	if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser) {
		shmexit(vmspace);
		pmap_remove_pages(vmspace_pmap(vmspace));
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
	} else {
		vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
		vmspace = p->p_vmspace;
		map = &vmspace->vm_map;
	}

	/* Allocate a new stack */
	stack_addr = sv->sv_usrstack - maxssiz;
	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
	if (error)
		return (error);

#ifdef __ia64__
	/* Allocate a new register stack */
	stack_addr = IA64_BACKINGSTORE;
	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP);
	if (error)
		return (error);
#endif

	/*
	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
	 * VM_STACK case, but they are still used to monitor the size of the
	 * process stack so we can check the stack rlimit.
	 */
	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
	vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz;

	return (0);
}

/*
 * Copy in argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, char *fname,
    enum uio_seg segflg, char **argv, char **envv)
{
	char *argp, *envp;
	int error;
	size_t length;

	error = 0;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);
	/*
	 * Allocate temporary demand zeroed space for argument and
	 * environment strings:
	 *
	 * o ARG_MAX for argument and environment;
	 * o PATH_MAX for the image file name;
	 * o MAXSHELLCMDLEN for the name of interpreters.
	 */
	args->buf = (char *) kmem_alloc_wait(exec_map,
	    PATH_MAX + ARG_MAX + MAXSHELLCMDLEN);
	if (args->buf == NULL)
		return (ENOMEM);
	args->begin_argv = args->buf;
	args->endp = args->begin_argv;
	args->stringspace = ARG_MAX;

	args->fname = args->buf + ARG_MAX;

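	/*
	 * The temporary buffer is laid out as three consecutive regions:
	 *
	 *	args->buf                        argument/environment strings
	 *	args->buf + ARG_MAX              file name (PATH_MAX bytes)
	 *	args->buf + ARG_MAX + PATH_MAX   interpreter name
	 *
	 * (do_execve() points imgp->interpreter_name at the last region.)
	 */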
	/*
	 * Copy the file name.
	 */
	error = (segflg == UIO_SYSSPACE) ?
	    copystr(fname, args->fname, PATH_MAX, &length) :
	    copyinstr(fname, args->fname, PATH_MAX, &length);
	if (error != 0)
		goto err_exit;

	/*
	 * extract arguments first
	 */
	while ((argp = (caddr_t) (intptr_t) fuword(argv++))) {
		if (argp == (caddr_t) -1) {
			error = EFAULT;
			goto err_exit;
		}
		if ((error = copyinstr(argp, args->endp,
		    args->stringspace, &length))) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto err_exit;
		}
		args->stringspace -= length;
		args->endp += length;
		args->argc++;
	}

	args->begin_envv = args->endp;

	/*
	 * extract environment strings
	 */
	if (envv) {
		while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
			if (envp == (caddr_t)-1) {
				error = EFAULT;
				goto err_exit;
			}
			if ((error = copyinstr(envp, args->endp,
			    args->stringspace, &length))) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto err_exit;
			}
			args->stringspace -= length;
			args->endp += length;
			args->envc++;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}

static void
exec_free_args(struct image_args *args)
{

	if (args->buf) {
		kmem_free_wakeup(exec_map, (vm_offset_t)args->buf,
		    PATH_MAX + ARG_MAX + MAXSHELLCMDLEN);
		args->buf = NULL;
	}
}

/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables.  Return a pointer to the base so that it can be
 * used as the initial stack pointer.
 */
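/*
 * Roughly, the resulting top of the new user stack looks like this
 * (highest addresses first):
 *
 *	ps_strings structure
 *	signal trampoline code (if any)
 *	argument and environment strings
 *	auxargs space (ELF only)
 *	NULL-terminated envv[] pointer vector
 *	NULL-terminated argv[] pointer vector   <- returned stack base
 *
 * The caller then stores argc just below the returned base.
 */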
register_t *
exec_copyout_strings(imgp)
	struct image_params *imgp;
{
	int argc, envc;
	char **vectp;
	char *stringp, *destp;
	register_t *stack_base;
	struct ps_strings *arginfo;
	struct proc *p;
	int szsigcode;

	/*
	 * Calculate string base and vector table pointers.
	 * Also deal with signal trampoline code for this exec type.
	 */
	p = imgp->proc;
	szsigcode = 0;
	arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
	if (p->p_sysent->sv_szsigcode != NULL)
		szsigcode = *(p->p_sysent->sv_szsigcode);
	destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
	    roundup((ARG_MAX - imgp->args->stringspace), sizeof(char *));

	/*
	 * install sigcode
	 */
	if (szsigcode)
		copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
		    szsigcode), szsigcode);

	/*
	 * If we have a valid auxargs ptr, prepare some room
	 * on the stack.
	 */
	if (imgp->auxargs) {
		/*
		 * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for
		 * lower compatibility.
		 */
		imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
		    (AT_COUNT * 2);
		/*
		 * The '+ 2' is for the null pointers at the end of each of
		 * the arg and env vector sets, and imgp->auxarg_size is room
		 * for the arguments of the runtime loader.
		 */
		vectp = (char **)(destp - (imgp->args->argc +
		    imgp->args->envc + 2 + imgp->auxarg_size) *
		    sizeof(char *));

	} else {
		/*
		 * The '+ 2' is for the null pointers at the end of each of
		 * the arg and env vector sets
		 */
		vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) *
		    sizeof(char *));
	}

	/*
	 * vectp also becomes our initial stack base
	 */
	stack_base = (register_t *)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	copyout(stringp, destp, ARG_MAX - imgp->args->stringspace);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
	suword(&arginfo->ps_nargvstr, argc);

	/*
	 * Fill in argument portion of vector table.
	 */
	for (; argc > 0; --argc) {
		suword(vectp++, (long)(intptr_t)destp);
		while (*stringp++ != 0)
			destp++;
		destp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	suword(vectp++, 0);

	suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
	suword(&arginfo->ps_nenvstr, envc);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		suword(vectp++, (long)(intptr_t)destp);
		while (*stringp++ != 0)
			destp++;
		destp++;
	}

	/* end of vector table is a null pointer */
	suword(vectp, 0);

	return (stack_base);
}

/*
 * Check permissions of file to execute.
 * Called with imgp->vp locked.
 * Return 0 for success or error code on failure.
 */
int
exec_check_permissions(imgp)
	struct image_params *imgp;
{
	struct vnode *vp = imgp->vp;
	struct vattr *attr = imgp->attr;
	struct thread *td;
	int error;

	td = curthread;			/* XXXKSE */

	/* Get file attributes */
	error = VOP_GETATTR(vp, attr, td->td_ucred, td);
	if (error)
		return (error);

#ifdef MAC
	error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp);
	if (error)
		return (error);
#endif

	/*
	 * 1) Check if file execution is disabled for the filesystem that
	 *    this file resides on.
	 * 2) Ensure that at least one execute bit is set - otherwise root
	 *    would always succeed, and we don't want that to happen unless
	 *    the file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr->va_mode & 0111) == 0) ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 * Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 */
	if (vp->v_writecount)
		return (ETXTBSY);

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
	return (error);
}

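/*
 * Image activators normally register and unregister themselves through the
 * EXEC_SET() macro in <sys/imgact.h>, which arranges for exec_register()
 * and exec_unregister() below to be called when the activator's module is
 * loaded and unloaded.
 */
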
/*
 * Exec handler registration
 */
int
exec_register(execsw_arg)
	const struct execsw *execsw_arg;
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 2;	/* New slot and trailing NULL */

	if (execsw)
		for (es = execsw; *es; es++)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	if (newexecsw == NULL)
		return (ENOMEM);
	xs = newexecsw;
	if (execsw)
		for (es = execsw; *es; es++)
			*xs++ = *es;
	*xs++ = execsw_arg;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

int
exec_unregister(execsw_arg)
	const struct execsw *execsw_arg;
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 1;

	if (execsw == NULL)
		panic("unregister with no handlers left?\n");

	for (es = execsw; *es; es++) {
		if (*es == execsw_arg)
			break;
	}
	if (*es == NULL)
		return (ENOENT);
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	if (newexecsw == NULL)
		return (ENOMEM);
	xs = newexecsw;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			*xs++ = *es;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}