1 /*- 2 * Copyright (c) 1993, David Greenman 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_hwpmc_hooks.h" 31 #include "opt_ktrace.h" 32 #include "opt_mac.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/eventhandler.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/sysproto.h> 40 #include <sys/signalvar.h> 41 #include <sys/kernel.h> 42 #include <sys/mount.h> 43 #include <sys/filedesc.h> 44 #include <sys/fcntl.h> 45 #include <sys/acct.h> 46 #include <sys/exec.h> 47 #include <sys/imgact.h> 48 #include <sys/imgact_elf.h> 49 #include <sys/wait.h> 50 #include <sys/malloc.h> 51 #include <sys/priv.h> 52 #include <sys/proc.h> 53 #include <sys/pioctl.h> 54 #include <sys/namei.h> 55 #include <sys/resourcevar.h> 56 #include <sys/sf_buf.h> 57 #include <sys/syscallsubr.h> 58 #include <sys/sysent.h> 59 #include <sys/shm.h> 60 #include <sys/sysctl.h> 61 #include <sys/vnode.h> 62 #ifdef KTRACE 63 #include <sys/ktrace.h> 64 #endif 65 66 #include <vm/vm.h> 67 #include <vm/vm_param.h> 68 #include <vm/pmap.h> 69 #include <vm/vm_page.h> 70 #include <vm/vm_map.h> 71 #include <vm/vm_kern.h> 72 #include <vm/vm_extern.h> 73 #include <vm/vm_object.h> 74 #include <vm/vm_pager.h> 75 76 #ifdef HWPMC_HOOKS 77 #include <sys/pmckern.h> 78 #endif 79 80 #include <machine/reg.h> 81 82 #include <security/audit/audit.h> 83 #include <security/mac/mac_framework.h> 84 85 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments"); 86 87 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS); 88 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS); 89 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS); 90 static int do_execve(struct thread *td, struct image_args *args, 91 struct mac *mac_p); 92 static void exec_free_args(struct image_args *); 93 94 /* XXX This should be vm_size_t. */ 95 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD, 96 NULL, 0, sysctl_kern_ps_strings, "LU", ""); 97 98 /* XXX This should be vm_size_t. */ 99 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD, 100 NULL, 0, sysctl_kern_usrstack, "LU", ""); 101 102 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD, 103 NULL, 0, sysctl_kern_stackprot, "I", ""); 104 105 u_long ps_arg_cache_limit = PAGE_SIZE / 16; 106 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW, 107 &ps_arg_cache_limit, 0, ""); 108 109 static int 110 sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS) 111 { 112 struct proc *p; 113 int error; 114 115 p = curproc; 116 #ifdef SCTL_MASK32 117 if (req->flags & SCTL_MASK32) { 118 unsigned int val; 119 val = (unsigned int)p->p_sysent->sv_psstrings; 120 error = SYSCTL_OUT(req, &val, sizeof(val)); 121 } else 122 #endif 123 error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings, 124 sizeof(p->p_sysent->sv_psstrings)); 125 return error; 126 } 127 128 static int 129 sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS) 130 { 131 struct proc *p; 132 int error; 133 134 p = curproc; 135 #ifdef SCTL_MASK32 136 if (req->flags & SCTL_MASK32) { 137 unsigned int val; 138 val = (unsigned int)p->p_sysent->sv_usrstack; 139 error = SYSCTL_OUT(req, &val, sizeof(val)); 140 } else 141 #endif 142 error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack, 143 sizeof(p->p_sysent->sv_usrstack)); 144 return error; 145 } 146 147 static int 148 sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS) 149 { 150 struct proc *p; 151 152 p = curproc; 153 return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot, 154 sizeof(p->p_sysent->sv_stackprot))); 155 } 156 157 /* 158 * Each of the items is a pointer to a `const struct execsw', hence the 159 * double pointer here. 160 */ 161 static const struct execsw **execsw; 162 163 #ifndef _SYS_SYSPROTO_H_ 164 struct execve_args { 165 char *fname; 166 char **argv; 167 char **envv; 168 }; 169 #endif 170 171 int 172 execve(td, uap) 173 struct thread *td; 174 struct execve_args /* { 175 char *fname; 176 char **argv; 177 char **envv; 178 } */ *uap; 179 { 180 int error; 181 struct image_args args; 182 183 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, 184 uap->argv, uap->envv); 185 if (error == 0) 186 error = kern_execve(td, &args, NULL); 187 return (error); 188 } 189 190 #ifndef _SYS_SYSPROTO_H_ 191 struct __mac_execve_args { 192 char *fname; 193 char **argv; 194 char **envv; 195 struct mac *mac_p; 196 }; 197 #endif 198 199 int 200 __mac_execve(td, uap) 201 struct thread *td; 202 struct __mac_execve_args /* { 203 char *fname; 204 char **argv; 205 char **envv; 206 struct mac *mac_p; 207 } */ *uap; 208 { 209 #ifdef MAC 210 int error; 211 struct image_args args; 212 213 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, 214 uap->argv, uap->envv); 215 if (error == 0) 216 error = kern_execve(td, &args, uap->mac_p); 217 return (error); 218 #else 219 return (ENOSYS); 220 #endif 221 } 222 223 /* 224 * XXX: kern_execve has the astonishing property of not always returning to 225 * the caller. If sufficiently bad things happen during the call to 226 * do_execve(), it can end up calling exit1(); as a result, callers must 227 * avoid doing anything which they might need to undo (e.g., allocating 228 * memory). 229 */ 230 int 231 kern_execve(td, args, mac_p) 232 struct thread *td; 233 struct image_args *args; 234 struct mac *mac_p; 235 { 236 struct proc *p = td->td_proc; 237 int error; 238 239 AUDIT_ARG(argv, args->begin_argv, args->argc, 240 args->begin_envv - args->begin_argv); 241 AUDIT_ARG(envv, args->begin_envv, args->envc, 242 args->endp - args->begin_envv); 243 if (p->p_flag & P_HADTHREADS) { 244 PROC_LOCK(p); 245 if (thread_single(SINGLE_BOUNDARY)) { 246 PROC_UNLOCK(p); 247 exec_free_args(args); 248 return (ERESTART); /* Try again later. */ 249 } 250 PROC_UNLOCK(p); 251 } 252 253 error = do_execve(td, args, mac_p); 254 255 if (p->p_flag & P_HADTHREADS) { 256 PROC_LOCK(p); 257 /* 258 * If success, we upgrade to SINGLE_EXIT state to 259 * force other threads to suicide. 260 */ 261 if (error == 0) 262 thread_single(SINGLE_EXIT); 263 else 264 thread_single_end(); 265 PROC_UNLOCK(p); 266 } 267 268 return (error); 269 } 270 271 /* 272 * In-kernel implementation of execve(). All arguments are assumed to be 273 * userspace pointers from the passed thread. 274 */ 275 static int 276 do_execve(td, args, mac_p) 277 struct thread *td; 278 struct image_args *args; 279 struct mac *mac_p; 280 { 281 struct proc *p = td->td_proc; 282 struct nameidata nd, *ndp; 283 struct ucred *newcred = NULL, *oldcred; 284 struct uidinfo *euip; 285 register_t *stack_base; 286 int error, len, i; 287 struct image_params image_params, *imgp; 288 struct vattr attr; 289 int (*img_first)(struct image_params *); 290 struct pargs *oldargs = NULL, *newargs = NULL; 291 struct sigacts *oldsigacts, *newsigacts; 292 #ifdef KTRACE 293 struct vnode *tracevp = NULL; 294 struct ucred *tracecred = NULL; 295 #endif 296 struct vnode *textvp = NULL; 297 int credential_changing; 298 int vfslocked; 299 int textset; 300 #ifdef MAC 301 struct label *interplabel = NULL; 302 int will_transition; 303 #endif 304 #ifdef HWPMC_HOOKS 305 struct pmckern_procexec pe; 306 #endif 307 308 vfslocked = 0; 309 imgp = &image_params; 310 311 /* 312 * Lock the process and set the P_INEXEC flag to indicate that 313 * it should be left alone until we're done here. This is 314 * necessary to avoid race conditions - e.g. in ptrace() - 315 * that might allow a local user to illicitly obtain elevated 316 * privileges. 317 */ 318 PROC_LOCK(p); 319 KASSERT((p->p_flag & P_INEXEC) == 0, 320 ("%s(): process already has P_INEXEC flag", __func__)); 321 p->p_flag |= P_INEXEC; 322 PROC_UNLOCK(p); 323 324 /* 325 * Initialize part of the common data 326 */ 327 imgp->proc = p; 328 imgp->execlabel = NULL; 329 imgp->attr = &attr; 330 imgp->entry_addr = 0; 331 imgp->vmspace_destroyed = 0; 332 imgp->interpreted = 0; 333 imgp->interpreter_name = args->buf + PATH_MAX + ARG_MAX; 334 imgp->auxargs = NULL; 335 imgp->vp = NULL; 336 imgp->object = NULL; 337 imgp->firstpage = NULL; 338 imgp->ps_strings = 0; 339 imgp->auxarg_size = 0; 340 imgp->args = args; 341 342 #ifdef MAC 343 error = mac_execve_enter(imgp, mac_p); 344 if (error) 345 goto exec_fail; 346 #endif 347 348 imgp->image_header = NULL; 349 350 /* 351 * Translate the file name. namei() returns a vnode pointer 352 * in ni_vp amoung other things. 353 * 354 * XXXAUDIT: It would be desirable to also audit the name of the 355 * interpreter if this is an interpreted binary. 356 */ 357 ndp = &nd; 358 NDINIT(ndp, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME | MPSAFE | 359 AUDITVNODE1, UIO_SYSSPACE, args->fname, td); 360 361 interpret: 362 error = namei(ndp); 363 if (error) 364 goto exec_fail; 365 366 vfslocked = NDHASGIANT(ndp); 367 imgp->vp = ndp->ni_vp; 368 369 /* 370 * Check file permissions (also 'opens' file) 371 */ 372 error = exec_check_permissions(imgp); 373 if (error) 374 goto exec_fail_dealloc; 375 376 imgp->object = imgp->vp->v_object; 377 if (imgp->object != NULL) 378 vm_object_reference(imgp->object); 379 380 /* 381 * Set VV_TEXT now so no one can write to the executable while we're 382 * activating it. 383 * 384 * Remember if this was set before and unset it in case this is not 385 * actually an executable image. 386 */ 387 textset = imgp->vp->v_vflag & VV_TEXT; 388 imgp->vp->v_vflag |= VV_TEXT; 389 390 error = exec_map_first_page(imgp); 391 if (error) 392 goto exec_fail_dealloc; 393 394 /* 395 * If the current process has a special image activator it 396 * wants to try first, call it. For example, emulating shell 397 * scripts differently. 398 */ 399 error = -1; 400 if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL) 401 error = img_first(imgp); 402 403 /* 404 * Loop through the list of image activators, calling each one. 405 * An activator returns -1 if there is no match, 0 on success, 406 * and an error otherwise. 407 */ 408 for (i = 0; error == -1 && execsw[i]; ++i) { 409 if (execsw[i]->ex_imgact == NULL || 410 execsw[i]->ex_imgact == img_first) { 411 continue; 412 } 413 error = (*execsw[i]->ex_imgact)(imgp); 414 } 415 416 if (error) { 417 if (error == -1) { 418 if (textset == 0) 419 imgp->vp->v_vflag &= ~VV_TEXT; 420 error = ENOEXEC; 421 } 422 goto exec_fail_dealloc; 423 } 424 425 /* 426 * Special interpreter operation, cleanup and loop up to try to 427 * activate the interpreter. 428 */ 429 if (imgp->interpreted) { 430 exec_unmap_first_page(imgp); 431 /* 432 * VV_TEXT needs to be unset for scripts. There is a short 433 * period before we determine that something is a script where 434 * VV_TEXT will be set. The vnode lock is held over this 435 * entire period so nothing should illegitimately be blocked. 436 */ 437 imgp->vp->v_vflag &= ~VV_TEXT; 438 /* free name buffer and old vnode */ 439 NDFREE(ndp, NDF_ONLY_PNBUF); 440 #ifdef MAC 441 interplabel = mac_vnode_label_alloc(); 442 mac_copy_vnode_label(ndp->ni_vp->v_label, interplabel); 443 #endif 444 vput(ndp->ni_vp); 445 vm_object_deallocate(imgp->object); 446 imgp->object = NULL; 447 VFS_UNLOCK_GIANT(vfslocked); 448 vfslocked = 0; 449 /* set new name to that of the interpreter */ 450 NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME | MPSAFE, 451 UIO_SYSSPACE, imgp->interpreter_name, td); 452 goto interpret; 453 } 454 455 /* 456 * Copy out strings (args and env) and initialize stack base 457 */ 458 if (p->p_sysent->sv_copyout_strings) 459 stack_base = (*p->p_sysent->sv_copyout_strings)(imgp); 460 else 461 stack_base = exec_copyout_strings(imgp); 462 463 /* 464 * If custom stack fixup routine present for this process 465 * let it do the stack setup. 466 * Else stuff argument count as first item on stack 467 */ 468 if (p->p_sysent->sv_fixup != NULL) 469 (*p->p_sysent->sv_fixup)(&stack_base, imgp); 470 else 471 suword(--stack_base, imgp->args->argc); 472 473 /* 474 * For security and other reasons, the file descriptor table cannot 475 * be shared after an exec. 476 */ 477 fdunshare(p, td); 478 479 /* 480 * Malloc things before we need locks. 481 */ 482 newcred = crget(); 483 euip = uifind(attr.va_uid); 484 i = imgp->args->begin_envv - imgp->args->begin_argv; 485 /* Cache arguments if they fit inside our allowance */ 486 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { 487 newargs = pargs_alloc(i); 488 bcopy(imgp->args->begin_argv, newargs->ar_args, i); 489 } 490 491 /* close files on exec */ 492 VOP_UNLOCK(imgp->vp, 0, td); 493 fdcloseexec(td); 494 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); 495 496 /* Get a reference to the vnode prior to locking the proc */ 497 VREF(ndp->ni_vp); 498 499 /* 500 * For security and other reasons, signal handlers cannot 501 * be shared after an exec. The new process gets a copy of the old 502 * handlers. In execsigs(), the new process will have its signals 503 * reset. 504 */ 505 PROC_LOCK(p); 506 if (sigacts_shared(p->p_sigacts)) { 507 oldsigacts = p->p_sigacts; 508 PROC_UNLOCK(p); 509 newsigacts = sigacts_alloc(); 510 sigacts_copy(newsigacts, oldsigacts); 511 PROC_LOCK(p); 512 p->p_sigacts = newsigacts; 513 } else 514 oldsigacts = NULL; 515 516 /* Stop profiling */ 517 stopprofclock(p); 518 519 /* reset caught signals */ 520 execsigs(p); 521 522 /* name this process - nameiexec(p, ndp) */ 523 len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN); 524 bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len); 525 p->p_comm[len] = 0; 526 527 /* 528 * mark as execed, wakeup the process that vforked (if any) and tell 529 * it that it now has its own resources back 530 */ 531 p->p_flag |= P_EXEC; 532 if (p->p_pptr && (p->p_flag & P_PPWAIT)) { 533 p->p_flag &= ~P_PPWAIT; 534 wakeup(p->p_pptr); 535 } 536 537 /* 538 * Implement image setuid/setgid. 539 * 540 * Don't honor setuid/setgid if the filesystem prohibits it or if 541 * the process is being traced. 542 * 543 * XXXMAC: For the time being, use NOSUID to also prohibit 544 * transitions on the file system. 545 */ 546 oldcred = p->p_ucred; 547 credential_changing = 0; 548 credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid != 549 attr.va_uid; 550 credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid != 551 attr.va_gid; 552 #ifdef MAC 553 will_transition = mac_execve_will_transition(oldcred, imgp->vp, 554 interplabel, imgp); 555 credential_changing |= will_transition; 556 #endif 557 558 if (credential_changing && 559 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && 560 (p->p_flag & P_TRACED) == 0) { 561 /* 562 * Turn off syscall tracing for set-id programs, except for 563 * root. Record any set-id flags first to make sure that 564 * we do not regain any tracing during a possible block. 565 */ 566 setsugid(p); 567 568 #ifdef KTRACE 569 if (p->p_tracevp != NULL && 570 priv_check_cred(oldcred, PRIV_DEBUG_DIFFCRED, 0)) { 571 mtx_lock(&ktrace_mtx); 572 p->p_traceflag = 0; 573 tracevp = p->p_tracevp; 574 p->p_tracevp = NULL; 575 tracecred = p->p_tracecred; 576 p->p_tracecred = NULL; 577 mtx_unlock(&ktrace_mtx); 578 } 579 #endif 580 /* 581 * Close any file descriptors 0..2 that reference procfs, 582 * then make sure file descriptors 0..2 are in use. 583 * 584 * setugidsafety() may call closef() and then pfind() 585 * which may grab the process lock. 586 * fdcheckstd() may call falloc() which may block to 587 * allocate memory, so temporarily drop the process lock. 588 */ 589 PROC_UNLOCK(p); 590 setugidsafety(td); 591 VOP_UNLOCK(imgp->vp, 0, td); 592 error = fdcheckstd(td); 593 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); 594 if (error != 0) 595 goto done1; 596 PROC_LOCK(p); 597 /* 598 * Set the new credentials. 599 */ 600 crcopy(newcred, oldcred); 601 if (attr.va_mode & VSUID) 602 change_euid(newcred, euip); 603 if (attr.va_mode & VSGID) 604 change_egid(newcred, attr.va_gid); 605 #ifdef MAC 606 if (will_transition) { 607 mac_execve_transition(oldcred, newcred, imgp->vp, 608 interplabel, imgp); 609 } 610 #endif 611 /* 612 * Implement correct POSIX saved-id behavior. 613 * 614 * XXXMAC: Note that the current logic will save the 615 * uid and gid if a MAC domain transition occurs, even 616 * though maybe it shouldn't. 617 */ 618 change_svuid(newcred, newcred->cr_uid); 619 change_svgid(newcred, newcred->cr_gid); 620 p->p_ucred = newcred; 621 newcred = NULL; 622 } else { 623 if (oldcred->cr_uid == oldcred->cr_ruid && 624 oldcred->cr_gid == oldcred->cr_rgid) 625 p->p_flag &= ~P_SUGID; 626 /* 627 * Implement correct POSIX saved-id behavior. 628 * 629 * XXX: It's not clear that the existing behavior is 630 * POSIX-compliant. A number of sources indicate that the 631 * saved uid/gid should only be updated if the new ruid is 632 * not equal to the old ruid, or the new euid is not equal 633 * to the old euid and the new euid is not equal to the old 634 * ruid. The FreeBSD code always updates the saved uid/gid. 635 * Also, this code uses the new (replaced) euid and egid as 636 * the source, which may or may not be the right ones to use. 637 */ 638 if (oldcred->cr_svuid != oldcred->cr_uid || 639 oldcred->cr_svgid != oldcred->cr_gid) { 640 crcopy(newcred, oldcred); 641 change_svuid(newcred, newcred->cr_uid); 642 change_svgid(newcred, newcred->cr_gid); 643 p->p_ucred = newcred; 644 newcred = NULL; 645 } 646 } 647 648 /* 649 * Store the vp for use in procfs. This vnode was referenced prior 650 * to locking the proc lock. 651 */ 652 textvp = p->p_textvp; 653 p->p_textvp = ndp->ni_vp; 654 655 /* 656 * Notify others that we exec'd, and clear the P_INEXEC flag 657 * as we're now a bona fide freshly-execed process. 658 */ 659 KNOTE_LOCKED(&p->p_klist, NOTE_EXEC); 660 p->p_flag &= ~P_INEXEC; 661 662 /* 663 * If tracing the process, trap to debugger so breakpoints 664 * can be set before the program executes. 665 * Use tdsignal to deliver signal to current thread, use 666 * psignal may cause the signal to be delivered to wrong thread 667 * because that thread will exit, remember we are going to enter 668 * single thread mode. 669 */ 670 if (p->p_flag & P_TRACED) 671 tdsignal(p, td, SIGTRAP, NULL); 672 673 /* clear "fork but no exec" flag, as we _are_ execing */ 674 p->p_acflag &= ~AFORK; 675 676 /* 677 * Free any previous argument cache and replace it with 678 * the new argument cache, if any. 679 */ 680 oldargs = p->p_args; 681 p->p_args = newargs; 682 newargs = NULL; 683 684 #ifdef HWPMC_HOOKS 685 /* 686 * Check if system-wide sampling is in effect or if the 687 * current process is using PMCs. If so, do exec() time 688 * processing. This processing needs to happen AFTER the 689 * P_INEXEC flag is cleared. 690 * 691 * The proc lock needs to be released before taking the PMC 692 * SX. 693 */ 694 if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) { 695 PROC_UNLOCK(p); 696 pe.pm_credentialschanged = credential_changing; 697 pe.pm_entryaddr = imgp->entry_addr; 698 699 PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe); 700 } else 701 PROC_UNLOCK(p); 702 #else /* !HWPMC_HOOKS */ 703 PROC_UNLOCK(p); 704 #endif 705 706 /* Set values passed into the program in registers. */ 707 if (p->p_sysent->sv_setregs) 708 (*p->p_sysent->sv_setregs)(td, imgp->entry_addr, 709 (u_long)(uintptr_t)stack_base, imgp->ps_strings); 710 else 711 exec_setregs(td, imgp->entry_addr, 712 (u_long)(uintptr_t)stack_base, imgp->ps_strings); 713 714 vfs_mark_atime(imgp->vp, td); 715 716 done1: 717 /* 718 * Free any resources malloc'd earlier that we didn't use. 719 */ 720 uifree(euip); 721 if (newcred == NULL) 722 crfree(oldcred); 723 else 724 crfree(newcred); 725 VOP_UNLOCK(imgp->vp, 0, td); 726 /* 727 * Handle deferred decrement of ref counts. 728 */ 729 if (textvp != NULL) { 730 int tvfslocked; 731 732 tvfslocked = VFS_LOCK_GIANT(textvp->v_mount); 733 vrele(textvp); 734 VFS_UNLOCK_GIANT(tvfslocked); 735 } 736 if (ndp->ni_vp && error != 0) 737 vrele(ndp->ni_vp); 738 #ifdef KTRACE 739 if (tracevp != NULL) { 740 int tvfslocked; 741 742 tvfslocked = VFS_LOCK_GIANT(tracevp->v_mount); 743 vrele(tracevp); 744 VFS_UNLOCK_GIANT(tvfslocked); 745 } 746 if (tracecred != NULL) 747 crfree(tracecred); 748 #endif 749 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); 750 if (oldargs != NULL) 751 pargs_drop(oldargs); 752 if (newargs != NULL) 753 pargs_drop(newargs); 754 if (oldsigacts != NULL) 755 sigacts_free(oldsigacts); 756 757 exec_fail_dealloc: 758 759 /* 760 * free various allocated resources 761 */ 762 if (imgp->firstpage != NULL) 763 exec_unmap_first_page(imgp); 764 765 if (imgp->vp != NULL) { 766 NDFREE(ndp, NDF_ONLY_PNBUF); 767 vput(imgp->vp); 768 } 769 770 if (imgp->object != NULL) 771 vm_object_deallocate(imgp->object); 772 773 if (error == 0) { 774 /* 775 * Stop the process here if its stop event mask has 776 * the S_EXEC bit set. 777 */ 778 STOPEVENT(p, S_EXEC, 0); 779 goto done2; 780 } 781 782 exec_fail: 783 /* we're done here, clear P_INEXEC */ 784 PROC_LOCK(p); 785 p->p_flag &= ~P_INEXEC; 786 PROC_UNLOCK(p); 787 788 done2: 789 #ifdef MAC 790 mac_execve_exit(imgp); 791 if (interplabel != NULL) 792 mac_vnode_label_free(interplabel); 793 #endif 794 VFS_UNLOCK_GIANT(vfslocked); 795 exec_free_args(args); 796 797 if (error && imgp->vmspace_destroyed) { 798 /* sorry, no more process anymore. exit gracefully */ 799 exit1(td, W_EXITCODE(0, SIGABRT)); 800 /* NOT REACHED */ 801 } 802 return (error); 803 } 804 805 int 806 exec_map_first_page(imgp) 807 struct image_params *imgp; 808 { 809 int rv, i; 810 int initial_pagein; 811 vm_page_t ma[VM_INITIAL_PAGEIN]; 812 vm_object_t object; 813 814 if (imgp->firstpage != NULL) 815 exec_unmap_first_page(imgp); 816 817 object = imgp->vp->v_object; 818 if (object == NULL) 819 return (EACCES); 820 VM_OBJECT_LOCK(object); 821 ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 822 if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) { 823 initial_pagein = VM_INITIAL_PAGEIN; 824 if (initial_pagein > object->size) 825 initial_pagein = object->size; 826 for (i = 1; i < initial_pagein; i++) { 827 if ((ma[i] = vm_page_lookup(object, i)) != NULL) { 828 if (ma[i]->valid) 829 break; 830 if ((ma[i]->oflags & VPO_BUSY) || ma[i]->busy) 831 break; 832 vm_page_busy(ma[i]); 833 } else { 834 ma[i] = vm_page_alloc(object, i, 835 VM_ALLOC_NORMAL); 836 if (ma[i] == NULL) 837 break; 838 } 839 } 840 initial_pagein = i; 841 rv = vm_pager_get_pages(object, ma, initial_pagein, 0); 842 ma[0] = vm_page_lookup(object, 0); 843 if ((rv != VM_PAGER_OK) || (ma[0] == NULL) || 844 (ma[0]->valid == 0)) { 845 if (ma[0]) { 846 vm_page_lock_queues(); 847 vm_page_free(ma[0]); 848 vm_page_unlock_queues(); 849 } 850 VM_OBJECT_UNLOCK(object); 851 return (EIO); 852 } 853 } 854 vm_page_lock_queues(); 855 vm_page_hold(ma[0]); 856 vm_page_unlock_queues(); 857 vm_page_wakeup(ma[0]); 858 VM_OBJECT_UNLOCK(object); 859 860 imgp->firstpage = sf_buf_alloc(ma[0], 0); 861 imgp->image_header = (char *)sf_buf_kva(imgp->firstpage); 862 863 return (0); 864 } 865 866 void 867 exec_unmap_first_page(imgp) 868 struct image_params *imgp; 869 { 870 vm_page_t m; 871 872 if (imgp->firstpage != NULL) { 873 m = sf_buf_page(imgp->firstpage); 874 sf_buf_free(imgp->firstpage); 875 imgp->firstpage = NULL; 876 vm_page_lock_queues(); 877 vm_page_unhold(m); 878 vm_page_unlock_queues(); 879 } 880 } 881 882 /* 883 * Destroy old address space, and allocate a new stack 884 * The new stack is only SGROWSIZ large because it is grown 885 * automatically in trap.c. 886 */ 887 int 888 exec_new_vmspace(imgp, sv) 889 struct image_params *imgp; 890 struct sysentvec *sv; 891 { 892 int error; 893 struct proc *p = imgp->proc; 894 struct vmspace *vmspace = p->p_vmspace; 895 vm_offset_t stack_addr; 896 vm_map_t map; 897 898 imgp->vmspace_destroyed = 1; 899 imgp->sysent = sv; 900 901 /* May be called with Giant held */ 902 EVENTHANDLER_INVOKE(process_exec, p, imgp); 903 904 /* 905 * Blow away entire process VM, if address space not shared, 906 * otherwise, create a new VM space so that other threads are 907 * not disrupted 908 */ 909 map = &vmspace->vm_map; 910 if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser && 911 vm_map_max(map) == sv->sv_maxuser) { 912 shmexit(vmspace); 913 pmap_remove_pages(vmspace_pmap(vmspace)); 914 vm_map_remove(map, vm_map_min(map), vm_map_max(map)); 915 } else { 916 vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser); 917 vmspace = p->p_vmspace; 918 map = &vmspace->vm_map; 919 } 920 921 /* Allocate a new stack */ 922 stack_addr = sv->sv_usrstack - maxssiz; 923 error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz, 924 sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN); 925 if (error) 926 return (error); 927 928 #ifdef __ia64__ 929 /* Allocate a new register stack */ 930 stack_addr = IA64_BACKINGSTORE; 931 error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz, 932 sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP); 933 if (error) 934 return (error); 935 #endif 936 937 /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the 938 * VM_STACK case, but they are still used to monitor the size of the 939 * process stack so we can check the stack rlimit. 940 */ 941 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; 942 vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz; 943 944 return (0); 945 } 946 947 /* 948 * Copy out argument and environment strings from the old process address 949 * space into the temporary string buffer. 950 */ 951 int 952 exec_copyin_args(struct image_args *args, char *fname, 953 enum uio_seg segflg, char **argv, char **envv) 954 { 955 char *argp, *envp; 956 int error; 957 size_t length; 958 959 error = 0; 960 961 bzero(args, sizeof(*args)); 962 if (argv == NULL) 963 return (EFAULT); 964 /* 965 * Allocate temporary demand zeroed space for argument and 966 * environment strings: 967 * 968 * o ARG_MAX for argument and environment; 969 * o MAXSHELLCMDLEN for the name of interpreters. 970 */ 971 args->buf = (char *) kmem_alloc_wait(exec_map, 972 PATH_MAX + ARG_MAX + MAXSHELLCMDLEN); 973 if (args->buf == NULL) 974 return (ENOMEM); 975 args->begin_argv = args->buf; 976 args->endp = args->begin_argv; 977 args->stringspace = ARG_MAX; 978 979 args->fname = args->buf + ARG_MAX; 980 981 /* 982 * Copy the file name. 983 */ 984 error = (segflg == UIO_SYSSPACE) ? 985 copystr(fname, args->fname, PATH_MAX, &length) : 986 copyinstr(fname, args->fname, PATH_MAX, &length); 987 if (error != 0) 988 goto err_exit; 989 990 /* 991 * extract arguments first 992 */ 993 while ((argp = (caddr_t) (intptr_t) fuword(argv++))) { 994 if (argp == (caddr_t) -1) { 995 error = EFAULT; 996 goto err_exit; 997 } 998 if ((error = copyinstr(argp, args->endp, 999 args->stringspace, &length))) { 1000 if (error == ENAMETOOLONG) 1001 error = E2BIG; 1002 goto err_exit; 1003 } 1004 args->stringspace -= length; 1005 args->endp += length; 1006 args->argc++; 1007 } 1008 1009 args->begin_envv = args->endp; 1010 1011 /* 1012 * extract environment strings 1013 */ 1014 if (envv) { 1015 while ((envp = (caddr_t)(intptr_t)fuword(envv++))) { 1016 if (envp == (caddr_t)-1) { 1017 error = EFAULT; 1018 goto err_exit; 1019 } 1020 if ((error = copyinstr(envp, args->endp, 1021 args->stringspace, &length))) { 1022 if (error == ENAMETOOLONG) 1023 error = E2BIG; 1024 goto err_exit; 1025 } 1026 args->stringspace -= length; 1027 args->endp += length; 1028 args->envc++; 1029 } 1030 } 1031 1032 return (0); 1033 1034 err_exit: 1035 exec_free_args(args); 1036 return (error); 1037 } 1038 1039 static void 1040 exec_free_args(struct image_args *args) 1041 { 1042 1043 if (args->buf) { 1044 kmem_free_wakeup(exec_map, (vm_offset_t)args->buf, 1045 PATH_MAX + ARG_MAX + MAXSHELLCMDLEN); 1046 args->buf = NULL; 1047 } 1048 } 1049 1050 /* 1051 * Copy strings out to the new process address space, constructing new arg 1052 * and env vector tables. Return a pointer to the base so that it can be used 1053 * as the initial stack pointer. 1054 */ 1055 register_t * 1056 exec_copyout_strings(imgp) 1057 struct image_params *imgp; 1058 { 1059 int argc, envc; 1060 char **vectp; 1061 char *stringp, *destp; 1062 register_t *stack_base; 1063 struct ps_strings *arginfo; 1064 struct proc *p; 1065 int szsigcode; 1066 1067 /* 1068 * Calculate string base and vector table pointers. 1069 * Also deal with signal trampoline code for this exec type. 1070 */ 1071 p = imgp->proc; 1072 szsigcode = 0; 1073 arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings; 1074 if (p->p_sysent->sv_szsigcode != NULL) 1075 szsigcode = *(p->p_sysent->sv_szsigcode); 1076 destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE - 1077 roundup((ARG_MAX - imgp->args->stringspace), sizeof(char *)); 1078 1079 /* 1080 * install sigcode 1081 */ 1082 if (szsigcode) 1083 copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo - 1084 szsigcode), szsigcode); 1085 1086 /* 1087 * If we have a valid auxargs ptr, prepare some room 1088 * on the stack. 1089 */ 1090 if (imgp->auxargs) { 1091 /* 1092 * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for 1093 * lower compatibility. 1094 */ 1095 imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size : 1096 (AT_COUNT * 2); 1097 /* 1098 * The '+ 2' is for the null pointers at the end of each of 1099 * the arg and env vector sets,and imgp->auxarg_size is room 1100 * for argument of Runtime loader. 1101 */ 1102 vectp = (char **)(destp - (imgp->args->argc + 1103 imgp->args->envc + 2 + imgp->auxarg_size) * 1104 sizeof(char *)); 1105 1106 } else { 1107 /* 1108 * The '+ 2' is for the null pointers at the end of each of 1109 * the arg and env vector sets 1110 */ 1111 vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) * 1112 sizeof(char *)); 1113 } 1114 1115 /* 1116 * vectp also becomes our initial stack base 1117 */ 1118 stack_base = (register_t *)vectp; 1119 1120 stringp = imgp->args->begin_argv; 1121 argc = imgp->args->argc; 1122 envc = imgp->args->envc; 1123 1124 /* 1125 * Copy out strings - arguments and environment. 1126 */ 1127 copyout(stringp, destp, ARG_MAX - imgp->args->stringspace); 1128 1129 /* 1130 * Fill in "ps_strings" struct for ps, w, etc. 1131 */ 1132 suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp); 1133 suword(&arginfo->ps_nargvstr, argc); 1134 1135 /* 1136 * Fill in argument portion of vector table. 1137 */ 1138 for (; argc > 0; --argc) { 1139 suword(vectp++, (long)(intptr_t)destp); 1140 while (*stringp++ != 0) 1141 destp++; 1142 destp++; 1143 } 1144 1145 /* a null vector table pointer separates the argp's from the envp's */ 1146 suword(vectp++, 0); 1147 1148 suword(&arginfo->ps_envstr, (long)(intptr_t)vectp); 1149 suword(&arginfo->ps_nenvstr, envc); 1150 1151 /* 1152 * Fill in environment portion of vector table. 1153 */ 1154 for (; envc > 0; --envc) { 1155 suword(vectp++, (long)(intptr_t)destp); 1156 while (*stringp++ != 0) 1157 destp++; 1158 destp++; 1159 } 1160 1161 /* end of vector table is a null pointer */ 1162 suword(vectp, 0); 1163 1164 return (stack_base); 1165 } 1166 1167 /* 1168 * Check permissions of file to execute. 1169 * Called with imgp->vp locked. 1170 * Return 0 for success or error code on failure. 1171 */ 1172 int 1173 exec_check_permissions(imgp) 1174 struct image_params *imgp; 1175 { 1176 struct vnode *vp = imgp->vp; 1177 struct vattr *attr = imgp->attr; 1178 struct thread *td; 1179 int error; 1180 1181 td = curthread; /* XXXKSE */ 1182 1183 /* Get file attributes */ 1184 error = VOP_GETATTR(vp, attr, td->td_ucred, td); 1185 if (error) 1186 return (error); 1187 1188 #ifdef MAC 1189 error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp); 1190 if (error) 1191 return (error); 1192 #endif 1193 1194 /* 1195 * 1) Check if file execution is disabled for the filesystem that this 1196 * file resides on. 1197 * 2) Insure that at least one execute bit is on - otherwise root 1198 * will always succeed, and we don't want to happen unless the 1199 * file really is executable. 1200 * 3) Insure that the file is a regular file. 1201 */ 1202 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 1203 ((attr->va_mode & 0111) == 0) || 1204 (attr->va_type != VREG)) 1205 return (EACCES); 1206 1207 /* 1208 * Zero length files can't be exec'd 1209 */ 1210 if (attr->va_size == 0) 1211 return (ENOEXEC); 1212 1213 /* 1214 * Check for execute permission to file based on current credentials. 1215 */ 1216 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 1217 if (error) 1218 return (error); 1219 1220 /* 1221 * Check number of open-for-writes on the file and deny execution 1222 * if there are any. 1223 */ 1224 if (vp->v_writecount) 1225 return (ETXTBSY); 1226 1227 /* 1228 * Call filesystem specific open routine (which does nothing in the 1229 * general case). 1230 */ 1231 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); 1232 return (error); 1233 } 1234 1235 /* 1236 * Exec handler registration 1237 */ 1238 int 1239 exec_register(execsw_arg) 1240 const struct execsw *execsw_arg; 1241 { 1242 const struct execsw **es, **xs, **newexecsw; 1243 int count = 2; /* New slot and trailing NULL */ 1244 1245 if (execsw) 1246 for (es = execsw; *es; es++) 1247 count++; 1248 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); 1249 if (newexecsw == NULL) 1250 return (ENOMEM); 1251 xs = newexecsw; 1252 if (execsw) 1253 for (es = execsw; *es; es++) 1254 *xs++ = *es; 1255 *xs++ = execsw_arg; 1256 *xs = NULL; 1257 if (execsw) 1258 free(execsw, M_TEMP); 1259 execsw = newexecsw; 1260 return (0); 1261 } 1262 1263 int 1264 exec_unregister(execsw_arg) 1265 const struct execsw *execsw_arg; 1266 { 1267 const struct execsw **es, **xs, **newexecsw; 1268 int count = 1; 1269 1270 if (execsw == NULL) 1271 panic("unregister with no handlers left?\n"); 1272 1273 for (es = execsw; *es; es++) { 1274 if (*es == execsw_arg) 1275 break; 1276 } 1277 if (*es == NULL) 1278 return (ENOENT); 1279 for (es = execsw; *es; es++) 1280 if (*es != execsw_arg) 1281 count++; 1282 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); 1283 if (newexecsw == NULL) 1284 return (ENOMEM); 1285 xs = newexecsw; 1286 for (es = execsw; *es; es++) 1287 if (*es != execsw_arg) 1288 *xs++ = *es; 1289 *xs = NULL; 1290 if (execsw) 1291 free(execsw, M_TEMP); 1292 execsw = newexecsw; 1293 return (0); 1294 } 1295