xref: /freebsd/sys/kern/kern_exec.c (revision 84ee9401a3fc8d3c22424266f421a928989cd692)
1 /*-
2  * Copyright (c) 1993, David Greenman
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_hwpmc_hooks.h"
31 #include "opt_ktrace.h"
32 #include "opt_mac.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/eventhandler.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/sysproto.h>
40 #include <sys/signalvar.h>
41 #include <sys/kernel.h>
42 #include <sys/mac.h>
43 #include <sys/mount.h>
44 #include <sys/filedesc.h>
45 #include <sys/fcntl.h>
46 #include <sys/acct.h>
47 #include <sys/exec.h>
48 #include <sys/imgact.h>
49 #include <sys/imgact_elf.h>
50 #include <sys/wait.h>
51 #include <sys/malloc.h>
52 #include <sys/proc.h>
53 #include <sys/pioctl.h>
54 #include <sys/namei.h>
55 #include <sys/resourcevar.h>
56 #include <sys/sf_buf.h>
57 #include <sys/syscallsubr.h>
58 #include <sys/sysent.h>
59 #include <sys/shm.h>
60 #include <sys/sysctl.h>
61 #include <sys/vnode.h>
62 #ifdef KTRACE
63 #include <sys/ktrace.h>
64 #endif
65 
66 #include <vm/vm.h>
67 #include <vm/vm_param.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_extern.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_pager.h>
75 
76 #ifdef	HWPMC_HOOKS
77 #include <sys/pmckern.h>
78 #endif
79 
80 #include <machine/reg.h>
81 
82 #include <security/audit/audit.h>
83 
84 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
85 
86 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
87 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
88 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
89 static int do_execve(struct thread *td, struct image_args *args,
90     struct mac *mac_p);
91 static void exec_free_args(struct image_args *);
92 
93 /* XXX This should be vm_size_t. */
94 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
95     NULL, 0, sysctl_kern_ps_strings, "LU", "");
96 
97 /* XXX This should be vm_size_t. */
98 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD,
99     NULL, 0, sysctl_kern_usrstack, "LU", "");
100 
101 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
102     NULL, 0, sysctl_kern_stackprot, "I", "");
103 
104 u_long ps_arg_cache_limit = PAGE_SIZE / 16;
105 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
106     &ps_arg_cache_limit, 0, "");
107 
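/*
 * Report the ps_strings address for the current process's ABI; 32-bit
 * compat requests (SCTL_MASK32) get the value truncated to 32 bits.
 */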
108 static int
109 sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
110 {
111 	struct proc *p;
112 	int error;
113 
114 	p = curproc;
115 #ifdef SCTL_MASK32
116 	if (req->flags & SCTL_MASK32) {
117 		unsigned int val;
118 		val = (unsigned int)p->p_sysent->sv_psstrings;
119 		error = SYSCTL_OUT(req, &val, sizeof(val));
120 	} else
121 #endif
122 		error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
123 		   sizeof(p->p_sysent->sv_psstrings));
124 	return error;
125 }
126 
127 static int
128 sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
129 {
130 	struct proc *p;
131 	int error;
132 
133 	p = curproc;
134 #ifdef SCTL_MASK32
135 	if (req->flags & SCTL_MASK32) {
136 		unsigned int val;
137 		val = (unsigned int)p->p_sysent->sv_usrstack;
138 		error = SYSCTL_OUT(req, &val, sizeof(val));
139 	} else
140 #endif
141 		error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
142 		    sizeof(p->p_sysent->sv_usrstack));
143 	return error;
144 }
145 
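/*
 * Report the stack protection (sv_stackprot) for the current
 * process's ABI.
 */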
146 static int
147 sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
148 {
149 	struct proc *p;
150 
151 	p = curproc;
152 	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
153 	    sizeof(p->p_sysent->sv_stackprot)));
154 }
155 
156 /*
157  * Each of the items is a pointer to a `const struct execsw', hence the
158  * double pointer here.
159  */
160 static const struct execsw **execsw;
161 
162 #ifndef _SYS_SYSPROTO_H_
163 struct execve_args {
164 	char    *fname;
165 	char    **argv;
166 	char    **envv;
167 };
168 #endif
169 
170 /*
171  * MPSAFE
172  */
173 int
174 execve(td, uap)
175 	struct thread *td;
176 	struct execve_args /* {
177 		char *fname;
178 		char **argv;
179 		char **envv;
180 	} */ *uap;
181 {
182 	int error;
183 	struct image_args args;
184 
185 	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
186 	    uap->argv, uap->envv);
187 	if (error == 0)
188 		error = kern_execve(td, &args, NULL);
189 	return (error);
190 }
191 
192 #ifndef _SYS_SYSPROTO_H_
193 struct __mac_execve_args {
194 	char	*fname;
195 	char	**argv;
196 	char	**envv;
197 	struct mac	*mac_p;
198 };
199 #endif
200 
201 /*
202  * MPSAFE
203  */
204 int
205 __mac_execve(td, uap)
206 	struct thread *td;
207 	struct __mac_execve_args /* {
208 		char *fname;
209 		char **argv;
210 		char **envv;
211 		struct mac *mac_p;
212 	} */ *uap;
213 {
214 #ifdef MAC
215 	int error;
216 	struct image_args args;
217 
218 	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
219 	    uap->argv, uap->envv);
220 	if (error == 0)
221 		error = kern_execve(td, &args, uap->mac_p);
222 	return (error);
223 #else
224 	return (ENOSYS);
225 #endif
226 }
227 
228 /*
229  * XXX: kern_execve has the astonishing property of not always
230  * returning to the caller.  If sufficiently bad things happen during
231  * the call to do_execve(), it can end up calling exit1(); as a result,
232  * callers must avoid doing anything which they might need to undo
233  * (e.g., allocating memory).
234  */
235 int
236 kern_execve(td, args, mac_p)
237 	struct thread *td;
238 	struct image_args *args;
239 	struct mac *mac_p;
240 {
241 	struct proc *p = td->td_proc;
242 	int error;
243 
244 	AUDIT_ARG(argv, args->begin_argv, args->argc,
245 	    args->begin_envv - args->begin_argv);
246 	AUDIT_ARG(envv, args->begin_envv, args->envc,
247 	    args->endp - args->begin_envv);
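	/*
	 * If the process has ever been multithreaded, single-thread it
	 * at the boundary so that no other thread runs while the image
	 * is being replaced.
	 */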
248 	if (p->p_flag & P_HADTHREADS) {
249 		PROC_LOCK(p);
250 		if (thread_single(SINGLE_BOUNDARY)) {
251 			PROC_UNLOCK(p);
252 			exec_free_args(args);
253 			return (ERESTART);	/* Try again later. */
254 		}
255 		PROC_UNLOCK(p);
256 	}
257 
258 	error = do_execve(td, args, mac_p);
259 
260 	if (p->p_flag & P_HADTHREADS) {
261 		PROC_LOCK(p);
262 		/*
263 		 * If the exec succeeded, upgrade to the SINGLE_EXIT state
264 		 * to force the other threads to exit.
265 		 */
266 		if (error == 0)
267 			thread_single(SINGLE_EXIT);
268 		else
269 			thread_single_end();
270 		PROC_UNLOCK(p);
271 	}
272 
273 	return (error);
274 }
275 
276 /*
277  * In-kernel implementation of execve().  All arguments are assumed to be
278  * userspace pointers from the passed thread.
279  *
280  * MPSAFE
281  */
282 static int
283 do_execve(td, args, mac_p)
284 	struct thread *td;
285 	struct image_args *args;
286 	struct mac *mac_p;
287 {
288 	struct proc *p = td->td_proc;
289 	struct nameidata nd, *ndp;
290 	struct ucred *newcred = NULL, *oldcred;
291 	struct uidinfo *euip;
292 	register_t *stack_base;
293 	int error, len, i;
294 	struct image_params image_params, *imgp;
295 	struct vattr attr;
296 	int (*img_first)(struct image_params *);
297 	struct pargs *oldargs = NULL, *newargs = NULL;
298 	struct sigacts *oldsigacts, *newsigacts;
299 #ifdef KTRACE
300 	struct vnode *tracevp = NULL;
301 	struct ucred *tracecred = NULL;
302 #endif
303 	struct vnode *textvp = NULL;
304 	int credential_changing;
305 	int vfslocked;
306 	int textset;
307 #ifdef MAC
308 	struct label *interplabel = NULL;
309 	int will_transition;
310 #endif
311 #ifdef HWPMC_HOOKS
312 	struct pmckern_procexec pe;
313 #endif
314 
315 	vfslocked = 0;
316 	imgp = &image_params;
317 
318 	/*
319 	 * Lock the process and set the P_INEXEC flag to indicate that
320 	 * it should be left alone until we're done here.  This is
321 	 * necessary to avoid race conditions - e.g. in ptrace() -
322 	 * that might allow a local user to illicitly obtain elevated
323 	 * privileges.
324 	 */
325 	PROC_LOCK(p);
326 	KASSERT((p->p_flag & P_INEXEC) == 0,
327 	    ("%s(): process already has P_INEXEC flag", __func__));
328 	p->p_flag |= P_INEXEC;
329 	PROC_UNLOCK(p);
330 
331 	/*
332 	 * Initialize part of the common data
333 	 */
334 	imgp->proc = p;
335 	imgp->execlabel = NULL;
336 	imgp->attr = &attr;
337 	imgp->entry_addr = 0;
338 	imgp->vmspace_destroyed = 0;
339 	imgp->interpreted = 0;
340 	imgp->interpreter_name = args->buf + PATH_MAX + ARG_MAX;
341 	imgp->auxargs = NULL;
342 	imgp->vp = NULL;
343 	imgp->object = NULL;
344 	imgp->firstpage = NULL;
345 	imgp->ps_strings = 0;
346 	imgp->auxarg_size = 0;
347 	imgp->args = args;
348 
349 #ifdef MAC
350 	error = mac_execve_enter(imgp, mac_p);
351 	if (error)
352 		goto exec_fail;
353 #endif
354 
355 	imgp->image_header = NULL;
356 
357 	/*
358 	 * Translate the file name. namei() returns a vnode pointer
359  *	in ni_vp among other things.
360 	 *
361 	 * XXXAUDIT: It would be desirable to also audit the name of the
362 	 * interpreter if this is an interpreted binary.
363 	 */
364 	ndp = &nd;
365 	NDINIT(ndp, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME | MPSAFE |
366 	    AUDITVNODE1, UIO_SYSSPACE, args->fname, td);
367 
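/*
 * The image activation loop re-enters here once an interpreted image
 * (e.g. a shell script) has set up imgp->interpreter_name below.
 */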
368 interpret:
369 	error = namei(ndp);
370 	if (error)
371 		goto exec_fail;
372 
373 	vfslocked = NDHASGIANT(ndp);
374 	imgp->vp = ndp->ni_vp;
375 
376 	/*
377 	 * Check file permissions (also 'opens' file)
378 	 */
379 	error = exec_check_permissions(imgp);
380 	if (error)
381 		goto exec_fail_dealloc;
382 
383 	imgp->object = imgp->vp->v_object;
384 	if (imgp->object != NULL)
385 		vm_object_reference(imgp->object);
386 
387 	/*
388 	 * Set VV_TEXT now so no one can write to the executable while we're
389 	 * activating it.
390 	 *
391 	 * Remember if this was set before and unset it in case this is not
392 	 * actually an executable image.
393 	 */
394 	textset = imgp->vp->v_vflag & VV_TEXT;
395 	imgp->vp->v_vflag |= VV_TEXT;
396 
397 	error = exec_map_first_page(imgp);
398 	if (error)
399 		goto exec_fail_dealloc;
400 
401 	/*
402 	 *	If the current process has a special image activator it
403 	 *	wants to try first, call it.  For example, shell scripts can
404 	 *	be emulated differently.
405 	 */
406 	error = -1;
407 	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
408 		error = img_first(imgp);
409 
410 	/*
411 	 *	Loop through the list of image activators, calling each one.
412 	 *	An activator returns -1 if there is no match, 0 on success,
413 	 *	and an error otherwise.
414 	 */
415 	for (i = 0; error == -1 && execsw[i]; ++i) {
416 		if (execsw[i]->ex_imgact == NULL ||
417 		    execsw[i]->ex_imgact == img_first) {
418 			continue;
419 		}
420 		error = (*execsw[i]->ex_imgact)(imgp);
421 	}
422 
423 	if (error) {
424 		if (error == -1) {
425 			if (textset == 0)
426 				imgp->vp->v_vflag &= ~VV_TEXT;
427 			error = ENOEXEC;
428 		}
429 		goto exec_fail_dealloc;
430 	}
431 
432 	/*
433 	 * Special interpreter operation: clean up and loop back up to try
434 	 * to activate the interpreter.
435 	 */
436 	if (imgp->interpreted) {
437 		exec_unmap_first_page(imgp);
438 		/*
439 		 * VV_TEXT needs to be unset for scripts.  There is a short
440 		 * period before we determine that something is a script where
441 		 * VV_TEXT will be set. The vnode lock is held over this
442 		 * entire period so nothing should illegitimately be blocked.
443 		 */
444 		imgp->vp->v_vflag &= ~VV_TEXT;
445 		/* free name buffer and old vnode */
446 		NDFREE(ndp, NDF_ONLY_PNBUF);
447 #ifdef MAC
448 		interplabel = mac_vnode_label_alloc();
449 		mac_copy_vnode_label(ndp->ni_vp->v_label, interplabel);
450 #endif
451 		vput(ndp->ni_vp);
452 		vm_object_deallocate(imgp->object);
453 		imgp->object = NULL;
454 		VFS_UNLOCK_GIANT(vfslocked);
455 		vfslocked = 0;
456 		/* set new name to that of the interpreter */
457 		NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME | MPSAFE,
458 		    UIO_SYSSPACE, imgp->interpreter_name, td);
459 		goto interpret;
460 	}
461 
462 	/*
463 	 * Copy out strings (args and env) and initialize stack base
464 	 */
465 	if (p->p_sysent->sv_copyout_strings)
466 		stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
467 	else
468 		stack_base = exec_copyout_strings(imgp);
469 
470 	/*
471 	 * If a custom stack fixup routine is present for this process,
472 	 * let it do the stack setup.  Otherwise, push the argument count
473 	 * as the first item on the stack.
474 	 */
475 	if (p->p_sysent->sv_fixup != NULL)
476 		(*p->p_sysent->sv_fixup)(&stack_base, imgp);
477 	else
478 		suword(--stack_base, imgp->args->argc);
479 
480 	/*
481 	 * For security and other reasons, the file descriptor table cannot
482 	 * be shared after an exec.
483 	 */
484 	fdunshare(p, td);
485 
486 	/*
487 	 * Malloc things before we need locks.
488 	 */
489 	newcred = crget();
490 	euip = uifind(attr.va_uid);
491 	i = imgp->args->begin_envv - imgp->args->begin_argv;
492 	/* Cache arguments if they fit inside our allowance */
493 	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
494 		newargs = pargs_alloc(i);
495 		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
496 	}
497 
498 	/* close files on exec */
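	/*
	 * The image vnode lock is dropped across fdcloseexec(), since
	 * closing descriptors may sleep, and is then reacquired.
	 */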
499 	VOP_UNLOCK(imgp->vp, 0, td);
500 	fdcloseexec(td);
501 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
502 
503 	/* Get a reference to the vnode prior to locking the proc */
504 	VREF(ndp->ni_vp);
505 
506 	/*
507 	 * For security and other reasons, signal handlers cannot
508 	 * be shared after an exec. The new process gets a copy of the old
509 	 * handlers. In execsigs(), the new process will have its signals
510 	 * reset.
511 	 */
512 	PROC_LOCK(p);
513 	if (sigacts_shared(p->p_sigacts)) {
514 		oldsigacts = p->p_sigacts;
515 		PROC_UNLOCK(p);
516 		newsigacts = sigacts_alloc();
517 		sigacts_copy(newsigacts, oldsigacts);
518 		PROC_LOCK(p);
519 		p->p_sigacts = newsigacts;
520 	} else
521 		oldsigacts = NULL;
522 
523 	/* Stop profiling */
524 	stopprofclock(p);
525 
526 	/* reset caught signals */
527 	execsigs(p);
528 
529 	/* name this process - nameiexec(p, ndp) */
530 	len = min(ndp->ni_cnd.cn_namelen, MAXCOMLEN);
531 	bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
532 	p->p_comm[len] = 0;
533 
534 	/*
535 	 * Mark the process as execed, wake up the parent that vforked (if
536 	 * any), and tell it that it now has its own resources back.
537 	 */
538 	p->p_flag |= P_EXEC;
539 	if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
540 		p->p_flag &= ~P_PPWAIT;
541 		wakeup(p->p_pptr);
542 	}
543 
544 	/*
545 	 * Implement image setuid/setgid.
546 	 *
547 	 * Don't honor setuid/setgid if the filesystem prohibits it or if
548 	 * the process is being traced.
549 	 *
550 	 * XXXMAC: For the time being, use NOSUID to also prohibit
551 	 * transitions on the file system.
552 	 */
553 	oldcred = p->p_ucred;
554 	credential_changing = 0;
555 	credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
556 	    attr.va_uid;
557 	credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
558 	    attr.va_gid;
559 #ifdef MAC
560 	will_transition = mac_execve_will_transition(oldcred, imgp->vp,
561 	    interplabel, imgp);
562 	credential_changing |= will_transition;
563 #endif
564 
565 	if (credential_changing &&
566 	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
567 	    (p->p_flag & P_TRACED) == 0) {
568 		/*
569 		 * Turn off syscall tracing for set-id programs, except for
570 		 * root.  Record any set-id flags first to make sure that
571 		 * we do not regain any tracing during a possible block.
572 		 */
573 		setsugid(p);
574 #ifdef KTRACE
575 		if (p->p_tracevp != NULL && suser_cred(oldcred, SUSER_ALLOWJAIL)) {
576 			mtx_lock(&ktrace_mtx);
577 			p->p_traceflag = 0;
578 			tracevp = p->p_tracevp;
579 			p->p_tracevp = NULL;
580 			tracecred = p->p_tracecred;
581 			p->p_tracecred = NULL;
582 			mtx_unlock(&ktrace_mtx);
583 		}
584 #endif
585 		/*
586 		 * Close any file descriptors 0..2 that reference procfs,
587 		 * then make sure file descriptors 0..2 are in use.
588 		 *
589 		 * setugidsafety() may call closef() and then pfind()
590 		 * which may grab the process lock.
591 		 * fdcheckstd() may call falloc() which may block to
592 		 * allocate memory, so temporarily drop the process lock.
593 		 */
594 		PROC_UNLOCK(p);
595 		setugidsafety(td);
596 		VOP_UNLOCK(imgp->vp, 0, td);
597 		error = fdcheckstd(td);
598 		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
599 		if (error != 0)
600 			goto done1;
601 		PROC_LOCK(p);
602 		/*
603 		 * Set the new credentials.
604 		 */
605 		crcopy(newcred, oldcred);
606 		if (attr.va_mode & VSUID)
607 			change_euid(newcred, euip);
608 		if (attr.va_mode & VSGID)
609 			change_egid(newcred, attr.va_gid);
610 #ifdef MAC
611 		if (will_transition) {
612 			mac_execve_transition(oldcred, newcred, imgp->vp,
613 			    interplabel, imgp);
614 		}
615 #endif
616 		/*
617 		 * Implement correct POSIX saved-id behavior.
618 		 *
619 		 * XXXMAC: Note that the current logic will save the
620 		 * uid and gid if a MAC domain transition occurs, even
621 		 * though maybe it shouldn't.
622 		 */
623 		change_svuid(newcred, newcred->cr_uid);
624 		change_svgid(newcred, newcred->cr_gid);
625 		p->p_ucred = newcred;
626 		newcred = NULL;
627 	} else {
628 		if (oldcred->cr_uid == oldcred->cr_ruid &&
629 		    oldcred->cr_gid == oldcred->cr_rgid)
630 			p->p_flag &= ~P_SUGID;
631 		/*
632 		 * Implement correct POSIX saved-id behavior.
633 		 *
634 		 * XXX: It's not clear that the existing behavior is
635 		 * POSIX-compliant.  A number of sources indicate that the
636 		 * saved uid/gid should only be updated if the new ruid is
637 		 * not equal to the old ruid, or the new euid is not equal
638 		 * to the old euid and the new euid is not equal to the old
639 		 * ruid.  The FreeBSD code always updates the saved uid/gid.
640 		 * Also, this code uses the new (replaced) euid and egid as
641 		 * the source, which may or may not be the right ones to use.
642 		 */
643 		if (oldcred->cr_svuid != oldcred->cr_uid ||
644 		    oldcred->cr_svgid != oldcred->cr_gid) {
645 			crcopy(newcred, oldcred);
646 			change_svuid(newcred, newcred->cr_uid);
647 			change_svgid(newcred, newcred->cr_gid);
648 			p->p_ucred = newcred;
649 			newcred = NULL;
650 		}
651 	}
652 
653 	/*
654 	 * Store the vp for use in procfs.  This vnode was referenced prior
655 	 * to locking the proc lock.
656 	 */
657 	textvp = p->p_textvp;
658 	p->p_textvp = ndp->ni_vp;
659 
660 	/*
661 	 * Notify others that we exec'd, and clear the P_INEXEC flag
662 	 * as we're now a bona fide freshly-execed process.
663 	 */
664 	KNOTE_LOCKED(&p->p_klist, NOTE_EXEC);
665 	p->p_flag &= ~P_INEXEC;
666 
667 	/*
668 	 * If tracing the process, trap to the debugger so that
669 	 * breakpoints can be set before the program executes.
670 	 * Use tdsignal() to deliver the signal to the current thread;
671 	 * using psignal() could deliver the signal to the wrong
672 	 * thread, because that thread will exit once we enter
673 	 * single-threaded mode.
674 	 */
675 	if (p->p_flag & P_TRACED)
676 		tdsignal(p, td, SIGTRAP, NULL);
677 
678 	/* clear "fork but no exec" flag, as we _are_ execing */
679 	p->p_acflag &= ~AFORK;
680 
681 	/*
682 	 * Free any previous argument cache and replace it with
683 	 * the new argument cache, if any.
684 	 */
685 	oldargs = p->p_args;
686 	p->p_args = newargs;
687 	newargs = NULL;
688 
689 #ifdef	HWPMC_HOOKS
690 	/*
691 	 * Check if system-wide sampling is in effect or if the
692 	 * current process is using PMCs.  If so, do exec() time
693 	 * processing.  This processing needs to happen AFTER the
694 	 * P_INEXEC flag is cleared.
695 	 *
696 	 * The proc lock needs to be released before taking the PMC
697 	 * SX.
698 	 */
699 	if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
700 		PROC_UNLOCK(p);
701 		pe.pm_credentialschanged = credential_changing;
702 		pe.pm_entryaddr = imgp->entry_addr;
703 
704 		PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
705 	} else
706 		PROC_UNLOCK(p);
707 #else  /* !HWPMC_HOOKS */
708 	PROC_UNLOCK(p);
709 #endif
710 
711 	/* Set values passed into the program in registers. */
712 	if (p->p_sysent->sv_setregs)
713 		(*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
714 		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
715 	else
716 		exec_setregs(td, imgp->entry_addr,
717 		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
718 
719 	vfs_mark_atime(imgp->vp, td);
720 
721 done1:
722 	/*
723 	 * Free any resources malloc'd earlier that we didn't use.
724 	 */
725 	uifree(euip);
726 	if (newcred == NULL)
727 		crfree(oldcred);
728 	else
729 		crfree(newcred);
730 	VOP_UNLOCK(imgp->vp, 0, td);
731 	/*
732 	 * Handle deferred decrement of ref counts.
733 	 */
734 	if (textvp != NULL) {
735 		int tvfslocked;
736 
737 		tvfslocked = VFS_LOCK_GIANT(textvp->v_mount);
738 		vrele(textvp);
739 		VFS_UNLOCK_GIANT(tvfslocked);
740 	}
741 	if (ndp->ni_vp && error != 0)
742 		vrele(ndp->ni_vp);
743 #ifdef KTRACE
744 	if (tracevp != NULL)
745 		vrele(tracevp);
746 	if (tracecred != NULL)
747 		crfree(tracecred);
748 #endif
749 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
750 	if (oldargs != NULL)
751 		pargs_drop(oldargs);
752 	if (newargs != NULL)
753 		pargs_drop(newargs);
754 	if (oldsigacts != NULL)
755 		sigacts_free(oldsigacts);
756 
757 exec_fail_dealloc:
758 
759 	/*
760 	 * free various allocated resources
761 	 */
762 	if (imgp->firstpage != NULL)
763 		exec_unmap_first_page(imgp);
764 
765 	if (imgp->vp != NULL) {
766 		NDFREE(ndp, NDF_ONLY_PNBUF);
767 		vput(imgp->vp);
768 	}
769 
770 	if (imgp->object != NULL)
771 		vm_object_deallocate(imgp->object);
772 
773 	if (error == 0) {
774 		/*
775 		 * Stop the process here if its stop event mask has
776 		 * the S_EXEC bit set.
777 		 */
778 		STOPEVENT(p, S_EXEC, 0);
779 		goto done2;
780 	}
781 
782 exec_fail:
783 	/* we're done here, clear P_INEXEC */
784 	PROC_LOCK(p);
785 	p->p_flag &= ~P_INEXEC;
786 	PROC_UNLOCK(p);
787 
788 done2:
789 #ifdef MAC
790 	mac_execve_exit(imgp);
791 	if (interplabel != NULL)
792 		mac_vnode_label_free(interplabel);
793 #endif
794 	VFS_UNLOCK_GIANT(vfslocked);
795 	exec_free_args(args);
796 
797 	if (error && imgp->vmspace_destroyed) {
798 		/* Sorry, the process no longer exists; exit gracefully. */
799 		exit1(td, W_EXITCODE(0, SIGABRT));
800 		/* NOT REACHED */
801 	}
802 	return (error);
803 }
804 
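/*
 * Map the first page of the executable into the kernel via an sf_buf and
 * point imgp->image_header at it so that image activators can examine
 * the header.  If the page is not fully valid, up to VM_INITIAL_PAGEIN
 * pages are clustered in from the pager.
 */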
805 int
806 exec_map_first_page(imgp)
807 	struct image_params *imgp;
808 {
809 	int rv, i;
810 	int initial_pagein;
811 	vm_page_t ma[VM_INITIAL_PAGEIN];
812 	vm_object_t object;
813 
814 	if (imgp->firstpage != NULL)
815 		exec_unmap_first_page(imgp);
816 
817 	object = imgp->vp->v_object;
818 	if (object == NULL)
819 		return (EACCES);
820 	VM_OBJECT_LOCK(object);
821 	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
822 	if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
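		/*
		 * Build a run of pages for a clustered pagein, stopping at
		 * the first valid or busy page encountered, or if a page
		 * cannot be allocated.
		 */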
823 		initial_pagein = VM_INITIAL_PAGEIN;
824 		if (initial_pagein > object->size)
825 			initial_pagein = object->size;
826 		for (i = 1; i < initial_pagein; i++) {
827 			if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
828 				if (ma[i]->valid)
829 					break;
830 				if ((ma[i]->flags & PG_BUSY) || ma[i]->busy)
831 					break;
832 				vm_page_lock_queues();
833 				vm_page_busy(ma[i]);
834 				vm_page_unlock_queues();
835 			} else {
836 				ma[i] = vm_page_alloc(object, i,
837 				    VM_ALLOC_NORMAL);
838 				if (ma[i] == NULL)
839 					break;
840 			}
841 		}
842 		initial_pagein = i;
843 		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
844 		ma[0] = vm_page_lookup(object, 0);
845 		if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
846 		    (ma[0]->valid == 0)) {
847 			if (ma[0]) {
848 				vm_page_lock_queues();
849 				vm_page_free(ma[0]);
850 				vm_page_unlock_queues();
851 			}
852 			VM_OBJECT_UNLOCK(object);
853 			return (EIO);
854 		}
855 	}
856 	vm_page_lock_queues();
857 	vm_page_hold(ma[0]);
858 	vm_page_wakeup(ma[0]);
859 	vm_page_unlock_queues();
860 	VM_OBJECT_UNLOCK(object);
861 
862 	imgp->firstpage = sf_buf_alloc(ma[0], 0);
863 	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
864 
865 	return (0);
866 }
867 
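/*
 * Undo exec_map_first_page(): release the sf_buf mapping and drop the
 * hold on the underlying page.
 */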
868 void
869 exec_unmap_first_page(imgp)
870 	struct image_params *imgp;
871 {
872 	vm_page_t m;
873 
874 	if (imgp->firstpage != NULL) {
875 		m = sf_buf_page(imgp->firstpage);
876 		sf_buf_free(imgp->firstpage);
877 		imgp->firstpage = NULL;
878 		vm_page_lock_queues();
879 		vm_page_unhold(m);
880 		vm_page_unlock_queues();
881 	}
882 }
883 
884 /*
885  * Destroy the old address space and allocate a new stack.
886  *	The new stack is only sgrowsiz large because it is grown
887  *	automatically in trap.c.
888  */
889 int
890 exec_new_vmspace(imgp, sv)
891 	struct image_params *imgp;
892 	struct sysentvec *sv;
893 {
894 	int error;
895 	struct proc *p = imgp->proc;
896 	struct vmspace *vmspace = p->p_vmspace;
897 	vm_offset_t stack_addr;
898 	vm_map_t map;
899 
900 	imgp->vmspace_destroyed = 1;
901 	imgp->sysent = sv;
902 
903 	/* Called with Giant held, do not depend on it! */
904 	EVENTHANDLER_INVOKE(process_exec, p, imgp);
905 
906 	/*
907 	 * Here is as good a place as any to do any resource limit cleanups.
908 	 * This is needed if a 64-bit binary execs a 32-bit binary - the
909 	 * data size limit may need to be changed to a value that makes
910 	 * sense for the 32-bit binary.
911 	 */
912 	if (sv->sv_fixlimits != NULL)
913 		sv->sv_fixlimits(p);
914 
915 	/*
916 	 * Blow away the entire process VM if the address space is not
917 	 * shared; otherwise, create a new VM space so that other threads
918 	 * are not disrupted.
919 	 */
920 	map = &vmspace->vm_map;
921 	if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
922 	    vm_map_max(map) == sv->sv_maxuser) {
923 		shmexit(vmspace);
924 		pmap_remove_pages(vmspace_pmap(vmspace));
925 		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
926 	} else {
927 		vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
928 		vmspace = p->p_vmspace;
929 		map = &vmspace->vm_map;
930 	}
931 
932 	/* Allocate a new stack */
933 	stack_addr = sv->sv_usrstack - maxssiz;
934 	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
935 	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
936 	if (error)
937 		return (error);
938 
939 #ifdef __ia64__
940 	/* Allocate a new register stack */
941 	stack_addr = IA64_BACKINGSTORE;
942 	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
943 	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP);
944 	if (error)
945 		return (error);
946 #endif
947 
948 	/* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
949 	 * VM_STACK case, but they are still used to monitor the size of the
950 	 * process stack so we can check the stack rlimit.
951 	 */
952 	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
953 	vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz;
954 
955 	return (0);
956 }
957 
958 /*
959  * Copy the argument and environment strings from the old process
960  *	address space into the kernel's temporary string buffer.
961  */
962 int
963 exec_copyin_args(struct image_args *args, char *fname,
964     enum uio_seg segflg, char **argv, char **envv)
965 {
966 	char *argp, *envp;
967 	int error;
968 	size_t length;
969 
970 	error = 0;
971 
972 	bzero(args, sizeof(*args));
973 	if (argv == NULL)
974 		return (EFAULT);
975 	/*
976 	 * Allocate temporary demand-zeroed space for the file name and the
977 	 *	argument and environment strings:
978 	 *
979 	 * o ARG_MAX for the argument and environment strings;
980 	 * o PATH_MAX for the file name and MAXSHELLCMDLEN for the interpreter name.
981 	 */
982 	args->buf = (char *) kmem_alloc_wait(exec_map,
983 	    PATH_MAX + ARG_MAX + MAXSHELLCMDLEN);
984 	if (args->buf == NULL)
985 		return (ENOMEM);
986 	args->begin_argv = args->buf;
987 	args->endp = args->begin_argv;
988 	args->stringspace = ARG_MAX;
989 
990 	args->fname = args->buf + ARG_MAX;
991 
992 	/*
993 	 * Copy the file name.
994 	 */
995 	error = (segflg == UIO_SYSSPACE) ?
996 	    copystr(fname, args->fname, PATH_MAX, &length) :
997 	    copyinstr(fname, args->fname, PATH_MAX, &length);
998 	if (error != 0)
999 		goto err_exit;
1000 
1001 	/*
1002 	 * extract arguments first
1003 	 */
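	/*
	 * fuword() returns -1 on a fault, so a pointer that reads back as
	 * -1 is treated as EFAULT.
	 */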
1004 	while ((argp = (caddr_t) (intptr_t) fuword(argv++))) {
1005 		if (argp == (caddr_t) -1) {
1006 			error = EFAULT;
1007 			goto err_exit;
1008 		}
1009 		if ((error = copyinstr(argp, args->endp,
1010 		    args->stringspace, &length))) {
1011 			if (error == ENAMETOOLONG)
1012 				error = E2BIG;
1013 			goto err_exit;
1014 		}
1015 		args->stringspace -= length;
1016 		args->endp += length;
1017 		args->argc++;
1018 	}
1019 
1020 	args->begin_envv = args->endp;
1021 
1022 	/*
1023 	 * extract environment strings
1024 	 */
1025 	if (envv) {
1026 		while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
1027 			if (envp == (caddr_t)-1) {
1028 				error = EFAULT;
1029 				goto err_exit;
1030 			}
1031 			if ((error = copyinstr(envp, args->endp,
1032 			    args->stringspace, &length))) {
1033 				if (error == ENAMETOOLONG)
1034 					error = E2BIG;
1035 				goto err_exit;
1036 			}
1037 			args->stringspace -= length;
1038 			args->endp += length;
1039 			args->envc++;
1040 		}
1041 	}
1042 
1043 	return (0);
1044 
1045 err_exit:
1046 	exec_free_args(args);
1047 	return (error);
1048 }
1049 
1050 static void
1051 exec_free_args(struct image_args *args)
1052 {
1053 
1054 	if (args->buf) {
1055 		kmem_free_wakeup(exec_map, (vm_offset_t)args->buf,
1056 		    PATH_MAX + ARG_MAX + MAXSHELLCMDLEN);
1057 		args->buf = NULL;
1058 	}
1059 }
1060 
1061 /*
1062  * Copy strings out to the new process address space, constructing
1063  *	new arg and env vector tables. Return a pointer to the base
1064  *	so that it can be used as the initial stack pointer.
1065  */
1066 register_t *
1067 exec_copyout_strings(imgp)
1068 	struct image_params *imgp;
1069 {
1070 	int argc, envc;
1071 	char **vectp;
1072 	char *stringp, *destp;
1073 	register_t *stack_base;
1074 	struct ps_strings *arginfo;
1075 	struct proc *p;
1076 	int szsigcode;
1077 
1078 	/*
1079 	 * Calculate string base and vector table pointers.
1080 	 * Also deal with signal trampoline code for this exec type.
1081 	 */
1082 	p = imgp->proc;
1083 	szsigcode = 0;
1084 	arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
1085 	if (p->p_sysent->sv_szsigcode != NULL)
1086 		szsigcode = *(p->p_sysent->sv_szsigcode);
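	/*
	 * destp is the base of the copied-out strings: the string space
	 * actually used (rounded up to a pointer boundary) is reserved
	 * below ps_strings, the signal trampoline and SPARE_USRSPACE.
	 */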
1087 	destp =	(caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
1088 	    roundup((ARG_MAX - imgp->args->stringspace), sizeof(char *));
1089 
1090 	/*
1091 	 * install sigcode
1092 	 */
1093 	if (szsigcode)
1094 		copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
1095 		    szsigcode), szsigcode);
1096 
1097 	/*
1098 	 * If we have a valid auxargs ptr, prepare some room
1099 	 * on the stack.
1100 	 */
1101 	if (imgp->auxargs) {
1102 		/*
1103 		 * 'AT_COUNT * 2' is the size of the ELF auxargs data.  This
1104 		 * is for backward compatibility.
1105 		 */
1106 		imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
1107 		    (AT_COUNT * 2);
1108 		/*
1109 		 * The '+ 2' is for the null pointers at the end of each of
1110 		 * the arg and env vector sets, and imgp->auxarg_size is room
1111 		 * for the arguments of the runtime loader.
1112 		 */
1113 		vectp = (char **)(destp - (imgp->args->argc +
1114 		    imgp->args->envc + 2 + imgp->auxarg_size) *
1115 		    sizeof(char *));
1116 
1117 	} else {
1118 		/*
1119 		 * The '+ 2' is for the null pointers at the end of each of
1120 		 * the arg and env vector sets
1121 		 */
1122 		vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) *
1123 		    sizeof(char *));
1124 	}
1125 
1126 	/*
1127 	 * vectp also becomes our initial stack base
1128 	 */
1129 	stack_base = (register_t *)vectp;
1130 
1131 	stringp = imgp->args->begin_argv;
1132 	argc = imgp->args->argc;
1133 	envc = imgp->args->envc;
1134 
1135 	/*
1136 	 * Copy out strings - arguments and environment.
1137 	 */
1138 	copyout(stringp, destp, ARG_MAX - imgp->args->stringspace);
1139 
1140 	/*
1141 	 * Fill in "ps_strings" struct for ps, w, etc.
1142 	 */
1143 	suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
1144 	suword(&arginfo->ps_nargvstr, argc);
1145 
1146 	/*
1147 	 * Fill in argument portion of vector table.
1148 	 */
1149 	for (; argc > 0; --argc) {
1150 		suword(vectp++, (long)(intptr_t)destp);
1151 		while (*stringp++ != 0)
1152 			destp++;
1153 		destp++;
1154 	}
1155 
1156 	/* a null vector table pointer separates the argp's from the envp's */
1157 	suword(vectp++, 0);
1158 
1159 	suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
1160 	suword(&arginfo->ps_nenvstr, envc);
1161 
1162 	/*
1163 	 * Fill in environment portion of vector table.
1164 	 */
1165 	for (; envc > 0; --envc) {
1166 		suword(vectp++, (long)(intptr_t)destp);
1167 		while (*stringp++ != 0)
1168 			destp++;
1169 		destp++;
1170 	}
1171 
1172 	/* end of vector table is a null pointer */
1173 	suword(vectp, 0);
1174 
1175 	return (stack_base);
1176 }
1177 
1178 /*
1179  * Check permissions of file to execute.
1180  *	Called with imgp->vp locked.
1181  *	Return 0 for success or error code on failure.
1182  */
1183 int
1184 exec_check_permissions(imgp)
1185 	struct image_params *imgp;
1186 {
1187 	struct vnode *vp = imgp->vp;
1188 	struct vattr *attr = imgp->attr;
1189 	struct thread *td;
1190 	int error;
1191 
1192 	td = curthread;			/* XXXKSE */
1193 
1194 	/* Get file attributes */
1195 	error = VOP_GETATTR(vp, attr, td->td_ucred, td);
1196 	if (error)
1197 		return (error);
1198 
1199 #ifdef MAC
1200 	error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp);
1201 	if (error)
1202 		return (error);
1203 #endif
1204 
1205 	/*
1206 	 * 1) Check if file execution is disabled for the filesystem that this
1207 	 *	file resides on.
1208 	 * 2) Ensure that at least one execute bit is on - otherwise root
1209 	 *	would always succeed, and we don't want that to happen unless
1210 	 *	the file really is executable.
1211 	 * 3) Ensure that the file is a regular file.
1212 	 */
1213 	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1214 	    ((attr->va_mode & 0111) == 0) ||
1215 	    (attr->va_type != VREG))
1216 		return (EACCES);
1217 
1218 	/*
1219 	 * Zero length files can't be exec'd
1220 	 * Zero-length files can't be exec'd.
1221 	if (attr->va_size == 0)
1222 		return (ENOEXEC);
1223 
1224 	/*
1225 	 *  Check for execute permission to file based on current credentials.
1226 	 */
1227 	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
1228 	if (error)
1229 		return (error);
1230 
1231 	/*
1232 	 * Check number of open-for-writes on the file and deny execution
1233 	 * if there are any.
1234 	 */
1235 	if (vp->v_writecount)
1236 		return (ETXTBSY);
1237 
1238 	/*
1239 	 * Call filesystem specific open routine (which does nothing in the
1240 	 * general case).
1241 	 */
1242 	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
1243 	return (error);
1244 }
1245 
1246 /*
1247  * Exec handler registration
1248  */
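/*
 * Image activators normally register through the EXEC_SET() macro in
 * <sys/imgact.h>, which calls exec_register() when the module loads.
 */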
1249 int
1250 exec_register(execsw_arg)
1251 	const struct execsw *execsw_arg;
1252 {
1253 	const struct execsw **es, **xs, **newexecsw;
1254 	int count = 2;	/* New slot and trailing NULL */
1255 
1256 	if (execsw)
1257 		for (es = execsw; *es; es++)
1258 			count++;
1259 	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1260 	if (newexecsw == NULL)
1261 		return (ENOMEM);
1262 	xs = newexecsw;
1263 	if (execsw)
1264 		for (es = execsw; *es; es++)
1265 			*xs++ = *es;
1266 	*xs++ = execsw_arg;
1267 	*xs = NULL;
1268 	if (execsw)
1269 		free(execsw, M_TEMP);
1270 	execsw = newexecsw;
1271 	return (0);
1272 }
1273 
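/*
 * Remove an exec handler: rebuild the execsw table without the given
 * entry and free the old table.
 */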
1274 int
1275 exec_unregister(execsw_arg)
1276 	const struct execsw *execsw_arg;
1277 {
1278 	const struct execsw **es, **xs, **newexecsw;
1279 	int count = 1;
1280 
1281 	if (execsw == NULL)
1282 		panic("unregister with no handlers left?\n");
1283 
1284 	for (es = execsw; *es; es++) {
1285 		if (*es == execsw_arg)
1286 			break;
1287 	}
1288 	if (*es == NULL)
1289 		return (ENOENT);
1290 	for (es = execsw; *es; es++)
1291 		if (*es != execsw_arg)
1292 			count++;
1293 	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1294 	if (newexecsw == NULL)
1295 		return (ENOMEM);
1296 	xs = newexecsw;
1297 	for (es = execsw; *es; es++)
1298 		if (*es != execsw_arg)
1299 			*xs++ = *es;
1300 	*xs = NULL;
1301 	if (execsw)
1302 		free(execsw, M_TEMP);
1303 	execsw = newexecsw;
1304 	return (0);
1305 }
1306