xref: /freebsd/sys/kern/kern_exec.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*-
2  * Copyright (c) 1993, David Greenman
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ktrace.h"
31 #include "opt_mac.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/eventhandler.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/sysproto.h>
39 #include <sys/signalvar.h>
40 #include <sys/kernel.h>
41 #include <sys/mac.h>
42 #include <sys/mount.h>
43 #include <sys/filedesc.h>
44 #include <sys/fcntl.h>
45 #include <sys/acct.h>
46 #include <sys/exec.h>
47 #include <sys/imgact.h>
48 #include <sys/imgact_elf.h>
49 #include <sys/wait.h>
50 #include <sys/malloc.h>
51 #include <sys/proc.h>
52 #include <sys/pioctl.h>
53 #include <sys/namei.h>
54 #include <sys/resourcevar.h>
55 #include <sys/sf_buf.h>
56 #include <sys/syscallsubr.h>
57 #include <sys/sysent.h>
58 #include <sys/shm.h>
59 #include <sys/sysctl.h>
60 #include <sys/vnode.h>
61 #ifdef KTRACE
62 #include <sys/ktrace.h>
63 #endif
64 
65 #include <vm/vm.h>
66 #include <vm/vm_param.h>
67 #include <vm/pmap.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_extern.h>
72 #include <vm/vm_object.h>
73 #include <vm/vm_pager.h>
74 
75 #include <machine/reg.h>
76 
77 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
78 
79 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
80 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
81 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
82 static int do_execve(struct thread *td, struct image_args *args,
83     struct mac *mac_p);
84 
85 /* XXX This should be vm_size_t. */
86 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
87     NULL, 0, sysctl_kern_ps_strings, "LU", "");
88 
89 /* XXX This should be vm_size_t. */
90 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD,
91     NULL, 0, sysctl_kern_usrstack, "LU", "");
92 
93 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
94     NULL, 0, sysctl_kern_stackprot, "I", "");
95 
96 u_long ps_arg_cache_limit = PAGE_SIZE / 16;
97 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
98     &ps_arg_cache_limit, 0, "");
99 
100 static int
101 sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
102 {
103 	struct proc *p;
104 	int error;
105 
106 	p = curproc;
107 #ifdef SCTL_MASK32
108 	if (req->flags & SCTL_MASK32) {
109 		unsigned int val;
110 		val = (unsigned int)p->p_sysent->sv_psstrings;
111 		error = SYSCTL_OUT(req, &val, sizeof(val));
112 	} else
113 #endif
114 		error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
115 		   sizeof(p->p_sysent->sv_psstrings));
116 	return error;
117 }
118 
119 static int
120 sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
121 {
122 	struct proc *p;
123 	int error;
124 
125 	p = curproc;
126 #ifdef SCTL_MASK32
127 	if (req->flags & SCTL_MASK32) {
128 		unsigned int val;
129 		val = (unsigned int)p->p_sysent->sv_usrstack;
130 		error = SYSCTL_OUT(req, &val, sizeof(val));
131 	} else
132 #endif
133 		error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
134 		    sizeof(p->p_sysent->sv_usrstack));
135 	return error;
136 }
137 
138 static int
139 sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
140 {
141 	struct proc *p;
142 
143 	p = curproc;
144 	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
145 	    sizeof(p->p_sysent->sv_stackprot)));
146 }
147 
148 /*
149  * Each of the items is a pointer to a `const struct execsw', hence the
150  * double pointer here.
151  */
152 static const struct execsw **execsw;
153 
154 #ifndef _SYS_SYSPROTO_H_
155 struct execve_args {
156 	char    *fname;
157 	char    **argv;
158 	char    **envv;
159 };
160 #endif
161 
162 /*
163  * MPSAFE
164  */
165 int
166 execve(td, uap)
167 	struct thread *td;
168 	struct execve_args /* {
169 		char *fname;
170 		char **argv;
171 		char **envv;
172 	} */ *uap;
173 {
174 	int error;
175 	struct image_args args;
176 
177 	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
178 	    uap->argv, uap->envv);
179 
180 	if (error == 0)
181 		error = kern_execve(td, &args, NULL);
182 
183 	exec_free_args(&args);
184 
185 	return (error);
186 }
187 
188 #ifndef _SYS_SYSPROTO_H_
189 struct __mac_execve_args {
190 	char	*fname;
191 	char	**argv;
192 	char	**envv;
193 	struct mac	*mac_p;
194 };
195 #endif
196 
197 /*
198  * MPSAFE
199  */
200 int
201 __mac_execve(td, uap)
202 	struct thread *td;
203 	struct __mac_execve_args /* {
204 		char *fname;
205 		char **argv;
206 		char **envv;
207 		struct mac *mac_p;
208 	} */ *uap;
209 {
210 #ifdef MAC
211 	int error;
212 	struct image_args args;
213 
214 	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
215 	    uap->argv, uap->envv);
216 
217 	if (error == 0)
218 		error = kern_execve(td, &args, uap->mac_p);
219 
220 	exec_free_args(&args);
221 
222 	return (error);
223 #else
224 	return (ENOSYS);
225 #endif
226 }
227 
228 int
229 kern_execve(td, args, mac_p)
230 	struct thread *td;
231 	struct image_args *args;
232 	struct mac *mac_p;
233 {
234 	struct proc *p = td->td_proc;
235 	int error;
236 
237 	if (p->p_flag & P_HADTHREADS) {
238 		PROC_LOCK(p);
239 		if (thread_single(SINGLE_BOUNDARY)) {
240 			PROC_UNLOCK(p);
241 			return (ERESTART);	/* Try again later. */
242 		}
243 		PROC_UNLOCK(p);
244 	}
245 
246 	error = do_execve(td, args, mac_p);
247 
248 	if (p->p_flag & P_HADTHREADS) {
249 		PROC_LOCK(p);
250 		/*
251 		 * If success, we upgrade to SINGLE_EXIT state to
252 		 * force other threads to suicide.
253 		 */
254 		if (error == 0)
255 			thread_single(SINGLE_EXIT);
256 		else
257 			thread_single_end();
258 		PROC_UNLOCK(p);
259 	}
260 
261 	return (error);
262 }
263 
264 /*
265  * In-kernel implementation of execve().  All arguments are assumed to be
266  * userspace pointers from the passed thread.
267  *
268  * MPSAFE
269  */
270 static int
271 do_execve(td, args, mac_p)
272 	struct thread *td;
273 	struct image_args *args;
274 	struct mac *mac_p;
275 {
276 	struct proc *p = td->td_proc;
277 	struct nameidata nd, *ndp;
278 	struct ucred *newcred = NULL, *oldcred;
279 	struct uidinfo *euip;
280 	register_t *stack_base;
281 	int error, len, i;
282 	struct image_params image_params, *imgp;
283 	struct vattr attr;
284 	int (*img_first)(struct image_params *);
285 	struct pargs *oldargs = NULL, *newargs = NULL;
286 	struct sigacts *oldsigacts, *newsigacts;
287 #ifdef KTRACE
288 	struct vnode *tracevp = NULL;
289 	struct ucred *tracecred = NULL;
290 #endif
291 	struct vnode *textvp = NULL;
292 	int credential_changing;
293 	int textset;
294 #ifdef MAC
295 	struct label *interplabel = NULL;
296 	int will_transition;
297 #endif
298 
299 	imgp = &image_params;
300 
301 	/*
302 	 * Lock the process and set the P_INEXEC flag to indicate that
303 	 * it should be left alone until we're done here.  This is
304 	 * necessary to avoid race conditions - e.g. in ptrace() -
305 	 * that might allow a local user to illicitly obtain elevated
306 	 * privileges.
307 	 */
308 	PROC_LOCK(p);
309 	KASSERT((p->p_flag & P_INEXEC) == 0,
310 	    ("%s(): process already has P_INEXEC flag", __func__));
311 	p->p_flag |= P_INEXEC;
312 	PROC_UNLOCK(p);
313 
314 	/*
315 	 * Initialize part of the common data
316 	 */
317 	imgp->proc = p;
318 	imgp->execlabel = NULL;
319 	imgp->attr = &attr;
320 	imgp->entry_addr = 0;
321 	imgp->vmspace_destroyed = 0;
322 	imgp->interpreted = 0;
323 	imgp->interpreter_name[0] = '\0';
324 	imgp->auxargs = NULL;
325 	imgp->vp = NULL;
326 	imgp->object = NULL;
327 	imgp->firstpage = NULL;
328 	imgp->ps_strings = 0;
329 	imgp->auxarg_size = 0;
330 	imgp->args = args;
331 
332 #ifdef MAC
333 	error = mac_execve_enter(imgp, mac_p);
334 	if (error) {
335 		mtx_lock(&Giant);
336 		goto exec_fail;
337 	}
338 #endif
339 
340 	imgp->image_header = NULL;
341 
342 	/*
343 	 * Translate the file name. namei() returns a vnode pointer
344 	 *	in ni_vp amoung other things.
345 	 */
346 	ndp = &nd;
347 	NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
348 	    UIO_SYSSPACE, args->fname, td);
349 
350 	mtx_lock(&Giant);
351 interpret:
352 
353 	error = namei(ndp);
354 	if (error)
355 		goto exec_fail;
356 
357 	imgp->vp = ndp->ni_vp;
358 
359 	/*
360 	 * Check file permissions (also 'opens' file)
361 	 */
362 	error = exec_check_permissions(imgp);
363 	if (error)
364 		goto exec_fail_dealloc;
365 
366 	imgp->object = imgp->vp->v_object;
367 	if (imgp->object != NULL)
368 		vm_object_reference(imgp->object);
369 
370 	/*
371 	 * Set VV_TEXT now so no one can write to the executable while we're
372 	 * activating it.
373 	 *
374 	 * Remember if this was set before and unset it in case this is not
375 	 * actually an executable image.
376 	 */
377 	textset = imgp->vp->v_vflag & VV_TEXT;
378 	imgp->vp->v_vflag |= VV_TEXT;
379 
380 	error = exec_map_first_page(imgp);
381 	if (error)
382 		goto exec_fail_dealloc;
383 
384 	/*
385 	 *	If the current process has a special image activator it
386 	 *	wants to try first, call it.   For example, emulating shell
387 	 *	scripts differently.
388 	 */
389 	error = -1;
390 	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
391 		error = img_first(imgp);
392 
393 	/*
394 	 *	Loop through the list of image activators, calling each one.
395 	 *	An activator returns -1 if there is no match, 0 on success,
396 	 *	and an error otherwise.
397 	 */
398 	for (i = 0; error == -1 && execsw[i]; ++i) {
399 		if (execsw[i]->ex_imgact == NULL ||
400 		    execsw[i]->ex_imgact == img_first) {
401 			continue;
402 		}
403 		error = (*execsw[i]->ex_imgact)(imgp);
404 	}
405 
406 	if (error) {
407 		if (error == -1) {
408 			if (textset == 0)
409 				imgp->vp->v_vflag &= ~VV_TEXT;
410 			error = ENOEXEC;
411 		}
412 		goto exec_fail_dealloc;
413 	}
414 
415 	/*
416 	 * Special interpreter operation, cleanup and loop up to try to
417 	 * activate the interpreter.
418 	 */
419 	if (imgp->interpreted) {
420 		exec_unmap_first_page(imgp);
421 		/*
422 		 * VV_TEXT needs to be unset for scripts.  There is a short
423 		 * period before we determine that something is a script where
424 		 * VV_TEXT will be set. The vnode lock is held over this
425 		 * entire period so nothing should illegitimately be blocked.
426 		 */
427 		imgp->vp->v_vflag &= ~VV_TEXT;
428 		/* free name buffer and old vnode */
429 		NDFREE(ndp, NDF_ONLY_PNBUF);
430 #ifdef MAC
431 		interplabel = mac_vnode_label_alloc();
432 		mac_copy_vnode_label(ndp->ni_vp->v_label, interplabel);
433 #endif
434 		vput(ndp->ni_vp);
435 		vm_object_deallocate(imgp->object);
436 		imgp->object = NULL;
437 		/* set new name to that of the interpreter */
438 		NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
439 		    UIO_SYSSPACE, imgp->interpreter_name, td);
440 		goto interpret;
441 	}
442 
443 	/*
444 	 * Copy out strings (args and env) and initialize stack base
445 	 */
446 	if (p->p_sysent->sv_copyout_strings)
447 		stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
448 	else
449 		stack_base = exec_copyout_strings(imgp);
450 
451 	/*
452 	 * If custom stack fixup routine present for this process
453 	 * let it do the stack setup.
454 	 * Else stuff argument count as first item on stack
455 	 */
456 	if (p->p_sysent->sv_fixup != NULL)
457 		(*p->p_sysent->sv_fixup)(&stack_base, imgp);
458 	else
459 		suword(--stack_base, imgp->args->argc);
460 
461 	/*
462 	 * For security and other reasons, the file descriptor table cannot
463 	 * be shared after an exec.
464 	 */
465 	fdunshare(p, td);
466 
467 	/*
468 	 * Malloc things before we need locks.
469 	 */
470 	newcred = crget();
471 	euip = uifind(attr.va_uid);
472 	i = imgp->args->begin_envv - imgp->args->begin_argv;
473 	if (ps_arg_cache_limit >= i + sizeof(struct pargs))
474 		newargs = pargs_alloc(i);
475 
476 	/* close files on exec */
477 	fdcloseexec(td);
478 
479 	/* Get a reference to the vnode prior to locking the proc */
480 	VREF(ndp->ni_vp);
481 
482 	/*
483 	 * For security and other reasons, signal handlers cannot
484 	 * be shared after an exec. The new process gets a copy of the old
485 	 * handlers. In execsigs(), the new process will have its signals
486 	 * reset.
487 	 */
488 	PROC_LOCK(p);
489 	if (sigacts_shared(p->p_sigacts)) {
490 		oldsigacts = p->p_sigacts;
491 		PROC_UNLOCK(p);
492 		newsigacts = sigacts_alloc();
493 		sigacts_copy(newsigacts, oldsigacts);
494 		PROC_LOCK(p);
495 		p->p_sigacts = newsigacts;
496 	} else
497 		oldsigacts = NULL;
498 
499 	/* Stop profiling */
500 	stopprofclock(p);
501 
502 	/* reset caught signals */
503 	execsigs(p);
504 
505 	/* name this process - nameiexec(p, ndp) */
506 	len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN);
507 	bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
508 	p->p_comm[len] = 0;
509 
510 	/*
511 	 * mark as execed, wakeup the process that vforked (if any) and tell
512 	 * it that it now has its own resources back
513 	 */
514 	p->p_flag |= P_EXEC;
515 	if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
516 		p->p_flag &= ~P_PPWAIT;
517 		wakeup(p->p_pptr);
518 	}
519 
520 	/*
521 	 * Implement image setuid/setgid.
522 	 *
523 	 * Don't honor setuid/setgid if the filesystem prohibits it or if
524 	 * the process is being traced.
525 	 *
526 	 * XXXMAC: For the time being, use NOSUID to also prohibit
527 	 * transitions on the file system.
528 	 */
529 	oldcred = p->p_ucred;
530 	credential_changing = 0;
531 	credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
532 	    attr.va_uid;
533 	credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
534 	    attr.va_gid;
535 #ifdef MAC
536 	will_transition = mac_execve_will_transition(oldcred, imgp->vp,
537 	    interplabel, imgp);
538 	credential_changing |= will_transition;
539 #endif
540 
541 	if (credential_changing &&
542 	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
543 	    (p->p_flag & P_TRACED) == 0) {
544 		/*
545 		 * Turn off syscall tracing for set-id programs, except for
546 		 * root.  Record any set-id flags first to make sure that
547 		 * we do not regain any tracing during a possible block.
548 		 */
549 		setsugid(p);
550 #ifdef KTRACE
551 		if (p->p_tracevp != NULL && suser_cred(oldcred, SUSER_ALLOWJAIL)) {
552 			mtx_lock(&ktrace_mtx);
553 			p->p_traceflag = 0;
554 			tracevp = p->p_tracevp;
555 			p->p_tracevp = NULL;
556 			tracecred = p->p_tracecred;
557 			p->p_tracecred = NULL;
558 			mtx_unlock(&ktrace_mtx);
559 		}
560 #endif
561 		/*
562 		 * Close any file descriptors 0..2 that reference procfs,
563 		 * then make sure file descriptors 0..2 are in use.
564 		 *
565 		 * setugidsafety() may call closef() and then pfind()
566 		 * which may grab the process lock.
567 		 * fdcheckstd() may call falloc() which may block to
568 		 * allocate memory, so temporarily drop the process lock.
569 		 */
570 		PROC_UNLOCK(p);
571 		setugidsafety(td);
572 		error = fdcheckstd(td);
573 		if (error != 0)
574 			goto done1;
575 		PROC_LOCK(p);
576 		/*
577 		 * Set the new credentials.
578 		 */
579 		crcopy(newcred, oldcred);
580 		if (attr.va_mode & VSUID)
581 			change_euid(newcred, euip);
582 		if (attr.va_mode & VSGID)
583 			change_egid(newcred, attr.va_gid);
584 #ifdef MAC
585 		if (will_transition) {
586 			mac_execve_transition(oldcred, newcred, imgp->vp,
587 			    interplabel, imgp);
588 		}
589 #endif
590 		/*
591 		 * Implement correct POSIX saved-id behavior.
592 		 *
593 		 * XXXMAC: Note that the current logic will save the
594 		 * uid and gid if a MAC domain transition occurs, even
595 		 * though maybe it shouldn't.
596 		 */
597 		change_svuid(newcred, newcred->cr_uid);
598 		change_svgid(newcred, newcred->cr_gid);
599 		p->p_ucred = newcred;
600 		newcred = NULL;
601 	} else {
602 		if (oldcred->cr_uid == oldcred->cr_ruid &&
603 		    oldcred->cr_gid == oldcred->cr_rgid)
604 			p->p_flag &= ~P_SUGID;
605 		/*
606 		 * Implement correct POSIX saved-id behavior.
607 		 *
608 		 * XXX: It's not clear that the existing behavior is
609 		 * POSIX-compliant.  A number of sources indicate that the
610 		 * saved uid/gid should only be updated if the new ruid is
611 		 * not equal to the old ruid, or the new euid is not equal
612 		 * to the old euid and the new euid is not equal to the old
613 		 * ruid.  The FreeBSD code always updates the saved uid/gid.
614 		 * Also, this code uses the new (replaced) euid and egid as
615 		 * the source, which may or may not be the right ones to use.
616 		 */
617 		if (oldcred->cr_svuid != oldcred->cr_uid ||
618 		    oldcred->cr_svgid != oldcred->cr_gid) {
619 			crcopy(newcred, oldcred);
620 			change_svuid(newcred, newcred->cr_uid);
621 			change_svgid(newcred, newcred->cr_gid);
622 			p->p_ucred = newcred;
623 			newcred = NULL;
624 		}
625 	}
626 
627 	/*
628 	 * Store the vp for use in procfs.  This vnode was referenced prior
629 	 * to locking the proc lock.
630 	 */
631 	textvp = p->p_textvp;
632 	p->p_textvp = ndp->ni_vp;
633 
634 	/*
635 	 * Notify others that we exec'd, and clear the P_INEXEC flag
636 	 * as we're now a bona fide freshly-execed process.
637 	 */
638 	KNOTE_LOCKED(&p->p_klist, NOTE_EXEC);
639 	p->p_flag &= ~P_INEXEC;
640 
641 	/*
642 	 * If tracing the process, trap to debugger so breakpoints
643 	 * can be set before the program executes.
644 	 * Use tdsignal to deliver signal to current thread, use
645 	 * psignal may cause the signal to be delivered to wrong thread
646 	 * because that thread will exit, remember we are going to enter
647 	 * single thread mode.
648 	 */
649 	if (p->p_flag & P_TRACED)
650 		tdsignal(td, SIGTRAP, SIGTARGET_TD);
651 
652 	/* clear "fork but no exec" flag, as we _are_ execing */
653 	p->p_acflag &= ~AFORK;
654 
655 	/* Free any previous argument cache */
656 	oldargs = p->p_args;
657 	p->p_args = NULL;
658 
659 	/* Cache arguments if they fit inside our allowance */
660 	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
661 		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
662 		p->p_args = newargs;
663 		newargs = NULL;
664 	}
665 	PROC_UNLOCK(p);
666 
667 	/* Set values passed into the program in registers. */
668 	if (p->p_sysent->sv_setregs)
669 		(*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
670 		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
671 	else
672 		exec_setregs(td, imgp->entry_addr,
673 		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
674 
675 done1:
676 	/*
677 	 * Free any resources malloc'd earlier that we didn't use.
678 	 */
679 	uifree(euip);
680 	if (newcred == NULL)
681 		crfree(oldcred);
682 	else
683 		crfree(newcred);
684 	/*
685 	 * Handle deferred decrement of ref counts.
686 	 */
687 	if (textvp != NULL)
688 		vrele(textvp);
689 	if (ndp->ni_vp && error != 0)
690 		vrele(ndp->ni_vp);
691 #ifdef KTRACE
692 	if (tracevp != NULL)
693 		vrele(tracevp);
694 	if (tracecred != NULL)
695 		crfree(tracecred);
696 #endif
697 	if (oldargs != NULL)
698 		pargs_drop(oldargs);
699 	if (newargs != NULL)
700 		pargs_drop(newargs);
701 	if (oldsigacts != NULL)
702 		sigacts_free(oldsigacts);
703 
704 exec_fail_dealloc:
705 
706 	/*
707 	 * free various allocated resources
708 	 */
709 	if (imgp->firstpage != NULL)
710 		exec_unmap_first_page(imgp);
711 
712 	if (imgp->vp != NULL) {
713 		NDFREE(ndp, NDF_ONLY_PNBUF);
714 		vput(imgp->vp);
715 	}
716 
717 	if (imgp->object != NULL)
718 		vm_object_deallocate(imgp->object);
719 
720 	if (error == 0) {
721 		/*
722 		 * Stop the process here if its stop event mask has
723 		 * the S_EXEC bit set.
724 		 */
725 		STOPEVENT(p, S_EXEC, 0);
726 		goto done2;
727 	}
728 
729 exec_fail:
730 	/* we're done here, clear P_INEXEC */
731 	PROC_LOCK(p);
732 	p->p_flag &= ~P_INEXEC;
733 	PROC_UNLOCK(p);
734 
735 	if (imgp->vmspace_destroyed) {
736 		/* sorry, no more process anymore. exit gracefully */
737 #ifdef MAC
738 		mac_execve_exit(imgp);
739 		if (interplabel != NULL)
740 			mac_vnode_label_free(interplabel);
741 #endif
742 		mtx_unlock(&Giant);
743 		exit1(td, W_EXITCODE(0, SIGABRT));
744 		/* NOT REACHED */
745 		error = 0;
746 	}
747 done2:
748 #ifdef MAC
749 	mac_execve_exit(imgp);
750 	if (interplabel != NULL)
751 		mac_vnode_label_free(interplabel);
752 #endif
753 	mtx_unlock(&Giant);
754 	return (error);
755 }
756 
757 int
758 exec_map_first_page(imgp)
759 	struct image_params *imgp;
760 {
761 	int rv, i;
762 	int initial_pagein;
763 	vm_page_t ma[VM_INITIAL_PAGEIN];
764 	vm_object_t object;
765 
766 	GIANT_REQUIRED;
767 
768 	if (imgp->firstpage != NULL)
769 		exec_unmap_first_page(imgp);
770 
771 	object = imgp->vp->v_object;
772 	VM_OBJECT_LOCK(object);
773 	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
774 	if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
775 		initial_pagein = VM_INITIAL_PAGEIN;
776 		if (initial_pagein > object->size)
777 			initial_pagein = object->size;
778 		for (i = 1; i < initial_pagein; i++) {
779 			if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
780 				if (ma[i]->valid)
781 					break;
782 				vm_page_lock_queues();
783 				if ((ma[i]->flags & PG_BUSY) || ma[i]->busy) {
784 					vm_page_unlock_queues();
785 					break;
786 				}
787 				vm_page_busy(ma[i]);
788 				vm_page_unlock_queues();
789 			} else {
790 				ma[i] = vm_page_alloc(object, i,
791 				    VM_ALLOC_NORMAL);
792 				if (ma[i] == NULL)
793 					break;
794 			}
795 		}
796 		initial_pagein = i;
797 		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
798 		ma[0] = vm_page_lookup(object, 0);
799 		if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
800 		    (ma[0]->valid == 0)) {
801 			if (ma[0]) {
802 				vm_page_lock_queues();
803 				pmap_remove_all(ma[0]);
804 				vm_page_free(ma[0]);
805 				vm_page_unlock_queues();
806 			}
807 			VM_OBJECT_UNLOCK(object);
808 			return (EIO);
809 		}
810 	}
811 	vm_page_lock_queues();
812 	vm_page_hold(ma[0]);
813 	vm_page_wakeup(ma[0]);
814 	vm_page_unlock_queues();
815 	VM_OBJECT_UNLOCK(object);
816 
817 	imgp->firstpage = sf_buf_alloc(ma[0], 0);
818 	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
819 
820 	return (0);
821 }
822 
823 void
824 exec_unmap_first_page(imgp)
825 	struct image_params *imgp;
826 {
827 	vm_page_t m;
828 
829 	if (imgp->firstpage != NULL) {
830 		m = sf_buf_page(imgp->firstpage);
831 		sf_buf_free(imgp->firstpage);
832 		imgp->firstpage = NULL;
833 		vm_page_lock_queues();
834 		vm_page_unhold(m);
835 		vm_page_unlock_queues();
836 	}
837 }
838 
839 /*
840  * Destroy old address space, and allocate a new stack
841  *	The new stack is only SGROWSIZ large because it is grown
842  *	automatically in trap.c.
843  */
844 int
845 exec_new_vmspace(imgp, sv)
846 	struct image_params *imgp;
847 	struct sysentvec *sv;
848 {
849 	int error;
850 	struct proc *p = imgp->proc;
851 	struct vmspace *vmspace = p->p_vmspace;
852 	vm_offset_t stack_addr;
853 	vm_map_t map;
854 
855 	GIANT_REQUIRED;
856 
857 	imgp->vmspace_destroyed = 1;
858 
859 	/* Called with Giant held, do not depend on it! */
860 	EVENTHANDLER_INVOKE(process_exec, p);
861 
862 	/*
863 	 * Here is as good a place as any to do any resource limit cleanups.
864 	 * This is needed if a 64 bit binary exec's a 32 bit binary - the
865 	 * data size limit may need to be changed to a value that makes
866 	 * sense for the 32 bit binary.
867 	 */
868 	if (sv->sv_fixlimits != NULL)
869 		sv->sv_fixlimits(imgp);
870 
871 	/*
872 	 * Blow away entire process VM, if address space not shared,
873 	 * otherwise, create a new VM space so that other threads are
874 	 * not disrupted
875 	 */
876 	map = &vmspace->vm_map;
877 	if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
878 	    vm_map_max(map) == sv->sv_maxuser) {
879 		shmexit(vmspace);
880 		pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map),
881 		    vm_map_max(map));
882 		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
883 	} else {
884 		vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
885 		vmspace = p->p_vmspace;
886 		map = &vmspace->vm_map;
887 	}
888 
889 	/* Allocate a new stack */
890 	stack_addr = sv->sv_usrstack - maxssiz;
891 	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
892 	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
893 	if (error)
894 		return (error);
895 
896 #ifdef __ia64__
897 	/* Allocate a new register stack */
898 	stack_addr = IA64_BACKINGSTORE;
899 	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
900 	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP);
901 	if (error)
902 		return (error);
903 #endif
904 
905 	/* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
906 	 * VM_STACK case, but they are still used to monitor the size of the
907 	 * process stack so we can check the stack rlimit.
908 	 */
909 	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
910 	vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz;
911 
912 	return (0);
913 }
914 
915 /*
916  * Copy out argument and environment strings from the old process
917  *	address space into the temporary string buffer.
918  */
919 int
920 exec_copyin_args(struct image_args *args, char *fname,
921     enum uio_seg segflg, char **argv, char **envv)
922 {
923 	char *argp, *envp;
924 	int error;
925 	size_t length;
926 
927 	error = 0;
928 
929 	bzero(args, sizeof(*args));
930 	if (argv == NULL)
931 		return (EFAULT);
932 	/*
933 	 * Allocate temporary demand zeroed space for argument and
934 	 *	environment strings
935 	 */
936 	args->buf = (char *) kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
937 	if (args->buf == NULL)
938 		return (ENOMEM);
939 	args->begin_argv = args->buf;
940 	args->endp = args->begin_argv;
941 	args->stringspace = ARG_MAX;
942 
943 	args->fname = args->buf + ARG_MAX;
944 
945 	/*
946 	 * Copy the file name.
947 	 */
948 	error = (segflg == UIO_SYSSPACE) ?
949 	    copystr(fname, args->fname, PATH_MAX, &length) :
950 	    copyinstr(fname, args->fname, PATH_MAX, &length);
951 	if (error != 0)
952 		return (error);
953 
954 	/*
955 	 * extract arguments first
956 	 */
957 	while ((argp = (caddr_t) (intptr_t) fuword(argv++))) {
958 		if (argp == (caddr_t) -1)
959 			return (EFAULT);
960 		if ((error = copyinstr(argp, args->endp,
961 		    args->stringspace, &length))) {
962 			if (error == ENAMETOOLONG)
963 				return (E2BIG);
964 			return (error);
965 		}
966 		args->stringspace -= length;
967 		args->endp += length;
968 		args->argc++;
969 	}
970 
971 	args->begin_envv = args->endp;
972 
973 	/*
974 	 * extract environment strings
975 	 */
976 	if (envv) {
977 		while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
978 			if (envp == (caddr_t)-1)
979 				return (EFAULT);
980 			if ((error = copyinstr(envp, args->endp,
981 			    args->stringspace, &length))) {
982 				if (error == ENAMETOOLONG)
983 					return (E2BIG);
984 				return (error);
985 			}
986 			args->stringspace -= length;
987 			args->endp += length;
988 			args->envc++;
989 		}
990 	}
991 
992 	return (0);
993 }
994 
995 void
996 exec_free_args(struct image_args *args)
997 {
998 
999 	if (args->buf) {
1000 		kmem_free_wakeup(exec_map,
1001 		    (vm_offset_t)args->buf, PATH_MAX + ARG_MAX);
1002 		args->buf = NULL;
1003 	}
1004 }
1005 
1006 /*
1007  * Copy strings out to the new process address space, constructing
1008  *	new arg and env vector tables. Return a pointer to the base
1009  *	so that it can be used as the initial stack pointer.
1010  */
1011 register_t *
1012 exec_copyout_strings(imgp)
1013 	struct image_params *imgp;
1014 {
1015 	int argc, envc;
1016 	char **vectp;
1017 	char *stringp, *destp;
1018 	register_t *stack_base;
1019 	struct ps_strings *arginfo;
1020 	struct proc *p;
1021 	int szsigcode;
1022 
1023 	/*
1024 	 * Calculate string base and vector table pointers.
1025 	 * Also deal with signal trampoline code for this exec type.
1026 	 */
1027 	p = imgp->proc;
1028 	szsigcode = 0;
1029 	arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
1030 	if (p->p_sysent->sv_szsigcode != NULL)
1031 		szsigcode = *(p->p_sysent->sv_szsigcode);
1032 	destp =	(caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
1033 	    roundup((ARG_MAX - imgp->args->stringspace), sizeof(char *));
1034 
1035 	/*
1036 	 * install sigcode
1037 	 */
1038 	if (szsigcode)
1039 		copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
1040 		    szsigcode), szsigcode);
1041 
1042 	/*
1043 	 * If we have a valid auxargs ptr, prepare some room
1044 	 * on the stack.
1045 	 */
1046 	if (imgp->auxargs) {
1047 		/*
1048 		 * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for
1049 		 * lower compatibility.
1050 		 */
1051 		imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
1052 		    (AT_COUNT * 2);
1053 		/*
1054 		 * The '+ 2' is for the null pointers at the end of each of
1055 		 * the arg and env vector sets,and imgp->auxarg_size is room
1056 		 * for argument of Runtime loader.
1057 		 */
1058 		vectp = (char **)(destp - (imgp->args->argc +
1059 		    imgp->args->envc + 2 + imgp->auxarg_size) *
1060 		    sizeof(char *));
1061 
1062 	} else {
1063 		/*
1064 		 * The '+ 2' is for the null pointers at the end of each of
1065 		 * the arg and env vector sets
1066 		 */
1067 		vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) *
1068 		    sizeof(char *));
1069 	}
1070 
1071 	/*
1072 	 * vectp also becomes our initial stack base
1073 	 */
1074 	stack_base = (register_t *)vectp;
1075 
1076 	stringp = imgp->args->begin_argv;
1077 	argc = imgp->args->argc;
1078 	envc = imgp->args->envc;
1079 
1080 	/*
1081 	 * Copy out strings - arguments and environment.
1082 	 */
1083 	copyout(stringp, destp, ARG_MAX - imgp->args->stringspace);
1084 
1085 	/*
1086 	 * Fill in "ps_strings" struct for ps, w, etc.
1087 	 */
1088 	suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
1089 	suword(&arginfo->ps_nargvstr, argc);
1090 
1091 	/*
1092 	 * Fill in argument portion of vector table.
1093 	 */
1094 	for (; argc > 0; --argc) {
1095 		suword(vectp++, (long)(intptr_t)destp);
1096 		while (*stringp++ != 0)
1097 			destp++;
1098 		destp++;
1099 	}
1100 
1101 	/* a null vector table pointer separates the argp's from the envp's */
1102 	suword(vectp++, 0);
1103 
1104 	suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
1105 	suword(&arginfo->ps_nenvstr, envc);
1106 
1107 	/*
1108 	 * Fill in environment portion of vector table.
1109 	 */
1110 	for (; envc > 0; --envc) {
1111 		suword(vectp++, (long)(intptr_t)destp);
1112 		while (*stringp++ != 0)
1113 			destp++;
1114 		destp++;
1115 	}
1116 
1117 	/* end of vector table is a null pointer */
1118 	suword(vectp, 0);
1119 
1120 	return (stack_base);
1121 }
1122 
1123 /*
1124  * Check permissions of file to execute.
1125  *	Called with imgp->vp locked.
1126  *	Return 0 for success or error code on failure.
1127  */
1128 int
1129 exec_check_permissions(imgp)
1130 	struct image_params *imgp;
1131 {
1132 	struct vnode *vp = imgp->vp;
1133 	struct vattr *attr = imgp->attr;
1134 	struct thread *td;
1135 	int error;
1136 
1137 	td = curthread;			/* XXXKSE */
1138 
1139 	/* Get file attributes */
1140 	error = VOP_GETATTR(vp, attr, td->td_ucred, td);
1141 	if (error)
1142 		return (error);
1143 
1144 #ifdef MAC
1145 	error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp);
1146 	if (error)
1147 		return (error);
1148 #endif
1149 
1150 	/*
1151 	 * 1) Check if file execution is disabled for the filesystem that this
1152 	 *	file resides on.
1153 	 * 2) Insure that at least one execute bit is on - otherwise root
1154 	 *	will always succeed, and we don't want to happen unless the
1155 	 *	file really is executable.
1156 	 * 3) Insure that the file is a regular file.
1157 	 */
1158 	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1159 	    ((attr->va_mode & 0111) == 0) ||
1160 	    (attr->va_type != VREG))
1161 		return (EACCES);
1162 
1163 	/*
1164 	 * Zero length files can't be exec'd
1165 	 */
1166 	if (attr->va_size == 0)
1167 		return (ENOEXEC);
1168 
1169 	/*
1170 	 *  Check for execute permission to file based on current credentials.
1171 	 */
1172 	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
1173 	if (error)
1174 		return (error);
1175 
1176 	/*
1177 	 * Check number of open-for-writes on the file and deny execution
1178 	 * if there are any.
1179 	 */
1180 	if (vp->v_writecount)
1181 		return (ETXTBSY);
1182 
1183 	/*
1184 	 * Call filesystem specific open routine (which does nothing in the
1185 	 * general case).
1186 	 */
1187 	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
1188 	return (error);
1189 }
1190 
1191 /*
1192  * Exec handler registration
1193  */
1194 int
1195 exec_register(execsw_arg)
1196 	const struct execsw *execsw_arg;
1197 {
1198 	const struct execsw **es, **xs, **newexecsw;
1199 	int count = 2;	/* New slot and trailing NULL */
1200 
1201 	if (execsw)
1202 		for (es = execsw; *es; es++)
1203 			count++;
1204 	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1205 	if (newexecsw == NULL)
1206 		return (ENOMEM);
1207 	xs = newexecsw;
1208 	if (execsw)
1209 		for (es = execsw; *es; es++)
1210 			*xs++ = *es;
1211 	*xs++ = execsw_arg;
1212 	*xs = NULL;
1213 	if (execsw)
1214 		free(execsw, M_TEMP);
1215 	execsw = newexecsw;
1216 	return (0);
1217 }
1218 
1219 int
1220 exec_unregister(execsw_arg)
1221 	const struct execsw *execsw_arg;
1222 {
1223 	const struct execsw **es, **xs, **newexecsw;
1224 	int count = 1;
1225 
1226 	if (execsw == NULL)
1227 		panic("unregister with no handlers left?\n");
1228 
1229 	for (es = execsw; *es; es++) {
1230 		if (*es == execsw_arg)
1231 			break;
1232 	}
1233 	if (*es == NULL)
1234 		return (ENOENT);
1235 	for (es = execsw; *es; es++)
1236 		if (*es != execsw_arg)
1237 			count++;
1238 	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1239 	if (newexecsw == NULL)
1240 		return (ENOMEM);
1241 	xs = newexecsw;
1242 	for (es = execsw; *es; es++)
1243 		if (*es != execsw_arg)
1244 			*xs++ = *es;
1245 	*xs = NULL;
1246 	if (execsw)
1247 		free(execsw, M_TEMP);
1248 	execsw = newexecsw;
1249 	return (0);
1250 }
1251