xref: /illumos-gate/usr/src/uts/common/os/exec.c (revision dd72704bd9e794056c558153663c739e2012d721)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*	Copyright (c) 1988 AT&T	*/
27 /*	  All Rights Reserved	*/
28 /*
29  * Copyright 2019 Joyent, Inc.
30  * Copyright 2022 Oxide Computer Company
31  */
32 
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/sysmacros.h>
36 #include <sys/systm.h>
37 #include <sys/signal.h>
38 #include <sys/cred_impl.h>
39 #include <sys/policy.h>
40 #include <sys/user.h>
41 #include <sys/errno.h>
42 #include <sys/file.h>
43 #include <sys/vfs.h>
44 #include <sys/vnode.h>
45 #include <sys/mman.h>
46 #include <sys/acct.h>
47 #include <sys/cpuvar.h>
48 #include <sys/proc.h>
49 #include <sys/cmn_err.h>
50 #include <sys/debug.h>
51 #include <sys/pathname.h>
52 #include <sys/vm.h>
53 #include <sys/lgrp.h>
54 #include <sys/vtrace.h>
55 #include <sys/exec.h>
56 #include <sys/exechdr.h>
57 #include <sys/kmem.h>
58 #include <sys/prsystm.h>
59 #include <sys/modctl.h>
60 #include <sys/vmparam.h>
61 #include <sys/door.h>
62 #include <sys/schedctl.h>
63 #include <sys/utrap.h>
64 #include <sys/systeminfo.h>
65 #include <sys/stack.h>
66 #include <sys/rctl.h>
67 #include <sys/dtrace.h>
68 #include <sys/lwpchan_impl.h>
69 #include <sys/pool.h>
70 #include <sys/sdt.h>
71 #include <sys/brand.h>
72 #include <sys/klpd.h>
73 #include <sys/random.h>
74 
75 #include <c2/audit.h>
76 
77 #include <vm/hat.h>
78 #include <vm/anon.h>
79 #include <vm/as.h>
80 #include <vm/seg.h>
81 #include <vm/seg_vn.h>
82 #include <vm/seg_hole.h>
83 
/*
 * Flag bits returned by execsetid() in *privflags describing what credential
 * work gexec() must perform for the new image.
 */
#define	PRIV_RESET		0x01	/* needs to reset privs */
#define	PRIV_SETID		0x02	/* needs to change uids */
#define	PRIV_SETUGID		0x04	/* is setuid/setgid/forced privs */
#define	PRIV_INCREASE		0x08	/* child runs with more privs */
#define	MAC_FLAGS		0x10	/* need to adjust MAC flags */
#define	PRIV_FORCED		0x20	/* has forced privileges */

static int execsetid(struct vnode *, struct vattr *, uid_t *, uid_t *,
    priv_set_t *, cred_t *, const char *);
static int hold_execsw(struct execsw *);

/*
 * Hardware capability words exported to new processes via the aux vector;
 * filled in lazily (initially zero).
 */
uint_t auxv_hwcap = 0;	/* auxv AT_SUN_HWCAP value; determined on the fly */
uint_t auxv_hwcap_2 = 0;	/* AT_SUN_HWCAP2 */
uint_t auxv_hwcap_3 = 0;	/* AT_SUN_HWCAP3 */
#if defined(_SYSCALL32_IMPL)
uint_t auxv_hwcap32 = 0;	/* 32-bit version of auxv_hwcap */
uint_t auxv_hwcap32_2 = 0;	/* 32-bit version of auxv_hwcap2 */
uint_t auxv_hwcap32_3 = 0;	/* 32-bit version of auxv_hwcap3 */
#endif

/* Process flags that track set-id execution state. */
#define	PSUIDFLAGS		(SNOCD|SUGID)

/*
 * These are consumed within the specific exec modules, but are defined here
 * because
 *
 * 1) The exec modules are unloadable, which would make this near useless.
 *
 * 2) We want them to be common across all of them, should more than ELF come
 *    to support them.
 *
 * All must be powers of 2.
 */
size_t aslr_max_brk_skew = 16 * 1024 * 1024; /* 16MB */
#pragma weak exec_stackgap = aslr_max_stack_skew /* Old, compatible name */
size_t aslr_max_stack_skew = 64 * 1024; /* 64KB */

/*
 * Size of guard segment for 64-bit processes and minimum size it can be shrunk
 * to in the case of grow() operations.  These are kept as variables in case
 * they need to be tuned in an emergency.
 */
size_t stack_guard_seg_sz = 256 * 1024 * 1024;
size_t stack_guard_min_sz = 64 * 1024 * 1024;
128 
129 /*
130  * exece() - system call wrapper around exec_common()
131  */
132 int
133 exece(const char *fname, const char **argp, const char **envp)
134 {
135 	int error;
136 
137 	error = exec_common(fname, argp, envp, EBA_NONE);
138 	return (error ? (set_errno(error)) : 0);
139 }
140 
/*
 * exec_common() - the guts of exec(2).
 *
 * Resolves fname, performs branding checks, delegates the actual image
 * switch-over to gexec(), and then resets the (now single-threaded)
 * process to a pristine post-exec state: signal dispositions, profiling,
 * close-on-exec files, lwp directory, /proc bookkeeping, etc.
 *
 * brand_action is one of EBA_NONE/EBA_NATIVE/EBA_BRAND and controls
 * whether the process is branded or unbranded across the exec.
 *
 * Returns 0 on success, or an errno value on failure (the caller is
 * responsible for set_errno()).
 */
int
exec_common(const char *fname, const char **argp, const char **envp,
    int brand_action)
{
	vnode_t *vp = NULL, *dir = NULL, *tmpvp = NULL;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct user *up = PTOU(p);
	size_t execsz;		/* temporary count of exec size */
	int i;
	int error;
	char exec_file[MAXCOMLEN+1];
	struct pathname pn;
	struct pathname resolvepn;
	struct uarg args;
	struct execa ua;
	k_sigset_t savedmask;
	lwpdir_t *lwpdir = NULL;
	tidhash_t *tidhash;
	lwpdir_t *old_lwpdir = NULL;
	uint_t old_lwpdir_sz;
	tidhash_t *old_tidhash;
	uint_t old_tidhash_sz;
	ret_tidhash_t *ret_tidhash;
	lwpent_t *lep;
	boolean_t brandme = B_FALSE;

	/*
	 * exec() is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (ENOTSUP);

	if (brand_action != EBA_NONE) {
		/*
		 * Brand actions are not supported for processes that are not
		 * running in a branded zone.
		 */
		if (!ZONE_IS_BRANDED(p->p_zone))
			return (ENOTSUP);

		if (brand_action == EBA_NATIVE) {
			/* Only branded processes can be unbranded */
			if (!PROC_IS_BRANDED(p))
				return (ENOTSUP);
		} else {
			/* Only unbranded processes can be branded */
			if (PROC_IS_BRANDED(p))
				return (ENOTSUP);
			brandme = B_TRUE;
		}
	} else {
		/*
		 * If this is a native zone, or if the process is already
		 * branded, then we don't need to do anything.  If this is
		 * a native process in a branded zone, we need to brand the
		 * process as it exec()s the new binary.
		 */
		if (ZONE_IS_BRANDED(p->p_zone) && !PROC_IS_BRANDED(p))
			brandme = B_TRUE;
	}

	/*
	 * Inform /proc that an exec() has started.
	 * Hold signals that are ignored by default so that we will
	 * not be interrupted by a signal that will be ignored after
	 * successful completion of gexec().
	 */
	mutex_enter(&p->p_lock);
	prexecstart();
	schedctl_finish_sigblock(curthread);
	savedmask = curthread->t_hold;
	sigorset(&curthread->t_hold, &ignoredefault);
	mutex_exit(&p->p_lock);

	/*
	 * Look up path name and remember last component for later.
	 * To help coreadm expand its %d token, we attempt to save
	 * the directory containing the executable in p_execdir. The
	 * first call to lookuppn() may fail and return EINVAL because
	 * dirvpp is non-NULL. In that case, we make a second call to
	 * lookuppn() with dirvpp set to NULL; p_execdir will be NULL,
	 * but coreadm is allowed to expand %d to the empty string and
	 * there are other cases in which that failure may occur.
	 */
	if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
		goto out;
	pn_alloc(&resolvepn);
	if ((error = lookuppn(&pn, &resolvepn, FOLLOW, &dir, &vp)) != 0) {
		pn_free(&resolvepn);
		pn_free(&pn);
		if (error != EINVAL)
			goto out;

		dir = NULL;
		if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
			goto out;
		pn_alloc(&resolvepn);
		if ((error = lookuppn(&pn, &resolvepn, FOLLOW, NULLVPP,
		    &vp)) != 0) {
			pn_free(&resolvepn);
			pn_free(&pn);
			goto out;
		}
	}
	if (vp == NULL) {
		if (dir != NULL)
			VN_RELE(dir);
		error = ENOENT;
		pn_free(&resolvepn);
		pn_free(&pn);
		goto out;
	}

	if ((error = secpolicy_basic_exec(CRED(), vp)) != 0) {
		if (dir != NULL)
			VN_RELE(dir);
		pn_free(&resolvepn);
		pn_free(&pn);
		VN_RELE(vp);
		goto out;
	}

	/*
	 * We do not allow executing files in attribute directories.
	 * We test this by determining whether the resolved path
	 * contains a "/" when we're in an attribute directory;
	 * only if the pathname does not contain a "/" the resolved path
	 * points to a file in the current working (attribute) directory.
	 */
	if ((p->p_user.u_cdir->v_flag & V_XATTRDIR) != 0 &&
	    strchr(resolvepn.pn_path, '/') == NULL) {
		if (dir != NULL)
			VN_RELE(dir);
		error = EACCES;
		pn_free(&resolvepn);
		pn_free(&pn);
		VN_RELE(vp);
		goto out;
	}

	/* Remember the final path component for u_comm / accounting. */
	bzero(exec_file, MAXCOMLEN+1);
	(void) strncpy(exec_file, pn.pn_path, MAXCOMLEN);
	bzero(&args, sizeof (args));
	args.pathname = resolvepn.pn_path;
	/* don't free resolvepn until we are done with args */
	pn_free(&pn);

	/*
	 * If we're running in a profile shell, then call pfexecd.
	 */
	if ((CR_FLAGS(p->p_cred) & PRIV_PFEXEC) != 0) {
		error = pfexec_call(p->p_cred, &resolvepn, &args.pfcred,
		    &args.scrubenv);

		/* Returning errno in case we're not allowed to execute. */
		if (error > 0) {
			if (dir != NULL)
				VN_RELE(dir);
			pn_free(&resolvepn);
			VN_RELE(vp);
			goto out;
		}

		/* Don't change the credentials when using old ptrace. */
		if (args.pfcred != NULL &&
		    (p->p_proc_flag & P_PR_PTRACE) != 0) {
			crfree(args.pfcred);
			args.pfcred = NULL;
			args.scrubenv = B_FALSE;
		}
	}

	/*
	 * Specific exec handlers, or policies determined via
	 * /etc/system may override the historical default.
	 */
	args.stk_prot = PROT_ZFOD;
	args.dat_prot = PROT_ZFOD;

	CPU_STATS_ADD_K(sys, sysexec, 1);
	DTRACE_PROC1(exec, char *, args.pathname);

	ua.fname = fname;
	ua.argp = argp;
	ua.envp = envp;

	/* If necessary, brand this process before we start the exec. */
	if (brandme)
		brand_setbrand(p);

	/*
	 * gexec() does the real work; on failure we must undo any
	 * branding we just applied before bailing out.
	 */
	if ((error = gexec(&vp, &ua, &args, NULL, 0, &execsz,
	    exec_file, p->p_cred, brand_action)) != 0) {
		if (brandme)
			brand_clearbrand(p, B_FALSE);
		VN_RELE(vp);
		if (dir != NULL)
			VN_RELE(dir);
		pn_free(&resolvepn);
		goto fail;
	}

	/*
	 * Free floating point registers (sun4u only)
	 */
	ASSERT(lwp != NULL);
	lwp_freeregs(lwp, 1);

	/*
	 * Free thread and process context ops.
	 */
	if (curthread->t_ctx)
		freectx(curthread, 1);
	if (p->p_pctx)
		freepctx(p, 1);

	/*
	 * Remember file name for accounting; clear any cached DTrace predicate.
	 */
	up->u_acflag &= ~AFORK;
	bcopy(exec_file, up->u_comm, MAXCOMLEN+1);
	curthread->t_predcache = 0;

	/*
	 * Clear contract template state
	 */
	lwp_ctmpl_clear(lwp);

	/*
	 * Save the directory in which we found the executable for expanding
	 * the %d token used in core file patterns.
	 */
	mutex_enter(&p->p_lock);
	tmpvp = p->p_execdir;
	p->p_execdir = dir;
	if (p->p_execdir != NULL)
		VN_HOLD(p->p_execdir);
	mutex_exit(&p->p_lock);

	if (tmpvp != NULL)
		VN_RELE(tmpvp);

	/*
	 * Reset stack state to the user stack, clear set of signals
	 * caught on the signal stack, and reset list of signals that
	 * restart system calls; the new program's environment should
	 * not be affected by detritus from the old program.  Any
	 * pending held signals remain held, so don't clear t_hold.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_oldcontext = 0;
	lwp->lwp_ustack = 0;
	lwp->lwp_old_stk_ctl = 0;
	sigemptyset(&up->u_signodefer);
	sigemptyset(&up->u_sigonstack);
	sigemptyset(&up->u_sigresethand);
	lwp->lwp_sigaltstack.ss_sp = 0;
	lwp->lwp_sigaltstack.ss_size = 0;
	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;

	/*
	 * Make saved resource limit == current resource limit.
	 */
	for (i = 0; i < RLIM_NLIMITS; i++) {
		/*CONSTCOND*/
		if (RLIM_SAVED(i)) {
			(void) rctl_rlimit_get(rctlproc_legacy[i], p,
			    &up->u_saved_rlimit[i]);
		}
	}

	/*
	 * If the action was to catch the signal, then the action
	 * must be reset to SIG_DFL.
	 */
	sigdefault(p);
	p->p_flag &= ~(SNOWAIT|SJCTL);
	p->p_flag |= (SEXECED|SMSACCT|SMSFORK);
	up->u_signal[SIGCLD - 1] = SIG_DFL;

	/*
	 * Delete the dot4 sigqueues/signotifies.
	 */
	sigqfree(p);

	mutex_exit(&p->p_lock);

	/* Discard any profil(2) state under its own lock. */
	mutex_enter(&p->p_pflock);
	p->p_prof.pr_base = NULL;
	p->p_prof.pr_size = 0;
	p->p_prof.pr_off = 0;
	p->p_prof.pr_scale = 0;
	p->p_prof.pr_samples = 0;
	mutex_exit(&p->p_pflock);

	ASSERT(curthread->t_schedctl == NULL);

#if defined(__sparc)
	if (p->p_utraps != NULL)
		utrap_free(p);
#endif	/* __sparc */

	/*
	 * Close all close-on-exec files.
	 */
	close_exec(P_FINFO(p));
	TRACE_2(TR_FAC_PROC, TR_PROC_EXEC, "proc_exec:p %p up %p", p, up);

	/* Unbrand ourself if necessary. */
	if (PROC_IS_BRANDED(p) && (brand_action == EBA_NATIVE))
		brand_clearbrand(p, B_FALSE);

	setregs(&args);

	/* Mark this as an executable vnode */
	mutex_enter(&vp->v_lock);
	vp->v_flag |= VVMEXEC;
	mutex_exit(&vp->v_lock);

	VN_RELE(vp);
	if (dir != NULL)
		VN_RELE(dir);
	pn_free(&resolvepn);

	/*
	 * Allocate a new lwp directory and lwpid hash table if necessary.
	 * (Needed when this was not already lwp #1 in a minimal directory;
	 * the allocation is done here, before taking p_lock, so the
	 * KM_SLEEP allocations cannot block while the lock is held.)
	 */
	if (curthread->t_tid != 1 || p->p_lwpdir_sz != 2) {
		lwpdir = kmem_zalloc(2 * sizeof (lwpdir_t), KM_SLEEP);
		lwpdir->ld_next = lwpdir + 1;
		tidhash = kmem_zalloc(2 * sizeof (tidhash_t), KM_SLEEP);
		if (p->p_lwpdir != NULL)
			lep = p->p_lwpdir[curthread->t_dslot].ld_entry;
		else
			lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);
	}

	if (PROC_IS_BRANDED(p))
		BROP(p)->b_exec();

	mutex_enter(&p->p_lock);
	prbarrier(p);

	/*
	 * Reset lwp id to the default value of 1.
	 * This is a single-threaded process now
	 * and lwp #1 is lwp_wait()able by default.
	 * The t_unpark flag should not be inherited.
	 */
	ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);
	curthread->t_tid = 1;
	kpreempt_disable();
	ASSERT(curthread->t_lpl != NULL);
	p->p_t1_lgrpid = curthread->t_lpl->lpl_lgrpid;
	kpreempt_enable();
	if (p->p_tr_lgrpid != LGRP_NONE && p->p_tr_lgrpid != p->p_t1_lgrpid) {
		lgrp_update_trthr_migrations(1);
	}
	curthread->t_unpark = 0;
	curthread->t_proc_flag |= TP_TWAIT;
	curthread->t_proc_flag &= ~TP_DAEMON;	/* daemons shouldn't exec */
	p->p_lwpdaemon = 0;			/* but oh well ... */
	p->p_lwpid = 1;

	/*
	 * Install the newly-allocated lwp directory and lwpid hash table
	 * and insert the current thread into the new hash table.
	 */
	if (lwpdir != NULL) {
		old_lwpdir = p->p_lwpdir;
		old_lwpdir_sz = p->p_lwpdir_sz;
		old_tidhash = p->p_tidhash;
		old_tidhash_sz = p->p_tidhash_sz;
		p->p_lwpdir = p->p_lwpfree = lwpdir;
		p->p_lwpdir_sz = 2;
		lep->le_thread = curthread;
		lep->le_lwpid = curthread->t_tid;
		lep->le_start = curthread->t_start;
		lwp_hash_in(p, lep, tidhash, 2, 0);
		p->p_tidhash = tidhash;
		p->p_tidhash_sz = 2;
	}
	ret_tidhash = p->p_ret_tidhash;
	p->p_ret_tidhash = NULL;

	/*
	 * Restore the saved signal mask and
	 * inform /proc that the exec() has finished.
	 */
	curthread->t_hold = savedmask;
	prexecend();
	mutex_exit(&p->p_lock);
	/* Free the replaced lwp directory/hash outside of p_lock. */
	if (old_lwpdir) {
		kmem_free(old_lwpdir, old_lwpdir_sz * sizeof (lwpdir_t));
		kmem_free(old_tidhash, old_tidhash_sz * sizeof (tidhash_t));
	}
	while (ret_tidhash != NULL) {
		ret_tidhash_t *next = ret_tidhash->rth_next;
		kmem_free(ret_tidhash->rth_tidhash,
		    ret_tidhash->rth_tidhash_sz * sizeof (tidhash_t));
		kmem_free(ret_tidhash, sizeof (*ret_tidhash));
		ret_tidhash = next;
	}

	ASSERT(error == 0);
	DTRACE_PROC(exec__success);
	return (0);

fail:
	DTRACE_PROC1(exec__failure, int, error);
out:		/* error return */
	/*
	 * Common error exit: restore the signal mask saved at entry and
	 * tell /proc the exec attempt is over.
	 */
	mutex_enter(&p->p_lock);
	curthread->t_hold = savedmask;
	prexecend();
	mutex_exit(&p->p_lock);
	ASSERT(error != 0);
	return (error);
}
559 
560 
561 /*
562  * Perform generic exec duties and switchout to object-file specific
563  * handler.
564  */
/*
 * gexec() - generic exec: shared duties for all object file formats.
 *
 * Opens the executable, sniffs its magic number to choose an execsw
 * entry, computes the credential changes implied by set-id bits and
 * forced/profile privileges (via execsetid()), and then dispatches to
 * the format-specific exec_func.  On success at level 0 it installs the
 * new credentials and set-id process flags.
 *
 *	vpp		in/out: vnode of the executable
 *	uap/args	exec arguments and accumulated exec state
 *	idatap		interpreter data (NULL at the top level)
 *	level		recursion depth (0 for the initial image; >0 when
 *			re-entered for an interpreter such as #!)
 *	execsz		running size accounting for the new image
 *	exec_file	last path component, for diagnostics/accounting
 *	cred		credentials to exec with
 *	brand_action	EBA_* action passed through to the handler
 *
 * Returns 0 on success or an errno value (ENOEXEC if no handler
 * matches).
 */
int
gexec(
	struct vnode **vpp,
	struct execa *uap,
	struct uarg *args,
	struct intpdata *idatap,
	int level,
	size_t *execsz,
	caddr_t exec_file,
	struct cred *cred,
	int brand_action)
{
	struct vnode *vp, *execvp = NULL;
	proc_t *pp = ttoproc(curthread);
	struct execsw *eswp;
	int error = 0;
	int suidflags = 0;
	ssize_t resid;
	uid_t uid, gid;
	struct vattr vattr;
	char magbuf[MAGIC_BYTES];
	int setid;
	cred_t *oldcred, *newcred = NULL;
	int privflags = 0;
	int setidfl;
	priv_set_t fset;
	secflagset_t old_secflags;

	secflags_copy(&old_secflags, &pp->p_secflags.psf_effective);

	/*
	 * If the SNOCD or SUGID flag is set, turn it off and remember the
	 * previous setting so we can restore it if we encounter an error.
	 */
	if (level == 0 && (pp->p_flag & PSUIDFLAGS)) {
		mutex_enter(&pp->p_lock);
		suidflags = pp->p_flag & PSUIDFLAGS;
		pp->p_flag &= ~PSUIDFLAGS;
		mutex_exit(&pp->p_lock);
	}

	if ((error = execpermissions(*vpp, &vattr, args)) != 0)
		goto bad_noclose;

	/* need to open vnode for stateful file systems */
	if ((error = VOP_OPEN(vpp, FREAD, CRED(), NULL)) != 0)
		goto bad_noclose;
	vp = *vpp;

	/*
	 * Note: to support binary compatibility with SunOS a.out
	 * executables, we read in the first four bytes, as the
	 * magic number is in bytes 2-3.
	 */
	if (error = vn_rdwr(UIO_READ, vp, magbuf, sizeof (magbuf),
	    (offset_t)0, UIO_SYSSPACE, 0, (rlim64_t)0, CRED(), &resid))
		goto bad;
	if (resid != 0)
		goto bad;

	if ((eswp = findexec_by_hdr(magbuf)) == NULL)
		goto bad;

	/*
	 * At level 0, work out any credential change required by set-id
	 * bits, forced privileges, or a pfexecd-supplied credential.
	 */
	if (level == 0 &&
	    (privflags = execsetid(vp, &vattr, &uid, &gid, &fset,
	    args->pfcred == NULL ? cred : args->pfcred, args->pathname)) != 0) {

		/* Pfcred is a credential with a ref count of 1 */

		if (args->pfcred != NULL) {
			privflags |= PRIV_INCREASE|PRIV_RESET;
			newcred = cred = args->pfcred;
		} else {
			newcred = cred = crdup(cred);
		}

		/* If we can, drop the PA bit */
		if ((privflags & PRIV_RESET) != 0)
			priv_adjust_PA(cred);

		if (privflags & PRIV_SETID) {
			cred->cr_uid = uid;
			cred->cr_gid = gid;
			cred->cr_suid = uid;
			cred->cr_sgid = gid;
		}

		if (privflags & MAC_FLAGS) {
			if (!(CR_FLAGS(cred) & NET_MAC_AWARE_INHERIT))
				CR_FLAGS(cred) &= ~NET_MAC_AWARE;
			CR_FLAGS(cred) &= ~NET_MAC_AWARE_INHERIT;
		}

		/*
		 * Implement the privilege updates:
		 *
		 * Restrict with L:
		 *
		 *	I' = I & L
		 *
		 *	E' = P' = (I' + F) & A
		 *
		 * But if running under ptrace, we cap I and F with P.
		 */
		if ((privflags & (PRIV_RESET|PRIV_FORCED)) != 0) {
			if ((privflags & PRIV_INCREASE) != 0 &&
			    (pp->p_proc_flag & P_PR_PTRACE) != 0) {
				priv_intersect(&CR_OPPRIV(cred),
				    &CR_IPRIV(cred));
				priv_intersect(&CR_OPPRIV(cred), &fset);
			}
			priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
			CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
			if (privflags & PRIV_FORCED) {
				priv_set_PA(cred);
				priv_union(&fset, &CR_EPRIV(cred));
				priv_union(&fset, &CR_PPRIV(cred));
			}
			priv_adjust_PA(cred);
		}
	} else if (level == 0 && args->pfcred != NULL) {
		newcred = cred = args->pfcred;
		privflags |= PRIV_INCREASE;
		/* pfcred is not forced to adhere to these settings */
		priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
		CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
		priv_adjust_PA(cred);
	}

	/* The new image gets the inheritable secflags as its secflags */
	secflags_promote(pp);

	/* SunOS 4.x buy-back */
	if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) &&
	    (vattr.va_mode & (VSUID|VSGID))) {
		/*
		 * Set-id bits on a nosuid filesystem are ignored; log a
		 * console note identifying the file (or its filesystem's
		 * mount point when the path can't be resolved).
		 */
		char path[MAXNAMELEN];
		refstr_t *mntpt = NULL;
		int ret = -1;

		bzero(path, sizeof (path));
		zone_hold(pp->p_zone);

		ret = vnodetopath(pp->p_zone->zone_rootvp, vp, path,
		    sizeof (path), cred);

		/* fallback to mountpoint if a path can't be found */
		if ((ret != 0) || (ret == 0 && path[0] == '\0'))
			mntpt = vfs_getmntpoint(vp->v_vfsp);

		if (mntpt == NULL)
			zcmn_err(pp->p_zone->zone_id, CE_NOTE,
			    "!uid %d: setuid execution not allowed, "
			    "file=%s", cred->cr_uid, path);
		else
			zcmn_err(pp->p_zone->zone_id, CE_NOTE,
			    "!uid %d: setuid execution not allowed, "
			    "fs=%s, file=%s", cred->cr_uid,
			    ZONE_PATH_TRANSLATE(refstr_value(mntpt),
			    pp->p_zone), exec_file);

		if (!INGLOBALZONE(pp)) {
			/* zone_rootpath always has trailing / */
			if (mntpt == NULL)
				cmn_err(CE_NOTE, "!zone: %s, uid: %d "
				    "setuid execution not allowed, file=%s%s",
				    pp->p_zone->zone_name, cred->cr_uid,
				    pp->p_zone->zone_rootpath, path + 1);
			else
				cmn_err(CE_NOTE, "!zone: %s, uid: %d "
				    "setuid execution not allowed, fs=%s, "
				    "file=%s", pp->p_zone->zone_name,
				    cred->cr_uid, refstr_value(mntpt),
				    exec_file);
		}

		if (mntpt != NULL)
			refstr_rele(mntpt);

		zone_rele(pp->p_zone);
	}

	/*
	 * execsetid() told us whether or not we had to change the
	 * credentials of the process.  In privflags, it told us
	 * whether we gained any privileges or executed a set-uid executable.
	 */
	setid = (privflags & (PRIV_SETUGID|PRIV_INCREASE|PRIV_FORCED));

	/*
	 * Use /etc/system variable to determine if the stack
	 * should be marked as executable by default.
	 */
	if ((noexec_user_stack != 0) ||
	    secflag_enabled(pp, PROC_SEC_NOEXECSTACK))
		args->stk_prot &= ~PROT_EXEC;

	args->execswp = eswp; /* Save execsw pointer in uarg for exec_func */
	args->ex_vp = vp;

	/*
	 * Traditionally, the setid flags told the sub processes whether
	 * the file just executed was set-uid or set-gid; this caused
	 * some confusion as the 'setid' flag did not match the SUGID
	 * process flag which is only set when the uids/gids do not match.
	 * A script set-gid/set-uid to the real uid/gid would start with
	 * /dev/fd/X but an executable would happily trust LD_LIBRARY_PATH.
	 * Now we flag those cases where the calling process cannot
	 * be trusted to influence the newly exec'ed process, either
	 * because it runs with more privileges or when the uids/gids
	 * do in fact not match.
	 * This also makes the runtime linker agree with the on exec
	 * values of SNOCD and SUGID.
	 */
	setidfl = 0;
	if (cred->cr_uid != cred->cr_ruid || (cred->cr_rgid != cred->cr_gid &&
	    !supgroupmember(cred->cr_gid, cred))) {
		setidfl |= EXECSETID_UGIDS;
	}
	if (setid & PRIV_SETUGID)
		setidfl |= EXECSETID_SETID;
	if (setid & PRIV_FORCED)
		setidfl |= EXECSETID_PRIVS;

	execvp = pp->p_exec;
	if (execvp)
		VN_HOLD(execvp);

	/* Hand off to the format-specific handler (e.g. elfexec). */
	error = (*eswp->exec_func)(vp, uap, args, idatap, level, execsz,
	    setidfl, exec_file, cred, brand_action);
	rw_exit(eswp->exec_lock);
	if (error != 0) {
		if (execvp)
			VN_RELE(execvp);
		/*
		 * If this process's p_exec has been set to the vp of
		 * the executable by exec_func, we will return without
		 * calling VOP_CLOSE because proc_exit will close it
		 * on exit.
		 */
		if (pp->p_exec == vp)
			goto bad_noclose;
		else
			goto bad;
	}

	if (level == 0) {
		uid_t oruid;

		if (execvp != NULL) {
			/*
			 * Close the previous executable only if we are
			 * at level 0.
			 */
			(void) VOP_CLOSE(execvp, FREAD, 1, (offset_t)0,
			    cred, NULL);
		}

		mutex_enter(&pp->p_crlock);

		oruid = pp->p_cred->cr_ruid;

		if (newcred != NULL) {
			/*
			 * Free the old credentials, and set the new ones.
			 * Do this for both the process and the (single) thread.
			 */
			crfree(pp->p_cred);
			pp->p_cred = cred;	/* cred already held for proc */
			crhold(cred);		/* hold new cred for thread */
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			oldcred = curthread->t_cred;
			curthread->t_cred = cred;
			crfree(oldcred);

			if (priv_basic_test >= 0 &&
			    !PRIV_ISASSERT(&CR_IPRIV(newcred),
			    priv_basic_test)) {
				pid_t pid = pp->p_pid;
				char *fn = PTOU(pp)->u_comm;

				cmn_err(CE_WARN, "%s[%d]: exec: basic_test "
				    "privilege removed from E/I", fn, pid);
			}
		}
		/*
		 * On emerging from a successful exec(), the saved
		 * uid and gid equal the effective uid and gid.
		 */
		cred->cr_suid = cred->cr_uid;
		cred->cr_sgid = cred->cr_gid;

		/*
		 * If the real and effective ids do not match, this
		 * is a setuid process that should not dump core.
		 * The group comparison is tricky; we prevent the code
		 * from flagging SNOCD when executing with an effective gid
		 * which is a supplementary group.
		 */
		if (cred->cr_ruid != cred->cr_uid ||
		    (cred->cr_rgid != cred->cr_gid &&
		    !supgroupmember(cred->cr_gid, cred)) ||
		    (privflags & PRIV_INCREASE) != 0)
			suidflags = PSUIDFLAGS;
		else
			suidflags = 0;

		mutex_exit(&pp->p_crlock);
		if (newcred != NULL && oruid != newcred->cr_ruid) {
			/* Note that the process remains in the same zone. */
			mutex_enter(&pidlock);
			upcount_dec(oruid, crgetzoneid(newcred));
			upcount_inc(newcred->cr_ruid, crgetzoneid(newcred));
			mutex_exit(&pidlock);
		}
		if (suidflags) {
			mutex_enter(&pp->p_lock);
			pp->p_flag |= suidflags;
			mutex_exit(&pp->p_lock);
		}
		if (setid && (pp->p_proc_flag & P_PR_PTRACE) == 0) {
			/*
			 * If process is traced via /proc, arrange to
			 * invalidate the associated /proc vnode.
			 */
			if (pp->p_plist || (pp->p_proc_flag & P_PR_TRACE))
				args->traceinval = 1;
		}
		if (pp->p_proc_flag & P_PR_PTRACE)
			psignal(pp, SIGTRAP);
		if (args->traceinval)
			prinvalidate(&pp->p_user);
	}
	if (execvp)
		VN_RELE(execvp);
	return (0);

bad:
	(void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, cred, NULL);

bad_noclose:
	if (newcred != NULL)
		crfree(newcred);
	if (error == 0)
		error = ENOEXEC;

	mutex_enter(&pp->p_lock);
	/* Restore the set-id flags we cleared on entry. */
	if (suidflags) {
		pp->p_flag |= suidflags;
	}
	/*
	 * Restore the effective secflags, to maintain the invariant they
	 * never change for a given process
	 */
	secflags_copy(&pp->p_secflags.psf_effective, &old_secflags);
	mutex_exit(&pp->p_lock);

	return (error);
}
927 
928 extern char *execswnames[];
929 
930 struct execsw *
931 allocate_execsw(char *name, char *magic, size_t magic_size)
932 {
933 	int i, j;
934 	char *ename;
935 	char *magicp;
936 
937 	mutex_enter(&execsw_lock);
938 	for (i = 0; i < nexectype; i++) {
939 		if (execswnames[i] == NULL) {
940 			ename = kmem_alloc(strlen(name) + 1, KM_SLEEP);
941 			(void) strcpy(ename, name);
942 			execswnames[i] = ename;
943 			/*
944 			 * Set the magic number last so that we
945 			 * don't need to hold the execsw_lock in
946 			 * findexectype().
947 			 */
948 			magicp = kmem_alloc(magic_size, KM_SLEEP);
949 			for (j = 0; j < magic_size; j++)
950 				magicp[j] = magic[j];
951 			execsw[i].exec_magic = magicp;
952 			mutex_exit(&execsw_lock);
953 			return (&execsw[i]);
954 		}
955 	}
956 	mutex_exit(&execsw_lock);
957 	return (NULL);
958 }
959 
960 /*
961  * Find the exec switch table entry with the corresponding magic string.
962  */
963 struct execsw *
964 findexecsw(char *magic)
965 {
966 	struct execsw *eswp;
967 
968 	for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
969 		ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
970 		if (magic && eswp->exec_maglen != 0 &&
971 		    bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0)
972 			return (eswp);
973 	}
974 	return (NULL);
975 }
976 
977 /*
978  * Find the execsw[] index for the given exec header string by looking for the
979  * magic string at a specified offset and length for each kind of executable
980  * file format until one matches.  If no execsw[] entry is found, try to
981  * autoload a module for this magic string.
982  */
983 struct execsw *
984 findexec_by_hdr(char *header)
985 {
986 	struct execsw *eswp;
987 
988 	for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
989 		ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
990 		if (header && eswp->exec_maglen != 0 &&
991 		    bcmp(&header[eswp->exec_magoff], eswp->exec_magic,
992 		    eswp->exec_maglen) == 0) {
993 			if (hold_execsw(eswp) != 0)
994 				return (NULL);
995 			return (eswp);
996 		}
997 	}
998 	return (NULL);	/* couldn't find the type */
999 }
1000 
1001 /*
1002  * Find the execsw[] index for the given magic string.  If no execsw[] entry
1003  * is found, try to autoload a module for this magic string.
1004  */
1005 struct execsw *
1006 findexec_by_magic(char *magic)
1007 {
1008 	struct execsw *eswp;
1009 
1010 	for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
1011 		ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
1012 		if (magic && eswp->exec_maglen != 0 &&
1013 		    bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0) {
1014 			if (hold_execsw(eswp) != 0)
1015 				return (NULL);
1016 			return (eswp);
1017 		}
1018 	}
1019 	return (NULL);	/* couldn't find the type */
1020 }
1021 
1022 static int
1023 hold_execsw(struct execsw *eswp)
1024 {
1025 	char *name;
1026 
1027 	rw_enter(eswp->exec_lock, RW_READER);
1028 	while (!LOADED_EXEC(eswp)) {
1029 		rw_exit(eswp->exec_lock);
1030 		name = execswnames[eswp-execsw];
1031 		ASSERT(name);
1032 		if (modload("exec", name) == -1)
1033 			return (-1);
1034 		rw_enter(eswp->exec_lock, RW_READER);
1035 	}
1036 	return (0);
1037 }
1038 
/*
 * Compute the credential changes implied by exec'ing the file whose
 * attributes are in vattrp.  Returns a mask of PRIV_* / MAC_FLAGS
 * privilege-update flags for the caller to act on.  If PRIV_SETID is
 * included, *uidp and *gidp hold the post-exec ids; otherwise they are
 * left untouched.  For set-uid root binaries, 'fset' is filled in by
 * the forced-privilege look-aside (get_forced_privs()) when PRIV_FORCED
 * is returned.
 */
static int
execsetid(struct vnode *vp, struct vattr *vattrp, uid_t *uidp, uid_t *gidp,
    priv_set_t *fset, cred_t *cr, const char *pathname)
{
	proc_t *pp = ttoproc(curthread);
	uid_t uid, gid;
	int privflags = 0;

	/*
	 * Remember credentials.
	 */
	uid = cr->cr_uid;
	gid = cr->cr_gid;

	/* Will try to reset the PRIV_AWARE bit later. */
	if ((CR_FLAGS(cr) & (PRIV_AWARE|PRIV_AWARE_INHERIT)) == PRIV_AWARE)
		privflags |= PRIV_RESET;

	if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) == 0) {
		/*
		 * If it's a set-uid root program we perform the
		 * forced privilege look-aside. This has three possible
		 * outcomes:
		 *	no look aside information -> treat as before
		 *	look aside in Limit set -> apply forced privs
		 *	look aside not in Limit set -> ignore set-uid root
		 *
		 * Ordinary set-uid root execution only allowed if the limit
		 * set holds all unsafe privileges.
		 */
		if (vattrp->va_mode & VSUID) {
			if (vattrp->va_uid == 0) {
				int res = get_forced_privs(cr, pathname, fset);

				switch (res) {
				case -1:
					/* No look-aside: ordinary setuid 0. */
					if (priv_issubset(&priv_unsafe,
					    &CR_LPRIV(cr))) {
						uid = vattrp->va_uid;
						privflags |= PRIV_SETUGID;
					}
					break;
				case 0:
					/* Look-aside found: force privs. */
					privflags |= PRIV_FORCED|PRIV_INCREASE;
					break;
				default:
					/* Look-aside rejected: ignore VSUID. */
					break;
				}
			} else {
				uid = vattrp->va_uid;
				privflags |= PRIV_SETUGID;
			}
		}
		if (vattrp->va_mode & VSGID) {
			gid = vattrp->va_gid;
			privflags |= PRIV_SETUGID;
		}
	}

	/*
	 * Do we need to change our credential anyway?
	 * This is the case when E != I or P != I, as
	 * we need to do the assignments (with F empty and A full)
	 * Or when I is not a subset of L; in that case we need to
	 * enforce L.
	 *
	 *		I' = L & I
	 *
	 *		E' = P' = (I' + F) & A
	 * or
	 *		E' = P' = I'
	 */
	if (!priv_isequalset(&CR_EPRIV(cr), &CR_IPRIV(cr)) ||
	    !priv_issubset(&CR_IPRIV(cr), &CR_LPRIV(cr)) ||
	    !priv_isequalset(&CR_PPRIV(cr), &CR_IPRIV(cr)))
		privflags |= PRIV_RESET;

	/* Child has more privileges than parent */
	if (!priv_issubset(&CR_IPRIV(cr), &CR_PPRIV(cr)))
		privflags |= PRIV_INCREASE;

	/* If MAC-aware flag(s) are on, need to update cred to remove. */
	if ((CR_FLAGS(cr) & NET_MAC_AWARE) ||
	    (CR_FLAGS(cr) & NET_MAC_AWARE_INHERIT))
		privflags |= MAC_FLAGS;
	/*
	 * Set setuid/setgid protections if no ptrace() compatibility.
	 * For privileged processes, honor setuid/setgid even in
	 * the presence of ptrace() compatibility.
	 */
	if (((pp->p_proc_flag & P_PR_PTRACE) == 0 ||
	    PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, (uid == 0))) &&
	    (cr->cr_uid != uid ||
	    cr->cr_gid != gid ||
	    cr->cr_suid != uid ||
	    cr->cr_sgid != gid)) {
		*uidp = uid;
		*gidp = gid;
		privflags |= PRIV_SETID;
	}
	return (privflags);
}
1141 
/*
 * Fetch the attributes of vp into vattrp and verify that the current
 * process may exec it: the caller must have execute access, the vnode
 * must be a regular file (or a /proc object file), the filesystem must
 * not be mounted noexec, and at least one execute bit must be set in
 * the file mode.  If the process is being traced, read access is also
 * required: under ptrace(2) compatibility the exec fails outright;
 * under /proc tracing the /proc vnode is marked for invalidation via
 * args->traceinval.  Returns 0 or an errno.
 */
int
execpermissions(struct vnode *vp, struct vattr *vattrp, struct uarg *args)
{
	int error;
	proc_t *p = ttoproc(curthread);

	vattrp->va_mask = AT_MODE | AT_UID | AT_GID | AT_SIZE;
	if (error = VOP_GETATTR(vp, vattrp, ATTR_EXEC, p->p_cred, NULL))
		return (error);
	/*
	 * Check the access mode.
	 * If VPROC, ask /proc if the file is an object file.
	 */
	if ((error = VOP_ACCESS(vp, VEXEC, 0, p->p_cred, NULL)) != 0 ||
	    !(vp->v_type == VREG || (vp->v_type == VPROC && pr_isobject(vp))) ||
	    (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0 ||
	    (vattrp->va_mode & (VEXEC|(VEXEC>>3)|(VEXEC>>6))) == 0) {
		if (error == 0)
			error = EACCES;
		return (error);
	}

	if ((p->p_plist || (p->p_proc_flag & (P_PR_PTRACE|P_PR_TRACE))) &&
	    (error = VOP_ACCESS(vp, VREAD, 0, p->p_cred, NULL))) {
		/*
		 * If process is under ptrace(2) compatibility,
		 * fail the exec(2).
		 */
		if (p->p_proc_flag & P_PR_PTRACE)
			goto bad;
		/*
		 * Process is traced via /proc.
		 * Arrange to invalidate the /proc vnode.
		 */
		args->traceinval = 1;
	}
	return (0);
bad:
	if (error == 0)
		error = ENOEXEC;
	return (error);
}
1184 
/*
 * Map a section of an executable file into the user's address space.
 *
 * [oldaddr, oldaddr + len) of vp at file offset 'offset' is mapped with
 * protections 'prot'; the address and offset are first rounded down to
 * page boundaries.  When 'page' is non-zero the section is demand-paged
 * via VOP_MAP() (and possibly prefaulted); otherwise an anonymous
 * segment is created and the contents read in with vn_rdwr().  Any
 * trailing 'zfodlen' bytes of zero-fill (e.g. BSS) are then established,
 * zeroing the tail of the last file page and mapping whole zero-fill
 * pages beyond it; 'szc' is the preferred page size code for that
 * zero-fill segment.  Returns 0 or an errno.
 */
int
execmap(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
    off_t offset, int prot, int page, uint_t szc)
{
	int error = 0;
	off_t oldoffset;
	caddr_t zfodbase, oldaddr;
	size_t end, oldlen;
	size_t zfoddiff;
	label_t ljb;
	proc_t *p = ttoproc(curthread);

	oldaddr = addr;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if (len) {
		oldlen = len;
		/* Grow len to cover the sub-page slop we rounded away. */
		len += ((size_t)oldaddr - (size_t)addr);
		oldoffset = offset;
		offset = (off_t)((uintptr_t)offset & PAGEMASK);
		if (page) {
			spgcnt_t  prefltmem, availm, npages;
			int preread;
			uint_t mflag = MAP_PRIVATE | MAP_FIXED;

			if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
				mflag |= MAP_TEXT;
			} else {
				mflag |= MAP_INITDATA;
			}

			if (valid_usr_range(addr, len, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}
			if (error = VOP_MAP(vp, (offset_t)offset,
			    p->p_as, &addr, len, prot, PROT_ALL,
			    mflag, CRED(), NULL))
				goto bad;

			/*
			 * If the segment can fit, then we prefault
			 * the entire segment in.  This is based on the
			 * model that says the best working set of a
			 * small program is all of its pages.
			 */
			npages = (spgcnt_t)btopr(len);
			prefltmem = freemem - desfree;
			preread =
			    (npages < prefltmem && len < PGTHRESH) ? 1 : 0;

			/*
			 * If we aren't prefaulting the segment,
			 * increment "deficit", if necessary to ensure
			 * that pages will become available when this
			 * process starts executing.
			 */
			availm = freemem - lotsfree;
			if (preread == 0 && npages > availm &&
			    deficit < lotsfree) {
				deficit += MIN((pgcnt_t)(npages - availm),
				    lotsfree - deficit);
			}

			if (preread) {
				TRACE_2(TR_FAC_PROC, TR_EXECMAP_PREREAD,
				    "execmap preread:freemem %d size %lu",
				    freemem, len);
				(void) as_fault(p->p_as->a_hat, p->p_as,
				    (caddr_t)addr, len, F_INVAL, S_READ);
			}
		} else {
			if (valid_usr_range(addr, len, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}

			if (error = as_map(p->p_as, addr, len,
			    segvn_create, zfod_argsp))
				goto bad;
			/*
			 * Read in the segment in one big chunk.
			 */
			if (error = vn_rdwr(UIO_READ, vp, (caddr_t)oldaddr,
			    oldlen, (offset_t)oldoffset, UIO_USERSPACE, 0,
			    (rlim64_t)0, CRED(), (ssize_t *)0))
				goto bad;
			/*
			 * Now set protections.
			 */
			if (prot != PROT_ZFOD) {
				(void) as_setprot(p->p_as, (caddr_t)addr,
				    len, prot);
			}
		}
	}

	if (zfodlen) {
		struct as *as = curproc->p_as;
		struct seg *seg;
		uint_t zprot = 0;

		/* zfoddiff is the unused tail of the last mapped page. */
		end = (size_t)addr + len;
		zfodbase = (caddr_t)roundup(end, PAGESIZE);
		zfoddiff = (uintptr_t)zfodbase - end;
		if (zfoddiff) {
			/*
			 * Before we go to zero the remaining space on the last
			 * page, make sure we have write permission.
			 *
			 * Normal illumos binaries don't even hit the case
			 * where we have to change permission on the last page
			 * since their protection is typically either
			 *    PROT_USER | PROT_WRITE | PROT_READ
			 * or
			 *    PROT_ZFOD (same as PROT_ALL).
			 *
			 * We need to be careful how we zero-fill the last page
			 * if the segment protection does not include
			 * PROT_WRITE. Using as_setprot() can cause the VM
			 * segment code to call segvn_vpage(), which must
			 * allocate a page struct for each page in the segment.
			 * If we have a very large segment, this may fail, so
			 * we have to check for that, even though we ignore
			 * other return values from as_setprot.
			 */

			AS_LOCK_ENTER(as, RW_READER);
			seg = as_segat(curproc->p_as, (caddr_t)end);
			if (seg != NULL)
				SEGOP_GETPROT(seg, (caddr_t)end, zfoddiff - 1,
				    &zprot);
			AS_LOCK_EXIT(as);

			if (seg != NULL && (zprot & PROT_WRITE) == 0) {
				if (as_setprot(as, (caddr_t)end, zfoddiff - 1,
				    zprot | PROT_WRITE) == ENOMEM) {
					error = ENOMEM;
					goto bad;
				}
			}

			if (on_fault(&ljb)) {
				no_fault();
				/* Restore the original protection on fault. */
				if (seg != NULL && (zprot & PROT_WRITE) == 0)
					(void) as_setprot(as, (caddr_t)end,
					    zfoddiff - 1, zprot);
				error = EFAULT;
				goto bad;
			}
			uzero((void *)end, zfoddiff);
			no_fault();
			if (seg != NULL && (zprot & PROT_WRITE) == 0)
				(void) as_setprot(as, (caddr_t)end,
				    zfoddiff - 1, zprot);
		}
		if (zfodlen > zfoddiff) {
			struct segvn_crargs crargs =
			    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

			zfodlen -= zfoddiff;
			if (valid_usr_range(zfodbase, zfodlen, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}
			if (szc > 0) {
				/*
				 * ASSERT alignment because the mapelfexec()
				 * caller for the szc > 0 case extended zfod
				 * so its end is pgsz aligned.
				 */
				size_t pgsz = page_get_pagesize(szc);
				ASSERT(IS_P2ALIGNED(zfodbase + zfodlen, pgsz));

				if (IS_P2ALIGNED(zfodbase, pgsz)) {
					crargs.szc = szc;
				} else {
					crargs.szc = AS_MAP_HEAP;
				}
			} else {
				crargs.szc = AS_MAP_NO_LPOOB;
			}
			if (error = as_map(p->p_as, (caddr_t)zfodbase,
			    zfodlen, segvn_create, &crargs))
				goto bad;
			if (prot != PROT_ZFOD) {
				(void) as_setprot(p->p_as, (caddr_t)zfodbase,
				    zfodlen, prot);
			}
		}
	}
	return (0);
bad:
	return (error);
}
1386 
1387 void
1388 setexecenv(struct execenv *ep)
1389 {
1390 	proc_t *p = ttoproc(curthread);
1391 	klwp_t *lwp = ttolwp(curthread);
1392 	struct vnode *vp;
1393 
1394 	p->p_bssbase = ep->ex_bssbase;
1395 	p->p_brkbase = ep->ex_brkbase;
1396 	p->p_brksize = ep->ex_brksize;
1397 	if (p->p_exec)
1398 		VN_RELE(p->p_exec);	/* out with the old */
1399 	vp = p->p_exec = ep->ex_vp;
1400 	if (vp != NULL)
1401 		VN_HOLD(vp);		/* in with the new */
1402 
1403 	lwp->lwp_sigaltstack.ss_sp = 0;
1404 	lwp->lwp_sigaltstack.ss_size = 0;
1405 	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
1406 }
1407 
/*
 * Open *vpp for reading on behalf of exec, allocating a file structure
 * and descriptor for it; the descriptor is returned in *fdp.  On any
 * failure all partial state is undone and *fdp is set to -1.
 * Returns 0 or an errno.
 */
int
execopen(struct vnode **vpp, int *fdp)
{
	struct vnode *vp = *vpp;
	file_t *fp;
	int error = 0;
	int filemode = FREAD;

	VN_HOLD(vp);		/* open reference */
	if (error = falloc(NULL, filemode, &fp, fdp)) {
		VN_RELE(vp);
		*fdp = -1;	/* just in case falloc changed value */
		return (error);
	}
	if (error = VOP_OPEN(&vp, filemode, CRED(), NULL)) {
		/* Undo the hold, the fd slot, and the file structure. */
		VN_RELE(vp);
		setf(*fdp, NULL);
		unfalloc(fp);
		*fdp = -1;
		return (error);
	}
	*vpp = vp;		/* vnode should not have changed */
	fp->f_vnode = vp;
	mutex_exit(&fp->f_tlock);
	setf(*fdp, fp);
	return (0);
}
1435 
/*
 * Close a file descriptor opened by execopen().
 */
int
execclose(int fd)
{
	return (closeandsetf(fd, NULL));
}
1441 
1442 
1443 /*
1444  * noexec stub function.
1445  */
/*ARGSUSED*/
int
noexec(
    struct vnode *vp,
    struct execa *uap,
    struct uarg *args,
    struct intpdata *idatap,
    int level,
    size_t *execsz,
    int setid,
    caddr_t exec_file,
    struct cred *cred)
{
	/* Log the unsupported executable format and reject the exec. */
	cmn_err(CE_WARN, "missing exec capability for %s", uap->fname);
	return (ENOEXEC);
}
1462 
1463 /*
1464  * Support routines for building a user stack.
1465  *
1466  * execve(path, argv, envp) must construct a new stack with the specified
1467  * arguments and environment variables (see exec_args() for a description
1468  * of the user stack layout).  To do this, we copy the arguments and
1469  * environment variables from the old user address space into the kernel,
1470  * free the old as, create the new as, and copy our buffered information
1471  * to the new stack.  Our kernel buffer has the following structure:
1472  *
1473  *	+-----------------------+ <--- stk_base + stk_size
1474  *	| string offsets	|
1475  *	+-----------------------+ <--- stk_offp
1476  *	|			|
1477  *	| STK_AVAIL() space	|
1478  *	|			|
1479  *	+-----------------------+ <--- stk_strp
1480  *	| strings		|
1481  *	+-----------------------+ <--- stk_base
1482  *
1483  * When we add a string, we store the string's contents (including the null
1484  * terminator) at stk_strp, and we store the offset of the string relative to
 * stk_base at --stk_offp.  As strings are added, stk_strp increases and
1486  * stk_offp decreases.  The amount of space remaining, STK_AVAIL(), is just
1487  * the difference between these pointers.  If we run out of space, we return
1488  * an error and exec_args() starts all over again with a buffer twice as large.
1489  * When we're all done, the kernel buffer looks like this:
1490  *
1491  *	+-----------------------+ <--- stk_base + stk_size
1492  *	| argv[0] offset	|
1493  *	+-----------------------+
1494  *	| ...			|
1495  *	+-----------------------+
1496  *	| argv[argc-1] offset	|
1497  *	+-----------------------+
1498  *	| envp[0] offset	|
1499  *	+-----------------------+
1500  *	| ...			|
1501  *	+-----------------------+
1502  *	| envp[envc-1] offset	|
1503  *	+-----------------------+
1504  *	| AT_SUN_PLATFORM offset|
1505  *	+-----------------------+
1506  *	| AT_SUN_EXECNAME offset|
1507  *	+-----------------------+ <--- stk_offp
1508  *	|			|
1509  *	| STK_AVAIL() space	|
1510  *	|			|
1511  *	+-----------------------+ <--- stk_strp
1512  *	| AT_SUN_EXECNAME offset|
1513  *	+-----------------------+
1514  *	| AT_SUN_PLATFORM offset|
1515  *	+-----------------------+
1516  *	| envp[envc-1] string	|
1517  *	+-----------------------+
1518  *	| ...			|
1519  *	+-----------------------+
1520  *	| envp[0] string	|
1521  *	+-----------------------+
1522  *	| argv[argc-1] string	|
1523  *	+-----------------------+
1524  *	| ...			|
1525  *	+-----------------------+
1526  *	| argv[0] string	|
1527  *	+-----------------------+ <--- stk_base
1528  */
1529 
/* Bytes of free space between the string area and the offset array. */
#define	STK_AVAIL(args)		((char *)(args)->stk_offp - (args)->stk_strp)
1531 
/*
 * Add a string to the stack buffer.  The string's offset from stk_base
 * is recorded at *--stk_offp and its bytes (including the NUL) are
 * appended at stk_strp; 'sp' is copied from user or kernel space per
 * 'segflg'.  Returns 0 on success, E2BIG if the buffer is exhausted,
 * or a copyinstr() error.
 */
static int
stk_add(uarg_t *args, const char *sp, enum uio_seg segflg)
{
	int error;
	size_t len;

	/* Reserve the offset slot before appending the string bytes. */
	if (STK_AVAIL(args) < sizeof (int))
		return (E2BIG);
	*--args->stk_offp = args->stk_strp - args->stk_base;

	if (segflg == UIO_USERSPACE) {
		/* Bounded by the remaining buffer space. */
		error = copyinstr(sp, args->stk_strp, STK_AVAIL(args), &len);
		if (error != 0)
			return (error);
	} else {
		len = strlen(sp) + 1;
		if (len > STK_AVAIL(args))
			return (E2BIG);
		bcopy(sp, args->stk_strp, len);
	}

	args->stk_strp += len;

	return (0);
}
1560 
1561 static int
1562 stk_getptr(uarg_t *args, char *src, char **dst)
1563 {
1564 	int error;
1565 
1566 	if (args->from_model == DATAMODEL_NATIVE) {
1567 		ulong_t ptr;
1568 		error = fulword(src, &ptr);
1569 		*dst = (caddr_t)ptr;
1570 	} else {
1571 		uint32_t ptr;
1572 		error = fuword32(src, &ptr);
1573 		*dst = (caddr_t)(uintptr_t)ptr;
1574 	}
1575 	return (error);
1576 }
1577 
1578 static int
1579 stk_putptr(uarg_t *args, char *addr, char *value)
1580 {
1581 	if (args->to_model == DATAMODEL_NATIVE)
1582 		return (sulword(addr, (ulong_t)value));
1583 	else
1584 		return (suword32(addr, (uint32_t)(uintptr_t)value));
1585 }
1586 
/*
 * Copy the exec arguments into the kernel stack buffer: interpreter
 * name/argument pairs (for #! scripts, possibly nested), then argv[]
 * strings, then envp[] strings (optionally scrubbing LD_* variables),
 * then the platform/execname/brand/emulator auxv strings.  On success
 * the padded user-stack size is recorded in args->usrstack_size.
 * Returns 0, E2BIG if the buffer is too small (the caller retries with
 * a larger one), or a copyin error.
 */
static int
stk_copyin(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
{
	char *sp;
	int argc, error;
	int argv_empty = 0;
	size_t ptrsize = args->from_ptrsize;
	size_t size, pad;
	char *argv = (char *)uap->argp;
	char *envp = (char *)uap->envp;

	/*
	 * Copy interpreter's name and argument to argv[0] and argv[1].
	 * In the rare case that we have nested interpreters then those names
	 * and arguments are also copied to the subsequent slots in argv.
	 */
	if (intp != NULL && intp->intp_name[0] != NULL) {
		int i;

		for (i = 0; i < INTP_MAXDEPTH; i++) {
			if (intp->intp_name[i] == NULL)
				break;
			error = stk_add(args, intp->intp_name[i], UIO_SYSSPACE);
			if (error != 0)
				return (error);
			if (intp->intp_arg[i] != NULL) {
				error = stk_add(args, intp->intp_arg[i],
				    UIO_SYSSPACE);
				if (error != 0)
					return (error);
			}
		}

		/* The script path itself becomes the next argument. */
		if (args->fname != NULL)
			error = stk_add(args, args->fname, UIO_SYSSPACE);
		else
			error = stk_add(args, uap->fname, UIO_USERSPACE);
		if (error)
			return (error);

		/*
		 * Check for an empty argv[].
		 */
		if (stk_getptr(args, argv, &sp))
			return (EFAULT);
		if (sp == NULL)
			argv_empty = 1;

		argv += ptrsize;		/* ignore original argv[0] */
	}

	if (argv_empty == 0) {
		/*
		 * Add argv[] strings to the stack.
		 */
		for (;;) {
			if (stk_getptr(args, argv, &sp))
				return (EFAULT);
			if (sp == NULL)
				break;
			if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
				return (error);
			argv += ptrsize;
		}
	}
	/* argc == number of offset words consumed so far. */
	argc = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
	args->arglen = args->stk_strp - args->stk_base;

	/*
	 * Add environ[] strings to the stack.
	 */
	if (envp != NULL) {
		for (;;) {
			char *tmp = args->stk_strp;
			if (stk_getptr(args, envp, &sp))
				return (EFAULT);
			if (sp == NULL)
				break;
			if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
				return (error);
			if (args->scrubenv && strncmp(tmp, "LD_", 3) == 0) {
				/* Undo the copied string */
				args->stk_strp = tmp;
				*(args->stk_offp++) = 0;
			}
			envp += ptrsize;
		}
	}
	args->na = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
	args->ne = args->na - argc;

	/*
	 * Add AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME, and
	 * AT_SUN_EMULATOR strings to the stack.
	 */
	if (auxvpp != NULL && *auxvpp != NULL) {
		if ((error = stk_add(args, platform, UIO_SYSSPACE)) != 0)
			return (error);
		if ((error = stk_add(args, args->pathname, UIO_SYSSPACE)) != 0)
			return (error);
		if (args->brandname != NULL &&
		    (error = stk_add(args, args->brandname, UIO_SYSSPACE)) != 0)
			return (error);
		if (args->emulator != NULL &&
		    (error = stk_add(args, args->emulator, UIO_SYSSPACE)) != 0)
			return (error);
	}

	/*
	 * Compute the size of the stack.  This includes all the pointers,
	 * the space reserved for the aux vector, and all the strings.
	 * The total number of pointers is args->na (which is argc + envc)
	 * plus 4 more: (1) a pointer's worth of space for argc; (2) the NULL
	 * after the last argument (i.e. argv[argc]); (3) the NULL after the
	 * last environment variable (i.e. envp[envc]); and (4) the NULL after
	 * all the strings, at the very top of the stack.
	 */
	size = (args->na + 4) * args->to_ptrsize + args->auxsize +
	    (args->stk_strp - args->stk_base);

	/*
	 * Pad the string section with zeroes to align the stack size.
	 */
	pad = P2NPHASE(size, args->stk_align);

	if (STK_AVAIL(args) < pad)
		return (E2BIG);

	args->usrstack_size = size + pad;

	while (pad-- != 0)
		*args->stk_strp++ = 0;

	args->nc = args->stk_strp - args->stk_base;

	return (0);
}
1724 
/*
 * Lay out the new user stack from the kernel buffer built by
 * stk_copyin(): argc, then the argv[] and envp[] pointer arrays (each
 * NULL-terminated), then the string area at the top; the auxv entries
 * that point into the string area are patched via *auxvpp.  Also
 * records u_argc/u_argv/u_envp and u_psargs for /proc.  Returns 0, or
 * -1 on a fault writing the new stack (the caller must kill the
 * process, per exec_args()).
 */
static int
stk_copyout(uarg_t *args, char *usrstack, void **auxvpp, user_t *up)
{
	size_t ptrsize = args->to_ptrsize;
	ssize_t pslen;
	char *kstrp = args->stk_base;
	char *ustrp = usrstack - args->nc - ptrsize;
	char *usp = usrstack - args->usrstack_size;
	int *offp = (int *)(args->stk_base + args->stk_size);
	int envc = args->ne;
	int argc = args->na - envc;
	int i;

	/*
	 * Record argc for /proc.
	 */
	up->u_argc = argc;

	/*
	 * Put argc on the stack.  Note that even though it's an int,
	 * it always consumes ptrsize bytes (for alignment).
	 */
	if (stk_putptr(args, usp, (char *)(uintptr_t)argc))
		return (-1);

	/*
	 * Add argc space (ptrsize) to usp and record argv for /proc.
	 */
	up->u_argv = (uintptr_t)(usp += ptrsize);

	/*
	 * Put the argv[] pointers on the stack.
	 */
	for (i = 0; i < argc; i++, usp += ptrsize)
		if (stk_putptr(args, usp, &ustrp[*--offp]))
			return (-1);

	/*
	 * Copy arguments to u_psargs.
	 */
	pslen = MIN(args->arglen, PSARGSZ) - 1;
	for (i = 0; i < pslen; i++)
		up->u_psargs[i] = (kstrp[i] == '\0' ? ' ' : kstrp[i]);
	while (i < PSARGSZ)
		up->u_psargs[i++] = '\0';

	/*
	 * Add space for argv[]'s NULL terminator (ptrsize) to usp and
	 * record envp for /proc.
	 */
	up->u_envp = (uintptr_t)(usp += ptrsize);

	/*
	 * Put the envp[] pointers on the stack.
	 */
	for (i = 0; i < envc; i++, usp += ptrsize)
		if (stk_putptr(args, usp, &ustrp[*--offp]))
			return (-1);

	/*
	 * Add space for envp[]'s NULL terminator (ptrsize) to usp and
	 * remember where the stack ends, which is also where auxv begins.
	 */
	args->stackend = usp += ptrsize;

	/*
	 * Put all the argv[], envp[], and auxv strings on the stack.
	 */
	if (copyout(args->stk_base, ustrp, args->nc))
		return (-1);

	/*
	 * Fill in the aux vector now that we know the user stack addresses
	 * for the AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME and
	 * AT_SUN_EMULATOR strings.
	 */
	if (auxvpp != NULL && *auxvpp != NULL) {
		if (args->to_model == DATAMODEL_NATIVE) {
			auxv_t **a = (auxv_t **)auxvpp;
			ADDAUX(*a, AT_SUN_PLATFORM, (long)&ustrp[*--offp])
			ADDAUX(*a, AT_SUN_EXECNAME, (long)&ustrp[*--offp])
			if (args->brandname != NULL)
				ADDAUX(*a,
				    AT_SUN_BRANDNAME, (long)&ustrp[*--offp])
			if (args->emulator != NULL)
				ADDAUX(*a,
				    AT_SUN_EMULATOR, (long)&ustrp[*--offp])
		} else {
			auxv32_t **a = (auxv32_t **)auxvpp;
			ADDAUX(*a,
			    AT_SUN_PLATFORM, (int)(uintptr_t)&ustrp[*--offp])
			ADDAUX(*a,
			    AT_SUN_EXECNAME, (int)(uintptr_t)&ustrp[*--offp])
			if (args->brandname != NULL)
				ADDAUX(*a, AT_SUN_BRANDNAME,
				    (int)(uintptr_t)&ustrp[*--offp])
			if (args->emulator != NULL)
				ADDAUX(*a, AT_SUN_EMULATOR,
				    (int)(uintptr_t)&ustrp[*--offp])
		}
	}

	return (0);
}
1829 
1830 /*
1831  * Though the actual stack base is constant, slew the %sp by a random aligned
1832  * amount in [0,aslr_max_stack_skew).  Mostly, this makes life slightly more
1833  * complicated for buffer overflows hoping to overwrite the return address.
1834  *
1835  * On some platforms this helps avoid cache thrashing when identical processes
1836  * simultaneously share caches that don't provide enough associativity
1837  * (e.g. sun4v systems). In this case stack slewing makes the same hot stack
1838  * variables in different processes live in different cache sets increasing
1839  * effective associativity.
1840  */
size_t
exec_get_spslew(void)
{
#ifdef sun4v
	/* Rotating stack-color state used when ASLR is not in effect. */
	static uint_t sp_color_stride = 16;
	static uint_t sp_color_mask = 0x1f;
	static uint_t sp_current_color = (uint_t)-1;
#endif
	size_t off;

	ASSERT(ISP2(aslr_max_stack_skew));

	if ((aslr_max_stack_skew == 0) ||
	    !secflag_enabled(curproc, PROC_SEC_ASLR)) {
#ifdef sun4v
		/*
		 * No ASLR: use deterministic per-exec stack coloring to
		 * spread stack addresses across cache sets.
		 */
		uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
		return ((size_t)((spcolor & sp_color_mask) *
		    SA(sp_color_stride)));
#else
		return (0);
#endif
	}

	/* ASLR: random, stack-aligned skew below aslr_max_stack_skew. */
	(void) random_get_pseudo_bytes((uint8_t *)&off, sizeof (off));
	return (SA(P2PHASE(off, aslr_max_stack_skew)));
}
1867 
1868 /*
1869  * Initialize a new user stack with the specified arguments and environment.
1870  * The initial user stack layout is as follows:
1871  *
1872  *	User Stack
1873  *	+---------------+
1874  *	|		|
1875  *	| stack guard	|
1876  *	| (64-bit only)	|
1877  *	|		|
1878  *	+...............+ <--- stack limit (base - curproc->p_stk_ctl)
1879  *	.		.
1880  *	.		.
1881  *	.		.
1882  *	+---------------+ <--- curproc->p_usrstack
1883  *	|		|
1884  *	| slew		|
1885  *	|		|
1886  *	+---------------+
1887  *	| NULL		|
1888  *	+---------------+
1889  *	|		|
1890  *	| auxv strings	|
1891  *	|		|
1892  *	+---------------+
1893  *	|		|
1894  *	| envp strings	|
1895  *	|		|
1896  *	+---------------+
1897  *	|		|
1898  *	| argv strings	|
1899  *	|		|
1900  *	+---------------+ <--- ustrp
1901  *	|		|
1902  *	| aux vector	|
1903  *	|		|
1904  *	+---------------+ <--- auxv
1905  *	| NULL		|
1906  *	+---------------+
1907  *	| envp[envc-1]	|
1908  *	+---------------+
1909  *	| ...		|
1910  *	+---------------+
1911  *	| envp[0]	|
1912  *	+---------------+ <--- envp[]
1913  *	| NULL		|
1914  *	+---------------+
1915  *	| argv[argc-1]	|
1916  *	+---------------+
1917  *	| ...		|
1918  *	+---------------+
1919  *	| argv[0]	|
1920  *	+---------------+ <--- argv[]
1921  *	| argc		|
1922  *	+---------------+ <--- stack base
1923  *
1924  * In 64-bit processes, a stack guard segment is allocated at the address
1925  * immediately below where the stack limit ends.  This protects new library
1926  * mappings (such as the linker) from being placed in relatively dangerous
1927  * proximity to the stack.
1928  */
int
exec_args(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
{
	size_t size;
	int error;
	proc_t *p = ttoproc(curthread);
	user_t *up = PTOU(p);
	char *usrstack;
	rctl_entity_p_t e;
	struct as *as;
	extern int use_stk_lpg;
	size_t sp_slew;
#if defined(_LP64)
	const size_t sg_sz = (stack_guard_seg_sz & PAGEMASK);
#endif /* defined(_LP64) */

	args->from_model = p->p_model;
	if (p->p_model == DATAMODEL_NATIVE) {
		args->from_ptrsize = sizeof (long);
	} else {
		args->from_ptrsize = sizeof (int32_t);
	}

	if (args->to_model == DATAMODEL_NATIVE) {
		args->to_ptrsize = sizeof (long);
		args->ncargs = NCARGS;
		args->stk_align = STACK_ALIGN;
		if (args->addr32)
			usrstack = (char *)USRSTACK64_32;
		else
			usrstack = (char *)USRSTACK;
	} else {
		args->to_ptrsize = sizeof (int32_t);
		args->ncargs = NCARGS32;
		args->stk_align = STACK_ALIGN32;
		usrstack = (char *)USRSTACK32;
	}

	ASSERT(P2PHASE((uintptr_t)usrstack, args->stk_align) == 0);

#if defined(__sparc)
	/*
	 * Make sure user register windows are empty before
	 * attempting to make a new stack.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	/*
	 * Buffer the args/environment in the kernel, retrying with a
	 * doubled buffer on E2BIG/ENAMETOOLONG up to the NCARGS limit.
	 */
	for (size = PAGESIZE; ; size *= 2) {
		args->stk_size = size;
		args->stk_base = kmem_alloc(size, KM_SLEEP);
		args->stk_strp = args->stk_base;
		args->stk_offp = (int *)(args->stk_base + size);
		error = stk_copyin(uap, args, intp, auxvpp);
		if (error == 0)
			break;
		kmem_free(args->stk_base, size);
		if (error != E2BIG && error != ENAMETOOLONG)
			return (error);
		if (size >= args->ncargs)
			return (E2BIG);
	}

	size = args->usrstack_size;

	ASSERT(error == 0);
	ASSERT(P2PHASE(size, args->stk_align) == 0);
	ASSERT((ssize_t)STK_AVAIL(args) >= 0);

	if (size > args->ncargs) {
		kmem_free(args->stk_base, args->stk_size);
		return (E2BIG);
	}

	/*
	 * Leave only the current lwp and force the other lwps to exit.
	 * If another lwp beat us to the punch by calling exit(), bail out.
	 */
	if ((error = exitlwps(0)) != 0) {
		kmem_free(args->stk_base, args->stk_size);
		return (error);
	}

	/*
	 * Revoke any doors created by the process.
	 */
	if (p->p_door_list)
		door_exit();

	/*
	 * Release schedctl data structures.
	 */
	if (p->p_pagep)
		schedctl_proc_cleanup();

	/*
	 * Clean up any DTrace helpers for the process.
	 */
	if (p->p_dtrace_helpers != NULL) {
		ASSERT(dtrace_helpers_cleanup != NULL);
		(*dtrace_helpers_cleanup)(p);
	}

	mutex_enter(&p->p_lock);
	/*
	 * Cleanup the DTrace provider associated with this process.
	 */
	if (p->p_dtrace_probes) {
		ASSERT(dtrace_fasttrap_exec_ptr != NULL);
		dtrace_fasttrap_exec_ptr(p);
	}
	mutex_exit(&p->p_lock);

	/*
	 * discard the lwpchan cache.
	 */
	if (p->p_lcp != NULL)
		lwpchan_destroy_cache(1);

	/*
	 * Delete the POSIX timers.
	 */
	if (p->p_itimer != NULL)
		timer_exit();

	/*
	 * Delete the ITIMER_REALPROF interval timer.
	 * The other ITIMER_* interval timers are specified
	 * to be inherited across exec().
	 */
	delete_itimer_realprof();

	if (AU_AUDITING())
		audit_exec(args->stk_base, args->stk_base + args->arglen,
		    args->na - args->ne, args->ne, args->pfcred);

	/*
	 * Ensure that we don't change resource associations while we
	 * change address spaces.
	 */
	mutex_enter(&p->p_lock);
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/*
	 * Destroy the old address space and create a new one.
	 * From here on, any errors are fatal to the exec()ing process.
	 * On error we return -1, which means the caller must SIGKILL
	 * the process.
	 */
	relvm();

	mutex_enter(&p->p_lock);
	pool_barrier_exit();
	mutex_exit(&p->p_lock);

	up->u_execsw = args->execswp;

	/* Reset per-image layout state for the new address space. */
	p->p_brkbase = NULL;
	p->p_brksize = 0;
	p->p_brkpageszc = 0;
	p->p_stksize = 0;
	p->p_stkpageszc = 0;
	p->p_stkg_start = 0;
	p->p_stkg_end = 0;
	p->p_model = args->to_model;
	p->p_usrstack = usrstack;
	p->p_stkprot = args->stk_prot;
	p->p_datprot = args->dat_prot;

	/*
	 * Reset resource controls such that all controls are again active as
	 * well as appropriate to the potentially new address model for the
	 * process.
	 */
	e.rcep_p.proc = p;
	e.rcep_t = RCENTITY_PROCESS;
	rctl_set_reset(p->p_rctls, p, &e);

	/* Too early to call map_pgsz for the heap */
	if (use_stk_lpg) {
		p->p_stkpageszc = page_szc(map_pgsz(MAPPGSZ_STK, p, 0, 0, 0));
	}

	mutex_enter(&p->p_lock);
	p->p_flag |= SAUTOLPG;	/* kernel controls page sizes */
	mutex_exit(&p->p_lock);

	sp_slew = exec_get_spslew();
	ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
	/* Be certain we don't underflow */
	VERIFY((curproc->p_usrstack - (size + sp_slew)) < curproc->p_usrstack);
	exec_set_sp(size + sp_slew);

	as = as_alloc();
	p->p_as = as;
	as->a_proc = p;
	if (p->p_model == DATAMODEL_ILP32 || args->addr32)
		as->a_userlimit = (caddr_t)USERLIMIT32;
	(void) hat_setup(as->a_hat, HAT_ALLOC);
	hat_join_srd(as->a_hat, args->ex_vp);

	/* Write out the contents of the new stack. */
	error = stk_copyout(args, usrstack - sp_slew, auxvpp, up);
	kmem_free(args->stk_base, args->stk_size);

#if defined(_LP64)
	/* Add stack guard segment (if needed) after successful copyout */
	if (error == 0 && p->p_model == DATAMODEL_LP64 && sg_sz != 0) {
		seghole_crargs_t sca;
		caddr_t addr_end = (caddr_t)(((uintptr_t)usrstack -
		    p->p_stk_ctl) & PAGEMASK);
		caddr_t addr_start = addr_end - sg_sz;

		DTRACE_PROBE4(stack__guard__chk, proc_t *, p,
		    caddr_t, addr_start, caddr_t, addr_end, size_t, sg_sz);

		if (addr_end >= usrstack || addr_start >= addr_end ||
		    valid_usr_range(addr_start, sg_sz, PROT_NONE, as,
		    as->a_userlimit) != RANGE_OKAY) {
			return (E2BIG);
		}

		/* Create un-mappable area in AS with seg_hole */
		sca.name = "stack_guard";
		error = as_map(as, addr_start, sg_sz, seghole_create, &sca);
		if (error == 0) {
			p->p_stkg_start = (uintptr_t)addr_start;
			p->p_stkg_end = (uintptr_t)addr_start + sg_sz;
		}
	}
#endif /* defined(_LP64) */

	return (error);
}
2164