1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* Copyright (c) 1988 AT&T */ 28 /* All Rights Reserved */ 29 30 #include <sys/types.h> 31 #include <sys/param.h> 32 #include <sys/sysmacros.h> 33 #include <sys/systm.h> 34 #include <sys/signal.h> 35 #include <sys/cred_impl.h> 36 #include <sys/policy.h> 37 #include <sys/user.h> 38 #include <sys/errno.h> 39 #include <sys/file.h> 40 #include <sys/vfs.h> 41 #include <sys/vnode.h> 42 #include <sys/mman.h> 43 #include <sys/acct.h> 44 #include <sys/cpuvar.h> 45 #include <sys/proc.h> 46 #include <sys/cmn_err.h> 47 #include <sys/debug.h> 48 #include <sys/pathname.h> 49 #include <sys/vm.h> 50 #include <sys/lgrp.h> 51 #include <sys/vtrace.h> 52 #include <sys/exec.h> 53 #include <sys/exechdr.h> 54 #include <sys/kmem.h> 55 #include <sys/prsystm.h> 56 #include <sys/modctl.h> 57 #include <sys/vmparam.h> 58 #include <sys/door.h> 59 #include <sys/schedctl.h> 60 #include <sys/utrap.h> 61 #include <sys/systeminfo.h> 62 #include <sys/stack.h> 63 #include <sys/rctl.h> 64 #include <sys/dtrace.h> 65 #include <sys/lwpchan_impl.h> 
#include <sys/pool.h>
#include <sys/sdt.h>
#include <sys/brand.h>

#include <c2/audit.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>

#define	PRIV_RESET	0x01	/* needs to reset privs */
#define	PRIV_SETID	0x02	/* needs to change uids */
#define	PRIV_SETUGID	0x04	/* is setuid/setgid/forced privs */
#define	PRIV_INCREASE	0x08	/* child runs with more privs */
#define	MAC_FLAGS	0x10	/* need to adjust MAC flags */

static int execsetid(struct vnode *, struct vattr *, uid_t *, uid_t *);
static int hold_execsw(struct execsw *);

uint_t auxv_hwcap = 0;	/* auxv AT_SUN_HWCAP value; determined on the fly */
#if defined(_SYSCALL32_IMPL)
uint_t auxv_hwcap32 = 0;	/* 32-bit version of auxv_hwcap */
#endif

/* Process flags cleared on entry to exec and conditionally re-set on exit */
#define	PSUIDFLAGS	(SNOCD|SUGID)

/*
 * exec() - wrapper around exece providing NULL environment pointer
 */
int
exec(const char *fname, const char **argp)
{
	return (exece(fname, argp, NULL));
}

/*
 * exece() - system call wrapper around exec_common()
 */
int
exece(const char *fname, const char **argp, const char **envp)
{
	int error;

	error = exec_common(fname, argp, envp, EBA_NONE);
	return (error ? (set_errno(error)) : 0);
}

/*
 * exec_common() - guts of exec(2): resolve the path name, run the
 * object-file-specific handler via gexec(), and reset the process and
 * lwp state (signals, rlimits, lwp directory, /proc visibility, branding)
 * for the new executable image.  Returns 0 on success or an errno value.
 */
int
exec_common(const char *fname, const char **argp, const char **envp,
    int brand_action)
{
	vnode_t *vp = NULL, *dir = NULL, *tmpvp = NULL;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct user *up = PTOU(p);
	long execsz;		/* temporary count of exec size */
	int i;
	int error;
	char exec_file[MAXCOMLEN+1];
	struct pathname pn;
	struct pathname resolvepn;
	struct uarg args;
	struct execa ua;
	k_sigset_t savedmask;
	lwpdir_t *lwpdir = NULL;
	tidhash_t *tidhash;
	lwpdir_t *old_lwpdir = NULL;
	uint_t old_lwpdir_sz;
	tidhash_t *old_tidhash;
	uint_t old_tidhash_sz;
	ret_tidhash_t *ret_tidhash;
	lwpent_t *lep;
	boolean_t brandme = B_FALSE;

	/*
	 * exec() is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (ENOTSUP);

	if (brand_action != EBA_NONE) {
		/*
		 * Brand actions are not supported for processes that are not
		 * running in a branded zone.
		 */
		if (!ZONE_IS_BRANDED(p->p_zone))
			return (ENOTSUP);

		if (brand_action == EBA_NATIVE) {
			/* Only branded processes can be unbranded */
			if (!PROC_IS_BRANDED(p))
				return (ENOTSUP);
		} else {
			/* Only unbranded processes can be branded */
			if (PROC_IS_BRANDED(p))
				return (ENOTSUP);
			brandme = B_TRUE;
		}
	} else {
		/*
		 * If this is a native zone, or if the process is already
		 * branded, then we don't need to do anything.  If this is
		 * a native process in a branded zone, we need to brand the
		 * process as it exec()s the new binary.
		 */
		if (ZONE_IS_BRANDED(p->p_zone) && !PROC_IS_BRANDED(p))
			brandme = B_TRUE;
	}

	/*
	 * Inform /proc that an exec() has started.
	 * Hold signals that are ignored by default so that we will
	 * not be interrupted by a signal that will be ignored after
	 * successful completion of gexec().
	 */
	mutex_enter(&p->p_lock);
	prexecstart();
	schedctl_finish_sigblock(curthread);
	savedmask = curthread->t_hold;
	sigorset(&curthread->t_hold, &ignoredefault);
	mutex_exit(&p->p_lock);

	/*
	 * Look up path name and remember last component for later.
	 * To help coreadm expand its %d token, we attempt to save
	 * the directory containing the executable in p_execdir. The
	 * first call to lookuppn() may fail and return EINVAL because
	 * dirvpp is non-NULL. In that case, we make a second call to
	 * lookuppn() with dirvpp set to NULL; p_execdir will be NULL,
	 * but coreadm is allowed to expand %d to the empty string and
	 * there are other cases in which that failure may occur.
	 */
	if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
		goto out;
	pn_alloc(&resolvepn);
	if ((error = lookuppn(&pn, &resolvepn, FOLLOW, &dir, &vp)) != 0) {
		pn_free(&resolvepn);
		pn_free(&pn);
		if (error != EINVAL)
			goto out;

		dir = NULL;
		if ((error = pn_get((char *)fname, UIO_USERSPACE, &pn)) != 0)
			goto out;
		pn_alloc(&resolvepn);
		if ((error = lookuppn(&pn, &resolvepn, FOLLOW, NULLVPP,
		    &vp)) != 0) {
			pn_free(&resolvepn);
			pn_free(&pn);
			goto out;
		}
	}
	if (vp == NULL) {
		if (dir != NULL)
			VN_RELE(dir);
		error = ENOENT;
		pn_free(&resolvepn);
		pn_free(&pn);
		goto out;
	}

	if ((error = secpolicy_basic_exec(CRED(), vp)) != 0) {
		if (dir != NULL)
			VN_RELE(dir);
		pn_free(&resolvepn);
		pn_free(&pn);
		VN_RELE(vp);
		goto out;
	}

	/*
	 * We do not allow executing files in attribute directories.
	 * We test this by determining whether the resolved path
	 * contains a "/" when we're in an attribute directory;
	 * only if the pathname does not contain a "/" the resolved path
	 * points to a file in the current working (attribute) directory.
	 */
	if ((p->p_user.u_cdir->v_flag & V_XATTRDIR) != 0 &&
	    strchr(resolvepn.pn_path, '/') == NULL) {
		if (dir != NULL)
			VN_RELE(dir);
		error = EACCES;
		pn_free(&resolvepn);
		pn_free(&pn);
		VN_RELE(vp);
		goto out;
	}

	bzero(exec_file, MAXCOMLEN+1);
	(void) strncpy(exec_file, pn.pn_path, MAXCOMLEN);
	bzero(&args, sizeof (args));
	args.pathname = resolvepn.pn_path;
	/* don't free resolvepn until we are done with args */
	pn_free(&pn);

	/*
	 * Specific exec handlers, or policies determined via
	 * /etc/system may override the historical default.
	 */
	args.stk_prot = PROT_ZFOD;
	args.dat_prot = PROT_ZFOD;

	CPU_STATS_ADD_K(sys, sysexec, 1);
	DTRACE_PROC1(exec, char *, args.pathname);

	ua.fname = fname;
	ua.argp = argp;
	ua.envp = envp;

	/* If necessary, brand this process before we start the exec. */
	if (brandme)
		brand_setbrand(p);

	if ((error = gexec(&vp, &ua, &args, NULL, 0, &execsz,
	    exec_file, p->p_cred, brand_action)) != 0) {
		/* gexec failed: undo the branding done above */
		if (brandme)
			brand_clearbrand(p);
		VN_RELE(vp);
		if (dir != NULL)
			VN_RELE(dir);
		pn_free(&resolvepn);
		goto fail;
	}

	/*
	 * Free floating point registers (sun4u only)
	 */
	ASSERT(lwp != NULL);
	lwp_freeregs(lwp, 1);

	/*
	 * Free thread and process context ops.
	 */
	if (curthread->t_ctx)
		freectx(curthread, 1);
	if (p->p_pctx)
		freepctx(p, 1);

	/*
	 * Remember file name for accounting; clear any cached DTrace predicate.
	 */
	up->u_acflag &= ~AFORK;
	bcopy(exec_file, up->u_comm, MAXCOMLEN+1);
	curthread->t_predcache = NULL;

	/*
	 * Clear contract template state
	 */
	lwp_ctmpl_clear(lwp);

	/*
	 * Save the directory in which we found the executable for expanding
	 * the %d token used in core file patterns.
	 */
	mutex_enter(&p->p_lock);
	tmpvp = p->p_execdir;
	p->p_execdir = dir;
	if (p->p_execdir != NULL)
		VN_HOLD(p->p_execdir);
	mutex_exit(&p->p_lock);

	/* release the old p_execdir, if any, outside of p_lock */
	if (tmpvp != NULL)
		VN_RELE(tmpvp);

	/*
	 * Reset stack state to the user stack, clear set of signals
	 * caught on the signal stack, and reset list of signals that
	 * restart system calls; the new program's environment should
	 * not be affected by detritus from the old program.  Any
	 * pending held signals remain held, so don't clear t_hold.
	 */
	mutex_enter(&p->p_lock);
	lwp->lwp_oldcontext = 0;
	lwp->lwp_ustack = 0;
	lwp->lwp_old_stk_ctl = 0;
	sigemptyset(&up->u_signodefer);
	sigemptyset(&up->u_sigonstack);
	sigemptyset(&up->u_sigresethand);
	lwp->lwp_sigaltstack.ss_sp = 0;
	lwp->lwp_sigaltstack.ss_size = 0;
	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;

	/*
	 * Make saved resource limit == current resource limit.
	 */
	for (i = 0; i < RLIM_NLIMITS; i++) {
		/*CONSTCOND*/
		if (RLIM_SAVED(i)) {
			(void) rctl_rlimit_get(rctlproc_legacy[i], p,
			    &up->u_saved_rlimit[i]);
		}
	}

	/*
	 * If the action was to catch the signal, then the action
	 * must be reset to SIG_DFL.
	 */
	sigdefault(p);
	p->p_flag &= ~(SNOWAIT|SJCTL);
	p->p_flag |= (SEXECED|SMSACCT|SMSFORK);
	up->u_signal[SIGCLD - 1] = SIG_DFL;

	/*
	 * Delete the dot4 sigqueues/signotifies.
	 */
	sigqfree(p);

	mutex_exit(&p->p_lock);

	/* Reset profil(2) state for the new image. */
	mutex_enter(&p->p_pflock);
	p->p_prof.pr_base = NULL;
	p->p_prof.pr_size = 0;
	p->p_prof.pr_off = 0;
	p->p_prof.pr_scale = 0;
	p->p_prof.pr_samples = 0;
	mutex_exit(&p->p_pflock);

	ASSERT(curthread->t_schedctl == NULL);

#if defined(__sparc)
	if (p->p_utraps != NULL)
		utrap_free(p);
#endif	/* __sparc */

	/*
	 * Close all close-on-exec files.
	 */
	close_exec(P_FINFO(p));
	TRACE_2(TR_FAC_PROC, TR_PROC_EXEC, "proc_exec:p %p up %p", p, up);

	/* Unbrand ourself if necessary. */
	if (PROC_IS_BRANDED(p) && (brand_action == EBA_NATIVE))
		brand_clearbrand(p);

	setregs(&args);

	/* Mark this as an executable vnode */
	mutex_enter(&vp->v_lock);
	vp->v_flag |= VVMEXEC;
	mutex_exit(&vp->v_lock);

	VN_RELE(vp);
	if (dir != NULL)
		VN_RELE(dir);
	pn_free(&resolvepn);

	/*
	 * Allocate a new lwp directory and lwpid hash table if necessary.
	 */
	if (curthread->t_tid != 1 || p->p_lwpdir_sz != 2) {
		lwpdir = kmem_zalloc(2 * sizeof (lwpdir_t), KM_SLEEP);
		lwpdir->ld_next = lwpdir + 1;
		tidhash = kmem_zalloc(2 * sizeof (tidhash_t), KM_SLEEP);
		if (p->p_lwpdir != NULL)
			lep = p->p_lwpdir[curthread->t_dslot].ld_entry;
		else
			lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);
	}

	if (PROC_IS_BRANDED(p))
		BROP(p)->b_exec();

	mutex_enter(&p->p_lock);
	prbarrier(p);

	/*
	 * Reset lwp id to the default value of 1.
	 * This is a single-threaded process now
	 * and lwp #1 is lwp_wait()able by default.
	 * The t_unpark flag should not be inherited.
	 */
	ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);
	curthread->t_tid = 1;
	kpreempt_disable();
	ASSERT(curthread->t_lpl != NULL);
	p->p_t1_lgrpid = curthread->t_lpl->lpl_lgrpid;
	kpreempt_enable();
	if (p->p_tr_lgrpid != LGRP_NONE && p->p_tr_lgrpid != p->p_t1_lgrpid) {
		lgrp_update_trthr_migrations(1);
	}
	curthread->t_unpark = 0;
	curthread->t_proc_flag |= TP_TWAIT;
	curthread->t_proc_flag &= ~TP_DAEMON;	/* daemons shouldn't exec */
	p->p_lwpdaemon = 0;			/* but oh well ... */
	p->p_lwpid = 1;

	/*
	 * Install the newly-allocated lwp directory and lwpid hash table
	 * and insert the current thread into the new hash table.
	 */
	if (lwpdir != NULL) {
		old_lwpdir = p->p_lwpdir;
		old_lwpdir_sz = p->p_lwpdir_sz;
		old_tidhash = p->p_tidhash;
		old_tidhash_sz = p->p_tidhash_sz;
		p->p_lwpdir = p->p_lwpfree = lwpdir;
		p->p_lwpdir_sz = 2;
		lep->le_thread = curthread;
		lep->le_lwpid = curthread->t_tid;
		lep->le_start = curthread->t_start;
		lwp_hash_in(p, lep, tidhash, 2, 0);
		p->p_tidhash = tidhash;
		p->p_tidhash_sz = 2;
	}
	ret_tidhash = p->p_ret_tidhash;
	p->p_ret_tidhash = NULL;

	/*
	 * Restore the saved signal mask and
	 * inform /proc that the exec() has finished.
	 */
	curthread->t_hold = savedmask;
	prexecend();
	mutex_exit(&p->p_lock);

	/* Free the replaced lwp directory/hash tables outside of p_lock. */
	if (old_lwpdir) {
		kmem_free(old_lwpdir, old_lwpdir_sz * sizeof (lwpdir_t));
		kmem_free(old_tidhash, old_tidhash_sz * sizeof (tidhash_t));
	}
	while (ret_tidhash != NULL) {
		ret_tidhash_t *next = ret_tidhash->rth_next;
		kmem_free(ret_tidhash->rth_tidhash,
		    ret_tidhash->rth_tidhash_sz * sizeof (tidhash_t));
		kmem_free(ret_tidhash, sizeof (*ret_tidhash));
		ret_tidhash = next;
	}

	ASSERT(error == 0);
	DTRACE_PROC(exec__success);
	return (0);

fail:
	DTRACE_PROC1(exec__failure, int, error);
out:		/* error return */
	mutex_enter(&p->p_lock);
	curthread->t_hold = savedmask;
	prexecend();
	mutex_exit(&p->p_lock);
	ASSERT(error != 0);
	return (error);
}


/*
 * Perform generic exec duties and switchout to object-file specific
 * handler.
 */
int
gexec(
	struct vnode **vpp,
	struct execa *uap,
	struct uarg *args,
	struct intpdata *idatap,
	int level,
	long *execsz,
	caddr_t exec_file,
	struct cred *cred,
	int brand_action)
{
	struct vnode *vp;
	proc_t *pp = ttoproc(curthread);
	struct execsw *eswp;
	int error = 0;
	int suidflags = 0;
	ssize_t resid;
	uid_t uid, gid;
	struct vattr vattr;
	char magbuf[MAGIC_BYTES];
	int setid;
	cred_t *oldcred, *newcred = NULL;
	int privflags = 0;
	int setidfl;

	/*
	 * If the SNOCD or SUGID flag is set, turn it off and remember the
	 * previous setting so we can restore it if we encounter an error.
	 */
	if (level == 0 && (pp->p_flag & PSUIDFLAGS)) {
		mutex_enter(&pp->p_lock);
		suidflags = pp->p_flag & PSUIDFLAGS;
		pp->p_flag &= ~PSUIDFLAGS;
		mutex_exit(&pp->p_lock);
	}

	if ((error = execpermissions(*vpp, &vattr, args)) != 0)
		goto bad;

	/* need to open vnode for stateful file systems like rfs */
	if ((error = VOP_OPEN(vpp, FREAD, CRED(), NULL)) != 0)
		goto bad;
	vp = *vpp;

	/*
	 * Note: to support binary compatibility with SunOS a.out
	 * executables, we read in the first four bytes, as the
	 * magic number is in bytes 2-3.
	 */
	if (error = vn_rdwr(UIO_READ, vp, magbuf, sizeof (magbuf),
	    (offset_t)0, UIO_SYSSPACE, 0, (rlim64_t)0, CRED(), &resid))
		goto bad;
	if (resid != 0)
		goto bad;

	/*
	 * Match the magic bytes against the execsw table; on success,
	 * findexec_by_hdr() returns with eswp->exec_lock held as reader
	 * (see hold_execsw()), released after exec_func below.
	 */
	if ((eswp = findexec_by_hdr(magbuf)) == NULL)
		goto bad;

	if (level == 0 &&
	    (privflags = execsetid(vp, &vattr, &uid, &gid)) != 0) {

		/* Work on a private copy of the cred; installed later. */
		newcred = cred = crdup(cred);

		/* If we can, drop the PA bit */
		if ((privflags & PRIV_RESET) != 0)
			priv_adjust_PA(cred);

		if (privflags & PRIV_SETID) {
			cred->cr_uid = uid;
			cred->cr_gid = gid;
			cred->cr_suid = uid;
			cred->cr_sgid = gid;
		}

		if (privflags & MAC_FLAGS) {
			if (!(CR_FLAGS(cred) & NET_MAC_AWARE_INHERIT))
				CR_FLAGS(cred) &= ~NET_MAC_AWARE;
			CR_FLAGS(cred) &= ~NET_MAC_AWARE_INHERIT;
		}

		/*
		 * Implement the privilege updates:
		 *
		 * Restrict with L:
		 *
		 *	I' = I & L
		 *
		 *	E' = P' = (I' + F) & A
		 *
		 * But if running under ptrace, we cap I with P.
		 */
		if ((privflags & PRIV_RESET) != 0) {
			if ((privflags & PRIV_INCREASE) != 0 &&
			    (pp->p_proc_flag & P_PR_PTRACE) != 0)
				priv_intersect(&CR_OPPRIV(cred),
				    &CR_IPRIV(cred));
			priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
			CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
			priv_adjust_PA(cred);
		}
	}

	/* SunOS 4.x buy-back */
	if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) &&
	    (vattr.va_mode & (VSUID|VSGID))) {
		char path[MAXNAMELEN];
		refstr_t *mntpt = NULL;
		int ret = -1;

		bzero(path, sizeof (path));
		zone_hold(pp->p_zone);

		ret = vnodetopath(pp->p_zone->zone_rootvp, vp, path,
		    sizeof (path), cred);

		/* fallback to mountpoint if a path can't be found */
		if ((ret != 0) || (ret == 0 && path[0] == '\0'))
			mntpt = vfs_getmntpoint(vp->v_vfsp);

		if (mntpt == NULL)
			zcmn_err(pp->p_zone->zone_id, CE_NOTE,
			    "!uid %d: setuid execution not allowed, "
			    "file=%s", cred->cr_uid, path);
		else
			zcmn_err(pp->p_zone->zone_id, CE_NOTE,
			    "!uid %d: setuid execution not allowed, "
			    "fs=%s, file=%s", cred->cr_uid,
			    ZONE_PATH_TRANSLATE(refstr_value(mntpt),
			    pp->p_zone), exec_file);

		if (!INGLOBALZONE(pp)) {
			/* zone_rootpath always has trailing / */
			if (mntpt == NULL)
				cmn_err(CE_NOTE, "!zone: %s, uid: %d "
				    "setuid execution not allowed, file=%s%s",
				    pp->p_zone->zone_name, cred->cr_uid,
				    pp->p_zone->zone_rootpath, path + 1);
			else
				cmn_err(CE_NOTE, "!zone: %s, uid: %d "
				    "setuid execution not allowed, fs=%s, "
				    "file=%s", pp->p_zone->zone_name,
				    cred->cr_uid, refstr_value(mntpt),
				    exec_file);
		}

		if (mntpt != NULL)
			refstr_rele(mntpt);

		zone_rele(pp->p_zone);
	}

	/*
	 * execsetid() told us whether or not we had to change the
	 * credentials of the process.  In privflags, it told us
	 * whether we gained any privileges or executed a set-uid executable.
	 */
	setid = (privflags & (PRIV_SETUGID|PRIV_INCREASE));

	/*
	 * Use /etc/system variable to determine if the stack
	 * should be marked as executable by default.
	 */
	if (noexec_user_stack)
		args->stk_prot &= ~PROT_EXEC;

	args->execswp = eswp;	/* Save execsw pointer in uarg for exec_func */
	args->ex_vp = vp;

	/*
	 * Traditionally, the setid flags told the sub processes whether
	 * the file just executed was set-uid or set-gid; this caused
	 * some confusion as the 'setid' flag did not match the SUGID
	 * process flag which is only set when the uids/gids do not match.
	 * A script set-gid/set-uid to the real uid/gid would start with
	 * /dev/fd/X but an executable would happily trust LD_LIBRARY_PATH.
	 * Now we flag those cases where the calling process cannot
	 * be trusted to influence the newly exec'ed process, either
	 * because it runs with more privileges or when the uids/gids
	 * do in fact not match.
	 * This also makes the runtime linker agree with the on exec
	 * values of SNOCD and SUGID.
	 */
	setidfl = 0;
	if (cred->cr_uid != cred->cr_ruid || (cred->cr_rgid != cred->cr_gid &&
	    !supgroupmember(cred->cr_gid, cred))) {
		setidfl |= EXECSETID_UGIDS;
	}
	if (setid & PRIV_SETUGID)
		setidfl |= EXECSETID_SETID;
	if (setid & PRIV_INCREASE)
		setidfl |= EXECSETID_PRIVS;

	error = (*eswp->exec_func)(vp, uap, args, idatap, level, execsz,
	    setidfl, exec_file, cred, brand_action);
	rw_exit(eswp->exec_lock);	/* drop hold from findexec_by_hdr() */
	if (error != 0) {
		if (newcred != NULL)
			crfree(newcred);
		goto bad;
	}

	if (level == 0) {
		mutex_enter(&pp->p_crlock);
		if (newcred != NULL) {
			/*
			 * Free the old credentials, and set the new ones.
			 * Do this for both the process and the (single) thread.
			 */
			crfree(pp->p_cred);
			pp->p_cred = cred;	/* cred already held for proc */
			crhold(cred);		/* hold new cred for thread */
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			oldcred = curthread->t_cred;
			curthread->t_cred = cred;
			crfree(oldcred);

			if (priv_basic_test >= 0 &&
			    !PRIV_ISASSERT(&CR_IPRIV(newcred),
			    priv_basic_test)) {
				pid_t pid = pp->p_pid;
				char *fn = PTOU(pp)->u_comm;

				cmn_err(CE_WARN, "%s[%d]: exec: basic_test "
				    "privilege removed from E/I", fn, pid);
			}
		}
		/*
		 * On emerging from a successful exec(), the saved
		 * uid and gid equal the effective uid and gid.
		 */
		cred->cr_suid = cred->cr_uid;
		cred->cr_sgid = cred->cr_gid;

		/*
		 * If the real and effective ids do not match, this
		 * is a setuid process that should not dump core.
		 * The group comparison is tricky; we prevent the code
		 * from flagging SNOCD when executing with an effective gid
		 * which is a supplementary group.
		 */
		if (cred->cr_ruid != cred->cr_uid ||
		    (cred->cr_rgid != cred->cr_gid &&
		    !supgroupmember(cred->cr_gid, cred)) ||
		    (privflags & PRIV_INCREASE) != 0)
			suidflags = PSUIDFLAGS;
		else
			suidflags = 0;

		mutex_exit(&pp->p_crlock);
		if (suidflags) {
			mutex_enter(&pp->p_lock);
			pp->p_flag |= suidflags;
			mutex_exit(&pp->p_lock);
		}
		if (setid && (pp->p_proc_flag & P_PR_PTRACE) == 0) {
			/*
			 * If process is traced via /proc, arrange to
			 * invalidate the associated /proc vnode.
			 */
			if (pp->p_plist || (pp->p_proc_flag & P_PR_TRACE))
				args->traceinval = 1;
		}
		if (pp->p_proc_flag & P_PR_PTRACE)
			psignal(pp, SIGTRAP);
		if (args->traceinval)
			prinvalidate(&pp->p_user);
	}

	return (0);
bad:
	if (error == 0)
		error = ENOEXEC;

	/* Restore the SNOCD/SUGID flags we cleared on entry. */
	if (suidflags) {
		mutex_enter(&pp->p_lock);
		pp->p_flag |= suidflags;
		mutex_exit(&pp->p_lock);
	}
	return (error);
}

extern char *execswnames[];

/*
 * Claim the first free slot in the execsw[] table for the named exec
 * module and record its magic string.  Returns the claimed entry, or
 * NULL if the table is full.
 */
struct execsw *
allocate_execsw(char *name, char *magic, size_t magic_size)
{
	int i, j;
	char *ename;
	char *magicp;

	mutex_enter(&execsw_lock);
	for (i = 0; i < nexectype; i++) {
		if (execswnames[i] == NULL) {
			ename = kmem_alloc(strlen(name) + 1, KM_SLEEP);
			(void) strcpy(ename, name);
			execswnames[i] = ename;
			/*
			 * Set the magic number last so that we
			 * don't need to hold the execsw_lock in
			 * findexectype().
			 */
			magicp = kmem_alloc(magic_size, KM_SLEEP);
			for (j = 0; j < magic_size; j++)
				magicp[j] = magic[j];
			execsw[i].exec_magic = magicp;
			mutex_exit(&execsw_lock);
			return (&execsw[i]);
		}
	}
	mutex_exit(&execsw_lock);
	return (NULL);
}

/*
 * Find the exec switch table entry with the corresponding magic string.
834 */ 835 struct execsw * 836 findexecsw(char *magic) 837 { 838 struct execsw *eswp; 839 840 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) { 841 ASSERT(eswp->exec_maglen <= MAGIC_BYTES); 842 if (magic && eswp->exec_maglen != 0 && 843 bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0) 844 return (eswp); 845 } 846 return (NULL); 847 } 848 849 /* 850 * Find the execsw[] index for the given exec header string by looking for the 851 * magic string at a specified offset and length for each kind of executable 852 * file format until one matches. If no execsw[] entry is found, try to 853 * autoload a module for this magic string. 854 */ 855 struct execsw * 856 findexec_by_hdr(char *header) 857 { 858 struct execsw *eswp; 859 860 for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) { 861 ASSERT(eswp->exec_maglen <= MAGIC_BYTES); 862 if (header && eswp->exec_maglen != 0 && 863 bcmp(&header[eswp->exec_magoff], eswp->exec_magic, 864 eswp->exec_maglen) == 0) { 865 if (hold_execsw(eswp) != 0) 866 return (NULL); 867 return (eswp); 868 } 869 } 870 return (NULL); /* couldn't find the type */ 871 } 872 873 /* 874 * Find the execsw[] index for the given magic string. If no execsw[] entry 875 * is found, try to autoload a module for this magic string. 
 */
struct execsw *
findexec_by_magic(char *magic)
{
	struct execsw *eswp;

	for (eswp = execsw; eswp < &execsw[nexectype]; eswp++) {
		ASSERT(eswp->exec_maglen <= MAGIC_BYTES);
		if (magic && eswp->exec_maglen != 0 &&
		    bcmp(magic, eswp->exec_magic, eswp->exec_maglen) == 0) {
			if (hold_execsw(eswp) != 0)
				return (NULL);
			return (eswp);
		}
	}
	return (NULL);	/* couldn't find the type */
}

/*
 * Acquire the entry's exec_lock as reader, autoloading the exec module
 * via modload() if it is not yet loaded.  Returns 0 with the lock held,
 * or -1 (lock dropped) if the module cannot be loaded.
 */
static int
hold_execsw(struct execsw *eswp)
{
	char *name;

	rw_enter(eswp->exec_lock, RW_READER);
	while (!LOADED_EXEC(eswp)) {
		/* drop the lock across modload() and re-check afterwards */
		rw_exit(eswp->exec_lock);
		name = execswnames[eswp-execsw];
		ASSERT(name);
		if (modload("exec", name) == -1)
			return (-1);
		rw_enter(eswp->exec_lock, RW_READER);
	}
	return (0);
}

/*
 * Determine the credential changes a set-id exec requires.  Computes the
 * uid/gid the new image should run with (returned through *uidp/*gidp
 * only when PRIV_SETID is set) and returns a bitmask of PRIV_RESET,
 * PRIV_SETID, PRIV_SETUGID and MAC_FLAGS describing the work to do.
 */
static int
execsetid(struct vnode *vp, struct vattr *vattrp, uid_t *uidp, uid_t *gidp)
{
	proc_t *pp = ttoproc(curthread);
	uid_t uid, gid;
	cred_t *cr = pp->p_cred;
	int privflags = 0;

	/*
	 * Remember credentials.
	 */
	uid = cr->cr_uid;
	gid = cr->cr_gid;

	/* Will try to reset the PRIV_AWARE bit later. */
	if ((CR_FLAGS(cr) & (PRIV_AWARE|PRIV_AWARE_INHERIT)) == PRIV_AWARE)
		privflags |= PRIV_RESET;

	if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) == 0) {
		/*
		 * Set-uid root execution only allowed if the limit set
		 * holds all unsafe privileges.
		 */
		if ((vattrp->va_mode & VSUID) && (vattrp->va_uid != 0 ||
		    priv_issubset(&priv_unsafe, &CR_LPRIV(cr)))) {
			uid = vattrp->va_uid;
			privflags |= PRIV_SETUGID;
		}
		if (vattrp->va_mode & VSGID) {
			gid = vattrp->va_gid;
			privflags |= PRIV_SETUGID;
		}
	}

	/*
	 * Do we need to change our credential anyway?
	 * This is the case when E != I or P != I, as
	 * we need to do the assignments (with F empty and A full)
	 * Or when I is not a subset of L; in that case we need to
	 * enforce L.
	 *
	 *	I' = L & I
	 *
	 *	E' = P' = (I' + F) & A
	 *	or
	 *	E' = P' = I'
	 */
	if (!priv_isequalset(&CR_EPRIV(cr), &CR_IPRIV(cr)) ||
	    !priv_issubset(&CR_IPRIV(cr), &CR_LPRIV(cr)) ||
	    !priv_isequalset(&CR_PPRIV(cr), &CR_IPRIV(cr)))
		privflags |= PRIV_RESET;

	/* If MAC-aware flag(s) are on, need to update cred to remove. */
	if ((CR_FLAGS(cr) & NET_MAC_AWARE) ||
	    (CR_FLAGS(cr) & NET_MAC_AWARE_INHERIT))
		privflags |= MAC_FLAGS;

	/*
	 * When we introduce the "forced" set then we will need
	 * to set PRIV_INCREASE here if I not a subset of P.
	 * If the "allowed" set is introduced we will need to do
	 * a similar thing; however, it seems more reasonable to
	 * have the allowed set reduce "L": script language interpreters
	 * would typically have an allowed set of "all".
	 */

	/*
	 * Set setuid/setgid protections if no ptrace() compatibility.
	 * For privileged processes, honor setuid/setgid even in
	 * the presence of ptrace() compatibility.
	 */
	if (((pp->p_proc_flag & P_PR_PTRACE) == 0 ||
	    PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, (uid == 0))) &&
	    (cr->cr_uid != uid ||
	    cr->cr_gid != gid ||
	    cr->cr_suid != uid ||
	    cr->cr_sgid != gid)) {
		*uidp = uid;
		*gidp = gid;
		privflags |= PRIV_SETID;
	}
	return (privflags);
}

/*
 * Fetch the file's attributes into *vattrp and verify that the current
 * process may execute it: execute access, a regular (or /proc object)
 * file, not on a noexec filesystem, and at least one execute mode bit.
 */
int
execpermissions(struct vnode *vp, struct vattr *vattrp, struct uarg *args)
{
	int error;
	proc_t *p = ttoproc(curthread);

	vattrp->va_mask = AT_MODE | AT_UID | AT_GID | AT_SIZE;
	if (error = VOP_GETATTR(vp, vattrp, ATTR_EXEC, p->p_cred, NULL))
		return (error);
	/*
	 * Check the access mode.
	 * If VPROC, ask /proc if the file is an object file.
	 */
	if ((error = VOP_ACCESS(vp, VEXEC, 0, p->p_cred, NULL)) != 0 ||
	    !(vp->v_type == VREG || (vp->v_type == VPROC && pr_isobject(vp))) ||
	    (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0 ||
	    (vattrp->va_mode & (VEXEC|(VEXEC>>3)|(VEXEC>>6))) == 0) {
		if (error == 0)
			error = EACCES;
		return (error);
	}

	if ((p->p_plist || (p->p_proc_flag & (P_PR_PTRACE|P_PR_TRACE))) &&
	    (error = VOP_ACCESS(vp, VREAD, 0, p->p_cred, NULL))) {
		/*
		 * If process is under ptrace(2) compatibility,
		 * fail the exec(2).
		 */
		if (p->p_proc_flag & P_PR_PTRACE)
			goto bad;
		/*
		 * Process is traced via /proc.
		 * Arrange to invalidate the /proc vnode.
		 */
		args->traceinval = 1;
	}
	return (0);
bad:
	if (error == 0)
		error = ENOEXEC;
	return (error);
}

/*
 * Map a section of an executable file into the user's
 * address space.
 */
int
execmap(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
    off_t offset, int prot, int page, uint_t szc)
{
	int error = 0;
	off_t oldoffset;
	caddr_t zfodbase, oldaddr;
	size_t end, oldlen;
	size_t zfoddiff;
	label_t ljb;
	proc_t *p = ttoproc(curthread);

	/* Page-align the requested address/offset, remembering the originals */
	oldaddr = addr;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if (len) {
		oldlen = len;
		len += ((size_t)oldaddr - (size_t)addr);
		oldoffset = offset;
		offset = (off_t)((uintptr_t)offset & PAGEMASK);
		if (page) {
			spgcnt_t  prefltmem, availm, npages;
			int preread;
			uint_t  mflag = MAP_PRIVATE | MAP_FIXED;

			if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
				mflag |= MAP_TEXT;
			} else {
				mflag |= MAP_INITDATA;
			}

			if (valid_usr_range(addr, len, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}
			if (error = VOP_MAP(vp, (offset_t)offset,
			    p->p_as, &addr, len, prot, PROT_ALL,
			    mflag, CRED(), NULL))
				goto bad;

			/*
			 * If the segment can fit, then we prefault
			 * the entire segment in.  This is based on the
			 * model that says the best working set of a
			 * small program is all of its pages.
			 */
			npages = (spgcnt_t)btopr(len);
			prefltmem = freemem - desfree;
			preread =
			    (npages < prefltmem && len < PGTHRESH) ? 1 : 0;

			/*
			 * If we aren't prefaulting the segment,
			 * increment "deficit", if necessary to ensure
			 * that pages will become available when this
			 * process starts executing.
			 */
			availm = freemem - lotsfree;
			if (preread == 0 && npages > availm &&
			    deficit < lotsfree) {
				deficit += MIN((pgcnt_t)(npages - availm),
				    lotsfree - deficit);
			}

			if (preread) {
				TRACE_2(TR_FAC_PROC, TR_EXECMAP_PREREAD,
				    "execmap preread:freemem %d size %lu",
				    freemem, len);
				(void) as_fault(p->p_as->a_hat, p->p_as,
				    (caddr_t)addr, len, F_INVAL, S_READ);
			}
		} else {
			/* Non-paged path: zfod mapping plus an explicit read. */
			if (valid_usr_range(addr, len, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}

			if (error = as_map(p->p_as, addr, len,
			    segvn_create, zfod_argsp))
				goto bad;
			/*
			 * Read in the segment in one big chunk.
			 */
			if (error = vn_rdwr(UIO_READ, vp, (caddr_t)oldaddr,
			    oldlen, (offset_t)oldoffset, UIO_USERSPACE, 0,
			    (rlim64_t)0, CRED(), (ssize_t *)0))
				goto bad;
			/*
			 * Now set protections.
			 */
			if (prot != PROT_ZFOD) {
				(void) as_setprot(p->p_as, (caddr_t)addr,
				    len, prot);
			}
		}
	}

	if (zfodlen) {
		struct as *as = curproc->p_as;
		struct seg *seg;
		uint_t zprot = 0;

		end = (size_t)addr + len;
		zfodbase = (caddr_t)roundup(end, PAGESIZE);
		zfoddiff = (uintptr_t)zfodbase - end;
		if (zfoddiff) {
			/*
			 * Before we go to zero the remaining space on the last
			 * page, make sure we have write permission.
			 */

			AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			seg = as_segat(curproc->p_as, (caddr_t)end);
			if (seg != NULL)
				SEGOP_GETPROT(seg, (caddr_t)end, zfoddiff - 1,
				    &zprot);
			AS_LOCK_EXIT(as, &as->a_lock);

			if (seg != NULL && (zprot & PROT_WRITE) == 0) {
				/* temporarily grant write for the uzero() */
				(void) as_setprot(as, (caddr_t)end,
				    zfoddiff - 1, zprot | PROT_WRITE);
			}

			if (on_fault(&ljb)) {
				no_fault();
				/* restore the original protections on error */
				if (seg != NULL && (zprot & PROT_WRITE) == 0)
					(void) as_setprot(as, (caddr_t)end,
					    zfoddiff - 1, zprot);
				error = EFAULT;
				goto bad;
			}
			uzero((void *)end, zfoddiff);
			no_fault();
			if (seg != NULL && (zprot & PROT_WRITE) == 0)
				(void) as_setprot(as, (caddr_t)end,
				    zfoddiff - 1, zprot);
		}
		if (zfodlen > zfoddiff) {
			struct segvn_crargs crargs =
			    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

			zfodlen -= zfoddiff;
			if (valid_usr_range(zfodbase, zfodlen, prot, p->p_as,
			    p->p_as->a_userlimit) != RANGE_OKAY) {
				error = ENOMEM;
				goto bad;
			}
			if (szc > 0) {
				/*
				 * ASSERT alignment because the mapelfexec()
				 * caller for the szc > 0 case extended zfod
				 * so it's end is pgsz aligned.
				 */
				size_t pgsz = page_get_pagesize(szc);
				ASSERT(IS_P2ALIGNED(zfodbase + zfodlen, pgsz));

				if (IS_P2ALIGNED(zfodbase, pgsz)) {
					crargs.szc = szc;
				} else {
					crargs.szc = AS_MAP_HEAP;
				}
			} else {
				crargs.szc = AS_MAP_NO_LPOOB;
			}
			if (error = as_map(p->p_as, (caddr_t)zfodbase,
			    zfodlen, segvn_create, &crargs))
				goto bad;
			if (prot != PROT_ZFOD) {
				(void) as_setprot(p->p_as, (caddr_t)zfodbase,
				    zfodlen, prot);
			}
		}
	}
	return (0);
bad:
	return (error);
}

/*
 * Record the new image's break/bss layout in the proc structure and
 * swap the executable vnode reference; also reset the lwp's altstack.
 */
void
setexecenv(struct execenv *ep)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct vnode *vp;

	p->p_bssbase = ep->ex_bssbase;
	p->p_brkbase = ep->ex_brkbase;
	p->p_brksize = ep->ex_brksize;
	if (p->p_exec)
		VN_RELE(p->p_exec);	/* out with the old */
	vp = p->p_exec = ep->ex_vp;
	if (vp != NULL)
		VN_HOLD(vp);		/* in with the new */

	lwp->lwp_sigaltstack.ss_sp = 0;
	lwp->lwp_sigaltstack.ss_size = 0;
	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
}

/*
 * Open the executable's vnode read-only in a fresh file descriptor,
 * returning the fd through *fdp.  On failure *fdp is set to -1.
 */
int
execopen(struct vnode **vpp, int *fdp)
{
	struct vnode *vp = *vpp;
	file_t *fp;
	int error = 0;
	int filemode = FREAD;

	VN_HOLD(vp);		/* open reference */
	if (error = falloc(NULL, filemode, &fp, fdp)) {
		VN_RELE(vp);
		*fdp = -1;	/* just in case falloc changed value */
		return (error);
	}
	if (error = VOP_OPEN(&vp, filemode, CRED(), NULL)) {
		VN_RELE(vp);
		setf(*fdp, NULL);
		unfalloc(fp);
		*fdp = -1;
		return (error);
	}
	*vpp = vp;		/* vnode should not have changed */
	fp->f_vnode = vp;
	mutex_exit(&fp->f_tlock);
	setf(*fdp, fp);
	return (0);
}

/* Close the file descriptor opened by execopen(). */
int
execclose(int fd)
{
	return (closeandsetf(fd, NULL));
}


/*
 * noexec stub function.
 */
/*ARGSUSED*/
int
noexec(
    struct vnode *vp,
    struct execa *uap,
    struct uarg *args,
    struct intpdata *idatap,
    int level,
    long *execsz,
    int setid,
    caddr_t exec_file,
    struct cred *cred)
{
	cmn_err(CE_WARN, "missing exec capability for %s", uap->fname);
	return (ENOEXEC);
}

/*
 * Support routines for building a user stack.
 *
 * execve(path, argv, envp) must construct a new stack with the specified
 * arguments and environment variables (see exec_args() for a description
 * of the user stack layout).  To do this, we copy the arguments and
 * environment variables from the old user address space into the kernel,
 * free the old as, create the new as, and copy our buffered information
 * to the new stack.  Our kernel buffer has the following structure:
 *
 *	+-----------------------+ <--- stk_base + stk_size
 *	| string offsets	|
 *	+-----------------------+ <--- stk_offp
 *	|			|
 *	| STK_AVAIL() space	|
 *	|			|
 *	+-----------------------+ <--- stk_strp
 *	| strings		|
 *	+-----------------------+ <--- stk_base
 *
 * When we add a string, we store the string's contents (including the null
 * terminator) at stk_strp, and we store the offset of the string relative to
 * stk_base at --stk_offp.  As strings are added, stk_strp increases and
 * stk_offp decreases.  The amount of space remaining, STK_AVAIL(), is just
 * the difference between these pointers.  If we run out of space, we return
 * an error and exec_args() starts all over again with a buffer twice as large.
 * When we're all done, the kernel buffer looks like this:
 *
 *	+-----------------------+ <--- stk_base + stk_size
 *	| argv[0] offset	|
 *	+-----------------------+
 *	| ...			|
 *	+-----------------------+
 *	| argv[argc-1] offset	|
 *	+-----------------------+
 *	| envp[0] offset	|
 *	+-----------------------+
 *	| ...			|
 *	+-----------------------+
 *	| envp[envc-1] offset	|
 *	+-----------------------+
 *	| AT_SUN_PLATFORM offset|
 *	+-----------------------+
 *	| AT_SUN_EXECNAME offset|
 *	+-----------------------+ <--- stk_offp
 *	|			|
 *	| STK_AVAIL() space	|
 *	|			|
 *	+-----------------------+ <--- stk_strp
 *	| AT_SUN_EXECNAME string|
 *	+-----------------------+
 *	| AT_SUN_PLATFORM string|
 *	+-----------------------+
 *	| envp[envc-1] string	|
 *	+-----------------------+
 *	| ...			|
 *	+-----------------------+
 *	| envp[0] string	|
 *	+-----------------------+
 *	| argv[argc-1] string	|
 *	+-----------------------+
 *	| ...			|
 *	+-----------------------+
 *	| argv[0] string	|
 *	+-----------------------+ <--- stk_base
 */

#define	STK_AVAIL(args)		((char *)(args)->stk_offp - (args)->stk_strp)

/*
 * Add a string to the stack.
 */
static int
stk_add(uarg_t *args, const char *sp, enum uio_seg segflg)
{
	int error;
	size_t len;

	/* Reserve the offset slot first; fail if even that won't fit. */
	if (STK_AVAIL(args) < sizeof (int))
		return (E2BIG);
	*--args->stk_offp = args->stk_strp - args->stk_base;

	/* Copy the string (and its NUL) in from user or kernel space. */
	if (segflg == UIO_USERSPACE) {
		error = copyinstr(sp, args->stk_strp, STK_AVAIL(args), &len);
		if (error != 0)
			return (error);
	} else {
		len = strlen(sp) + 1;
		if (len > STK_AVAIL(args))
			return (E2BIG);
		bcopy(sp, args->stk_strp, len);
	}

	args->stk_strp += len;

	return (0);
}

/*
 * Fetch a user pointer from 'src', honoring the data model of the
 * exec'ing (source) process.  Returns 0 on success, nonzero on fault.
 */
static int
stk_getptr(uarg_t *args, char *src, char **dst)
{
	int error;

	if (args->from_model == DATAMODEL_NATIVE) {
		ulong_t ptr;
		error = fulword(src, &ptr);
		*dst = (caddr_t)ptr;
	} else {
		uint32_t ptr;
		error = fuword32(src, &ptr);
		*dst = (caddr_t)(uintptr_t)ptr;
	}
	return (error);
}

/*
 * Store a pointer-sized value at user address 'addr', honoring the data
 * model of the new (target) image.  Returns 0 on success.
 */
static int
stk_putptr(uarg_t *args, char *addr, char *value)
{
	if (args->to_model == DATAMODEL_NATIVE)
		return (sulword(addr, (ulong_t)value));
	else
		return (suword32(addr, (uint32_t)(uintptr_t)value));
}

/*
 * Copy the argument and environment strings (plus the interpreter name
 * and the auxv strings) into the kernel stack buffer, per the layout
 * described above, and compute the size the new user stack will need.
 * Returns 0 on success or an errno value; E2BIG/ENAMETOOLONG tell
 * exec_args() to retry with a buffer twice as large.
 */
static int
stk_copyin(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
{
	char *sp;
	int argc, error;
	int argv_empty = 0;
	size_t ptrsize = args->from_ptrsize;
	size_t size, pad;
	char *argv = (char *)uap->argp;
	char *envp = (char *)uap->envp;

	/*
	 * Copy interpreter's name and argument to argv[0] and argv[1].
	 */
	if (intp != NULL && intp->intp_name != NULL) {
		if ((error = stk_add(args, intp->intp_name, UIO_SYSSPACE)) != 0)
			return (error);
		if (intp->intp_arg != NULL &&
		    (error = stk_add(args, intp->intp_arg, UIO_SYSSPACE)) != 0)
			return (error);
		if (args->fname != NULL)
			error = stk_add(args, args->fname, UIO_SYSSPACE);
		else
			error = stk_add(args, uap->fname, UIO_USERSPACE);
		if (error)
			return (error);

		/*
		 * Check for an empty argv[].
		 */
		if (stk_getptr(args, argv, &sp))
			return (EFAULT);
		if (sp == NULL)
			argv_empty = 1;

		argv += ptrsize;		/* ignore original argv[0] */
	}

	if (argv_empty == 0) {
		/*
		 * Add argv[] strings to the stack.
		 */
		for (;;) {
			if (stk_getptr(args, argv, &sp))
				return (EFAULT);
			if (sp == NULL)
				break;
			if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
				return (error);
			argv += ptrsize;
		}
	}
	/* argc == number of offset slots consumed so far. */
	argc = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
	args->arglen = args->stk_strp - args->stk_base;

	/*
	 * Add environ[] strings to the stack.
	 */
	if (envp != NULL) {
		for (;;) {
			if (stk_getptr(args, envp, &sp))
				return (EFAULT);
			if (sp == NULL)
				break;
			if ((error = stk_add(args, sp, UIO_USERSPACE)) != 0)
				return (error);
			envp += ptrsize;
		}
	}
	args->na = (int *)(args->stk_base + args->stk_size) - args->stk_offp;
	args->ne = args->na - argc;

	/*
	 * Add AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME, and
	 * AT_SUN_EMULATOR strings to the stack.
	 */
	if (auxvpp != NULL && *auxvpp != NULL) {
		if ((error = stk_add(args, platform, UIO_SYSSPACE)) != 0)
			return (error);
		if ((error = stk_add(args, args->pathname, UIO_SYSSPACE)) != 0)
			return (error);
		if (args->brandname != NULL &&
		    (error = stk_add(args, args->brandname, UIO_SYSSPACE)) != 0)
			return (error);
		if (args->emulator != NULL &&
		    (error = stk_add(args, args->emulator, UIO_SYSSPACE)) != 0)
			return (error);
	}

	/*
	 * Compute the size of the stack.  This includes all the pointers,
	 * the space reserved for the aux vector, and all the strings.
	 * The total number of pointers is args->na (which is argc + envc)
	 * plus 4 more: (1) a pointer's worth of space for argc; (2) the NULL
	 * after the last argument (i.e. argv[argc]); (3) the NULL after the
	 * last environment variable (i.e. envp[envc]); and (4) the NULL after
	 * all the strings, at the very top of the stack.
	 */
	size = (args->na + 4) * args->to_ptrsize + args->auxsize +
	    (args->stk_strp - args->stk_base);

	/*
	 * Pad the string section with zeroes to align the stack size.
	 */
	pad = P2NPHASE(size, args->stk_align);

	if (STK_AVAIL(args) < pad)
		return (E2BIG);

	args->usrstack_size = size + pad;

	while (pad-- != 0)
		*args->stk_strp++ = 0;

	args->nc = args->stk_strp - args->stk_base;

	return (0);
}

/*
 * Copy the buffered stack image out to the new user stack at 'usrstack'
 * (already adjusted for any slew): argc, the argv[] and envp[] pointer
 * arrays, the strings themselves, and the final user addresses of the
 * auxv strings.  The offset array is consumed with *--offp in exactly
 * the reverse of the order stk_copyin() filled it.  Returns 0 on
 * success, -1 if a stack write faults (caller must kill the process).
 */
static int
stk_copyout(uarg_t *args, char *usrstack, void **auxvpp, user_t *up)
{
	size_t ptrsize = args->to_ptrsize;
	ssize_t pslen;
	char *kstrp = args->stk_base;
	char *ustrp = usrstack - args->nc - ptrsize;
	char *usp = usrstack - args->usrstack_size;
	int *offp = (int *)(args->stk_base + args->stk_size);
	int envc = args->ne;
	int argc = args->na - envc;
	int i;

	/*
	 * Record argc for /proc.
	 */
	up->u_argc = argc;

	/*
	 * Put argc on the stack.  Note that even though it's an int,
	 * it always consumes ptrsize bytes (for alignment).
	 */
	if (stk_putptr(args, usp, (char *)(uintptr_t)argc))
		return (-1);

	/*
	 * Add argc space (ptrsize) to usp and record argv for /proc.
	 */
	up->u_argv = (uintptr_t)(usp += ptrsize);

	/*
	 * Put the argv[] pointers on the stack.
	 */
	for (i = 0; i < argc; i++, usp += ptrsize)
		if (stk_putptr(args, usp, &ustrp[*--offp]))
			return (-1);

	/*
	 * Copy arguments to u_psargs.
	 */
	pslen = MIN(args->arglen, PSARGSZ) - 1;
	for (i = 0; i < pslen; i++)
		up->u_psargs[i] = (kstrp[i] == '\0' ? ' ' : kstrp[i]);
	while (i < PSARGSZ)
		up->u_psargs[i++] = '\0';

	/*
	 * Add space for argv[]'s NULL terminator (ptrsize) to usp and
	 * record envp for /proc.
	 */
	up->u_envp = (uintptr_t)(usp += ptrsize);

	/*
	 * Put the envp[] pointers on the stack.
	 */
	for (i = 0; i < envc; i++, usp += ptrsize)
		if (stk_putptr(args, usp, &ustrp[*--offp]))
			return (-1);

	/*
	 * Add space for envp[]'s NULL terminator (ptrsize) to usp and
	 * remember where the stack ends, which is also where auxv begins.
	 */
	args->stackend = usp += ptrsize;

	/*
	 * Put all the argv[], envp[], and auxv strings on the stack.
	 */
	if (copyout(args->stk_base, ustrp, args->nc))
		return (-1);

	/*
	 * Fill in the aux vector now that we know the user stack addresses
	 * for the AT_SUN_PLATFORM, AT_SUN_EXECNAME, AT_SUN_BRANDNAME and
	 * AT_SUN_EMULATOR strings.
	 */
	if (auxvpp != NULL && *auxvpp != NULL) {
		if (args->to_model == DATAMODEL_NATIVE) {
			auxv_t **a = (auxv_t **)auxvpp;
			ADDAUX(*a, AT_SUN_PLATFORM, (long)&ustrp[*--offp])
			ADDAUX(*a, AT_SUN_EXECNAME, (long)&ustrp[*--offp])
			if (args->brandname != NULL)
				ADDAUX(*a,
				    AT_SUN_BRANDNAME, (long)&ustrp[*--offp])
			if (args->emulator != NULL)
				ADDAUX(*a,
				    AT_SUN_EMULATOR, (long)&ustrp[*--offp])
		} else {
			auxv32_t **a = (auxv32_t **)auxvpp;
			ADDAUX(*a,
			    AT_SUN_PLATFORM, (int)(uintptr_t)&ustrp[*--offp])
			ADDAUX(*a,
			    AT_SUN_EXECNAME, (int)(uintptr_t)&ustrp[*--offp])
			if (args->brandname != NULL)
				ADDAUX(*a, AT_SUN_BRANDNAME,
				    (int)(uintptr_t)&ustrp[*--offp])
			if (args->emulator != NULL)
				ADDAUX(*a, AT_SUN_EMULATOR,
				    (int)(uintptr_t)&ustrp[*--offp])
		}
	}

	return (0);
}

/*
 * Initialize a new user stack with the specified arguments and environment.
 * The initial user stack layout is as follows:
 *
 *	User Stack
 *	+---------------+ <--- curproc->p_usrstack
 *	|		|
 *	| slew		|
 *	|		|
 *	+---------------+
 *	| NULL		|
 *	+---------------+
 *	|		|
 *	| auxv strings	|
 *	|		|
 *	+---------------+
 *	|		|
 *	| envp strings	|
 *	|		|
 *	+---------------+
 *	|		|
 *	| argv strings	|
 *	|		|
 *	+---------------+ <--- ustrp
 *	|		|
 *	| aux vector	|
 *	|		|
 *	+---------------+ <--- auxv
 *	| NULL		|
 *	+---------------+
 *	| envp[envc-1]	|
 *	+---------------+
 *	| ...		|
 *	+---------------+
 *	| envp[0]	|
 *	+---------------+ <--- envp[]
 *	| NULL		|
 *	+---------------+
 *	| argv[argc-1]	|
 *	+---------------+
 *	| ...		|
 *	+---------------+
 *	| argv[0]	|
 *	+---------------+ <--- argv[]
 *	| argc		|
 *	+---------------+ <--- stack base
 */
/*
 * Buffer the args/environment in the kernel, force the other lwps to
 * exit, tear down the old address space, build a fresh one, and write
 * out the initial user stack.  Returns 0 on success or an errno value;
 * once relvm() has run, any error is fatal and the caller must SIGKILL
 * the process.
 */
int
exec_args(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
{
	size_t size;
	int error;
	proc_t *p = ttoproc(curthread);
	user_t *up = PTOU(p);
	char *usrstack;
	rctl_entity_p_t e;
	struct as *as;
	extern int use_stk_lpg;
	size_t sp_slew;

	/* The calling image's data model governs how we read argv/envp. */
	args->from_model = p->p_model;
	if (p->p_model == DATAMODEL_NATIVE) {
		args->from_ptrsize = sizeof (long);
	} else {
		args->from_ptrsize = sizeof (int32_t);
	}

	/* The target image's data model governs the new stack's geometry. */
	if (args->to_model == DATAMODEL_NATIVE) {
		args->to_ptrsize = sizeof (long);
		args->ncargs = NCARGS;
		args->stk_align = STACK_ALIGN;
		if (args->addr32)
			usrstack = (char *)USRSTACK64_32;
		else
			usrstack = (char *)USRSTACK;
	} else {
		args->to_ptrsize = sizeof (int32_t);
		args->ncargs = NCARGS32;
		args->stk_align = STACK_ALIGN32;
		usrstack = (char *)USRSTACK32;
	}

	ASSERT(P2PHASE((uintptr_t)usrstack, args->stk_align) == 0);

#if defined(__sparc)
	/*
	 * Make sure user register windows are empty before
	 * attempting to make a new stack.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	/*
	 * Copy the args into a kernel buffer, doubling the buffer (up to
	 * ncargs) each time stk_copyin() reports it ran out of room.
	 */
	for (size = PAGESIZE; ; size *= 2) {
		args->stk_size = size;
		args->stk_base = kmem_alloc(size, KM_SLEEP);
		args->stk_strp = args->stk_base;
		args->stk_offp = (int *)(args->stk_base + size);
		error = stk_copyin(uap, args, intp, auxvpp);
		if (error == 0)
			break;
		kmem_free(args->stk_base, size);
		if (error != E2BIG && error != ENAMETOOLONG)
			return (error);
		if (size >= args->ncargs)
			return (E2BIG);
	}

	size = args->usrstack_size;

	ASSERT(error == 0);
	ASSERT(P2PHASE(size, args->stk_align) == 0);
	ASSERT((ssize_t)STK_AVAIL(args) >= 0);

	if (size > args->ncargs) {
		kmem_free(args->stk_base, args->stk_size);
		return (E2BIG);
	}

	/*
	 * Leave only the current lwp and force the other lwps to exit.
	 * If another lwp beat us to the punch by calling exit(), bail out.
	 */
	if ((error = exitlwps(0)) != 0) {
		kmem_free(args->stk_base, args->stk_size);
		return (error);
	}

	/*
	 * Revoke any doors created by the process.
	 */
	if (p->p_door_list)
		door_exit();

	/*
	 * Release schedctl data structures.
	 */
	if (p->p_pagep)
		schedctl_proc_cleanup();

	/*
	 * Clean up any DTrace helpers for the process.
	 */
	if (p->p_dtrace_helpers != NULL) {
		ASSERT(dtrace_helpers_cleanup != NULL);
		(*dtrace_helpers_cleanup)();
	}

	mutex_enter(&p->p_lock);
	/*
	 * Cleanup the DTrace provider associated with this process.
	 */
	if (p->p_dtrace_probes) {
		ASSERT(dtrace_fasttrap_exec_ptr != NULL);
		dtrace_fasttrap_exec_ptr(p);
	}
	mutex_exit(&p->p_lock);

	/*
	 * discard the lwpchan cache.
	 */
	if (p->p_lcp != NULL)
		lwpchan_destroy_cache(1);

	/*
	 * Delete the POSIX timers.
	 */
	if (p->p_itimer != NULL)
		timer_exit();

	/*
	 * Delete the ITIMER_REALPROF interval timer.
	 * The other ITIMER_* interval timers are specified
	 * to be inherited across exec().
	 */
	delete_itimer_realprof();

	if (audit_active)
		audit_exec(args->stk_base, args->stk_base + args->arglen,
		    args->na - args->ne, args->ne);

	/*
	 * Ensure that we don't change resource associations while we
	 * change address spaces.
	 */
	mutex_enter(&p->p_lock);
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/*
	 * Destroy the old address space and create a new one.
	 * From here on, any errors are fatal to the exec()ing process.
	 * On error we return -1, which means the caller must SIGKILL
	 * the process.
	 */
	relvm();

	mutex_enter(&p->p_lock);
	pool_barrier_exit();
	mutex_exit(&p->p_lock);

	up->u_execsw = args->execswp;

	/* Reset the per-process VM bookkeeping for the new image. */
	p->p_brkbase = NULL;
	p->p_brksize = 0;
	p->p_brkpageszc = 0;
	p->p_stksize = 0;
	p->p_stkpageszc = 0;
	p->p_model = args->to_model;
	p->p_usrstack = usrstack;
	p->p_stkprot = args->stk_prot;
	p->p_datprot = args->dat_prot;

	/*
	 * Reset resource controls such that all controls are again active as
	 * well as appropriate to the potentially new address model for the
	 * process.
	 */
	e.rcep_p.proc = p;
	e.rcep_t = RCENTITY_PROCESS;
	rctl_set_reset(p->p_rctls, p, &e);

	/* Too early to call map_pgsz for the heap */
	if (use_stk_lpg) {
		p->p_stkpageszc = page_szc(map_pgsz(MAPPGSZ_STK, p, 0, 0, 0));
	}

	mutex_enter(&p->p_lock);
	p->p_flag |= SAUTOLPG;	/* kernel controls page sizes */
	mutex_exit(&p->p_lock);

	/*
	 * Some platforms may choose to randomize real stack start by adding a
	 * small slew (not more than a few hundred bytes) to the top of the
	 * stack.  This helps avoid cache thrashing when identical processes
	 * simultaneously share caches that don't provide enough associativity
	 * (e.g. sun4v systems).  In this case stack slewing makes the same hot
	 * stack variables in different processes to live in different cache
	 * sets increasing effective associativity.
	 */
	sp_slew = exec_get_spslew();
	ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
	exec_set_sp(size + sp_slew);

	as = as_alloc();
	p->p_as = as;
	as->a_proc = p;
	if (p->p_model == DATAMODEL_ILP32 || args->addr32)
		as->a_userlimit = (caddr_t)USERLIMIT32;
	(void) hat_setup(as->a_hat, HAT_ALLOC);
	hat_join_srd(as->a_hat, args->ex_vp);

	/*
	 * Finally, write out the contents of the new stack.
	 */
	error = stk_copyout(args, usrstack - sp_slew, auxvpp, up);
	kmem_free(args->stk_base, args->stk_size);
	return (error);
}