/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/asan.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/umtxvar.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exec;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
    &coredump_pack_fileinfo, 0,
    "Enable file path packing in 'procstat -f' coredump notes");

int coredump_pack_vmmapinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
    &coredump_pack_vmmapinfo, 0,
    "Enable file path packing in 'procstat -v' coredump notes");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
    struct mac *mac_p, struct vmspace *oldvmspace);

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
    "Location of process' ps_strings structure");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
    "Top of process stack");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_stackprot, "I",
    "Stack memory permissions");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0,
    "Process' command line characters cache limit");

static int disallow_high_osrel;
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
    &disallow_high_osrel, 0,
    "Disallow execution of binaries built for a higher version of the world");

static int map_at_zero = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
    "Permit processes to map an object at virtual address 0.");

static int core_dump_can_intr = 1;
SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
    &core_dump_can_intr, 0,
    "Core dumping interruptible with SIGKILL");

static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t ps_strings;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val;

		val = (unsigned int)PROC_PS_STRINGS(p);
		return (SYSCTL_OUT(req, &val, sizeof(val)));
	}
#endif
	ps_strings = PROC_PS_STRINGS(p);
	return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t val;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val32;

		val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
		return (SYSCTL_OUT(req, &val32, sizeof(val32)));
	}
#endif
	val = round_page(p->p_vmspace->vm_stacktop);
	return (SYSCTL_OUT(req, &val, sizeof(val)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;

	p = curproc;
	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
	    sizeof(p->p_sysent->sv_stackprot)));
}

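/*
 * A minimal sketch (illustrative only, not part of this file) of how the
 * sysctls above can be queried from userspace with sysctlbyname(3):
 *
 *	u_long usrstack;
 *	size_t len = sizeof(usrstack);
 *
 *	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) == 0)
 *		printf("stack top: %#lx\n", usrstack);
 *
 * Note that kern.ps_strings and kern.usrstack carry CTLFLAG_CAPRD and so
 * remain readable in capability mode.
 */
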
/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;
	char **argv;
	char **envv;
};
#endif

int
sys_execve(struct thread *td, struct execve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, NULL, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
	int fd;
	char **argv;
	char **envv;
};
#endif
int
sys_fexecve(struct thread *td, struct fexecve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
	    uap->argv, uap->envv);
	if (error == 0) {
		args.fd = uap->fd;
		error = kern_execve(td, &args, NULL, oldvmspace);
	}
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
	char *fname;
	char **argv;
	char **envv;
	struct mac *mac_p;
};
#endif

int
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
{
#ifdef MAC
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, uap->mac_p, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
#else
	return (ENOSYS);
#endif
}

int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
	struct proc *p;
	int error;

	KASSERT(td == curthread, ("non-current thread %p", td));
	error = 0;
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		while (p->p_singlethr > 0) {
			error = msleep(&p->p_singlethr, &p->p_mtx,
			    PWAIT | PCATCH, "exec1t", 0);
			if (error != 0) {
				error = ERESTART;
				goto unlock;
			}
		}
		if (thread_single(p, SINGLE_BOUNDARY) != 0)
			error = ERESTART;
unlock:
		PROC_UNLOCK(p);
	}
	KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
	    ("nested execve"));
	*oldvmspace = p->p_vmspace;
	return (error);
}

void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
	struct proc *p;

	KASSERT(td == curthread, ("non-current thread %p", td));
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/*
		 * On success, we upgrade to the SINGLE_EXIT state to
		 * force the other threads to exit.
		 */
		if (error == EJUSTRETURN)
			thread_single(p, SINGLE_EXIT);
		else
			thread_single_end(p, SINGLE_BOUNDARY);
		PROC_UNLOCK(p);
	}
	exec_cleanup(td, oldvmspace);
}

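/*
 * Descriptive note: each execve(2) flavor above follows the same shape.
 * pre_execve() single-threads the process at a boundary and records the
 * old vmspace; kern_execve() consumes the image arguments; post_execve()
 * either commits (error == EJUSTRETURN) by upgrading to SINGLE_EXIT, or
 * ends the single-threading, and finally drops the old vmspace via
 * exec_cleanup() if it was replaced.
 */
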
/*
 * kern_execve() has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{

	TSEXEC(td->td_proc->p_pid, args->begin_argv);
	AUDIT_ARG_ARGV(args->begin_argv, args->argc,
	    exec_args_get_begin_envv(args) - args->begin_argv);
	AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
	    args->endp - exec_args_get_begin_envv(args));

	/* Must have at least one argument. */
	if (args->argc == 0) {
		exec_free_args(args);
		return (EINVAL);
	}
	return (do_execve(td, args, mac_p, oldvmspace));
}

static void
execve_nosetid(struct image_params *imgp)
{
	imgp->credential_setid = false;
	if (imgp->newcred != NULL) {
		crfree(imgp->newcred);
		imgp->newcred = NULL;
	}
}

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 */
static int
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{
	struct proc *p = td->td_proc;
	struct nameidata nd;
	struct ucred *oldcred;
	struct uidinfo *euip = NULL;
	uintptr_t stack_base;
	struct image_params image_params, *imgp;
	struct vattr attr;
	int (*img_first)(struct image_params *);
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
#ifdef KTRACE
	struct ktr_io_params *kiop;
#endif
	struct vnode *oldtextvp, *newtextvp;
	struct vnode *oldtextdvp, *newtextdvp;
	char *oldbinname, *newbinname;
	bool credential_changing;
#ifdef MAC
	struct label *interpvplabel = NULL;
	bool will_transition;
#endif
#ifdef HWPMC_HOOKS
	struct pmckern_procexec pe;
#endif
	int error, i, orig_osrel;
	uint32_t orig_fctl0;
	Elf_Brandinfo *orig_brandinfo;
	size_t freepath_size;
	static const char fexecv_proc_title[] = "(fexecv)";

	imgp = &image_params;
	oldtextvp = oldtextdvp = NULL;
	newtextvp = newtextdvp = NULL;
	newbinname = oldbinname = NULL;
#ifdef KTRACE
	kiop = NULL;
#endif

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/*
	 * Initialize part of the common data.
	 */
	bzero(imgp, sizeof(*imgp));
	imgp->proc = p;
	imgp->attr = &attr;
	imgp->args = args;
	oldcred = p->p_ucred;
	orig_osrel = p->p_osrel;
	orig_fctl0 = p->p_fctl0;
	orig_brandinfo = p->p_elf_brandinfo;

#ifdef MAC
	error = mac_execve_enter(imgp, mac_p);
	if (error)
		goto exec_fail;
#endif

	SDT_PROBE1(proc, , , exec, args->fname);

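	/*
	 * Descriptive note: control may pass the "interpret" label below
	 * more than once.  When an image activator recognizes an
	 * interpreted image (e.g. a #! script), it sets imgp->interpreted
	 * and imgp->interpreter_name, and the lookup restarts here with
	 * the interpreter as the file to execute.
	 */
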
interpret:
	if (args->fname != NULL) {
#ifdef CAPABILITY_MODE
		/*
		 * While capability mode can't reach this point via direct
		 * path arguments to execve(), we also don't allow
		 * interpreters to be used in capability mode (for now).
		 * Catch indirect lookups and return a permissions error.
		 */
		if (IN_CAPABILITY_MODE(td)) {
			error = ECAPMODE;
			goto exec_fail;
		}
#endif

		/*
		 * Translate the file name.  namei() returns a vnode
		 * pointer in ni_vp among other things.
		 */
		NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
		    SAVENAME | AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
		    args->fname);

		error = namei(&nd);
		if (error)
			goto exec_fail;

		newtextvp = nd.ni_vp;
		newtextdvp = nd.ni_dvp;
		nd.ni_dvp = NULL;
		newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
		    M_WAITOK);
		memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
		newbinname[nd.ni_cnd.cn_namelen] = '\0';
		imgp->vp = newtextvp;

		/*
		 * Do the best to calculate the full path to the image file.
		 */
		if (args->fname[0] == '/') {
			imgp->execpath = args->fname;
		} else {
			VOP_UNLOCK(imgp->vp);
			freepath_size = MAXPATHLEN;
			if (vn_fullpath_hardlink(newtextvp, newtextdvp,
			    newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
			    &imgp->freepath, &freepath_size) != 0)
				imgp->execpath = args->fname;
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
	} else {
		AUDIT_ARG_FD(args->fd);

		/*
		 * If the descriptor was not opened with O_PATH, then
		 * we require that it was opened with O_EXEC or
		 * O_RDONLY.  In either case, exec_check_permissions()
		 * below checks the _current_ file access mode regardless
		 * of the permissions additionally checked at the
		 * open(2).
		 */
		error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
		    &newtextvp);
		if (error != 0)
			goto exec_fail;

		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	}

	/*
	 * Check file permissions.  Also 'opens' file and sets its vnode to
	 * text mode.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->object = imgp->vp->v_object;
	if (imgp->object != NULL)
		vm_object_reference(imgp->object);

	error = exec_map_first_page(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->proc->p_osrel = 0;
	imgp->proc->p_fctl0 = 0;
	imgp->proc->p_elf_brandinfo = NULL;

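	/*
	 * Descriptive note: p_osrel, p_fctl0 and p_elf_brandinfo are
	 * cleared here so that the image activators can fill them in for
	 * the new image; on failure they are restored from the orig_*
	 * copies at exec_fail_dealloc below.
	 */
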
	/*
	 * Implement image setuid/setgid.
	 *
	 * Determine new credentials before attempting image activators
	 * so that they can be used by process_exec handlers to determine
	 * credential/setid changes.
	 *
	 * Don't honor setuid/setgid if the filesystem prohibits it or if
	 * the process is being traced.
	 *
	 * We disable setuid/setgid/etc in capability mode on the basis
	 * that most setugid applications are not written with that
	 * environment in mind, and will therefore almost certainly operate
	 * incorrectly.  In principle there's no reason that setugid
	 * applications might not be useful in capability mode, so we may want
	 * to reconsider this conservative design choice in the future.
	 *
	 * XXXMAC: For the time being, use NOSUID to also prohibit
	 * transitions on the file system.
	 */
	credential_changing = false;
	credential_changing |= (attr.va_mode & S_ISUID) &&
	    oldcred->cr_uid != attr.va_uid;
	credential_changing |= (attr.va_mode & S_ISGID) &&
	    oldcred->cr_gid != attr.va_gid;
#ifdef MAC
	will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
	    interpvplabel, imgp) != 0;
	credential_changing |= will_transition;
#endif

	/* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
	if (credential_changing)
		imgp->proc->p_pdeathsig = 0;

	if (credential_changing &&
#ifdef CAPABILITY_MODE
	    ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
#endif
	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
	    (p->p_flag & P_TRACED) == 0) {
		imgp->credential_setid = true;
		VOP_UNLOCK(imgp->vp);
		imgp->newcred = crdup(oldcred);
		if (attr.va_mode & S_ISUID) {
			euip = uifind(attr.va_uid);
			change_euid(imgp->newcred, euip);
		}
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (attr.va_mode & S_ISGID)
			change_egid(imgp->newcred, attr.va_gid);
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXXMAC: Note that the current logic will save the
		 * uid and gid if a MAC domain transition occurs, even
		 * though maybe it shouldn't.
		 */
		change_svuid(imgp->newcred, imgp->newcred->cr_uid);
		change_svgid(imgp->newcred, imgp->newcred->cr_gid);
	} else {
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXX: It's not clear that the existing behavior is
		 * POSIX-compliant.  A number of sources indicate that the
		 * saved uid/gid should only be updated if the new ruid is
		 * not equal to the old ruid, or the new euid is not equal
		 * to the old euid and the new euid is not equal to the old
		 * ruid.  The FreeBSD code always updates the saved uid/gid.
		 * Also, this code uses the new (replaced) euid and egid as
		 * the source, which may or may not be the right ones to use.
		 */
		if (oldcred->cr_svuid != oldcred->cr_uid ||
		    oldcred->cr_svgid != oldcred->cr_gid) {
			VOP_UNLOCK(imgp->vp);
			imgp->newcred = crdup(oldcred);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
			change_svuid(imgp->newcred, imgp->newcred->cr_uid);
			change_svgid(imgp->newcred, imgp->newcred->cr_gid);
		}
	}
	/* The new credentials are installed into the process later. */

	/*
	 * If the current process has a special image activator it
	 * wants to try first, call it.  For example, emulating shell
	 * scripts differently.
	 */
	error = -1;
	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
		error = img_first(imgp);

	/*
	 * Loop through the list of image activators, calling each one.
	 * An activator returns -1 if there is no match, 0 on success,
	 * and an error otherwise.
	 */
	for (i = 0; error == -1 && execsw[i]; ++i) {
		if (execsw[i]->ex_imgact == NULL ||
		    execsw[i]->ex_imgact == img_first) {
			continue;
		}
		error = (*execsw[i]->ex_imgact)(imgp);
	}

	if (error) {
		if (error == -1)
			error = ENOEXEC;
		goto exec_fail_dealloc;
	}

	/*
	 * Special interpreter operation: clean up and loop back to try to
	 * activate the interpreter.
	 */
	if (imgp->interpreted) {
		exec_unmap_first_page(imgp);
		/*
		 * The text reference needs to be removed for scripts.
		 * There is a short period before we determine that
		 * something is a script, during which the text reference
		 * is still active.  The vnode lock is held over this
		 * entire period so nothing should illegitimately be
		 * blocked.
		 */
		MPASS(imgp->textset);
		VOP_UNSET_TEXT_CHECKED(newtextvp);
		imgp->textset = false;
		/* free name buffer and old vnode */
#ifdef MAC
		mac_execve_interpreter_enter(newtextvp, &interpvplabel);
#endif
		if (imgp->opened) {
			VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
			imgp->opened = false;
		}
		vput(newtextvp);
		imgp->vp = newtextvp = NULL;
		if (args->fname != NULL) {
			if (newtextdvp != NULL) {
				vrele(newtextdvp);
				newtextdvp = NULL;
			}
			NDFREE_PNBUF(&nd);
			free(newbinname, M_PARGS);
			newbinname = NULL;
		}
		vm_object_deallocate(imgp->object);
		imgp->object = NULL;
		execve_nosetid(imgp);
		imgp->execpath = NULL;
		free(imgp->freepath, M_TEMP);
		imgp->freepath = NULL;
		/* set new name to that of the interpreter */
		args->fname = imgp->interpreter_name;
		goto interpret;
	}

	/*
	 * NB: We unlock the vnode here because it is believed that none
	 * of the sv_copyout_strings/sv_fixup operations require the vnode.
	 */
	VOP_UNLOCK(imgp->vp);

	if (disallow_high_osrel &&
	    P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
		error = ENOEXEC;
		uprintf("Osrel %d for image %s too high\n", p->p_osrel,
		    imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Copy out strings (args and env) and initialize stack base.
	 */
	error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Stack setup.
	 */
	error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * For security and other reasons, the file descriptor table cannot be
	 * shared after an exec.
	 */
	fdunshare(td);
	pdunshare(td);
	/* close files on exec */
	fdcloseexec(td);

	/*
	 * Malloc things before we need locks.
	 */
	i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
	/* Cache arguments if they fit inside our allowance */
	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
		newargs = pargs_alloc(i);
		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
	}

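	/*
	 * Descriptive note: the cached copy installed into p_args further
	 * below is what the kern.proc.args sysctl serves short argument
	 * lists from; lists exceeding ps_arg_cache_limit are not cached
	 * and have to be read back from the process address space instead.
	 */
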
	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec.  The new process gets a copy of the old
	 * handlers.  In execsigs(), the new process will have its signals
	 * reset.
	 */
	if (sigacts_shared(p->p_sigacts)) {
		oldsigacts = p->p_sigacts;
		newsigacts = sigacts_alloc();
		sigacts_copy(newsigacts, oldsigacts);
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);

	PROC_LOCK(p);
	if (oldsigacts)
		p->p_sigacts = newsigacts;
	/* Stop profiling */
	stopprofclock(p);

	/* reset caught signals */
	execsigs(p);

	/* name this process - nameiexec(p, ndp) */
	bzero(p->p_comm, sizeof(p->p_comm));
	if (args->fname)
		bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
		    min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
	else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
		bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
	bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif

	/*
	 * Mark as execed, wake up the process that vforked (if any) and
	 * tell it that it now has its own resources back.
	 */
	p->p_flag |= P_EXEC;
	if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
		p->p_flag2 &= ~P2_NOTRACE;
	if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
		p->p_flag2 &= ~P2_STKGAP_DISABLE;
	if (p->p_flag & P_PPWAIT) {
		p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
		cv_broadcast(&p->p_pwait);
		/* STOPs are no longer ignored, arrange for AST */
		signotify(td);
	}

	if ((imgp->sysent->sv_setid_allowed != NULL &&
	    !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
	    (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
		execve_nosetid(imgp);

	/*
	 * Implement image setuid/setgid installation.
	 */
	if (imgp->credential_setid) {
		/*
		 * Turn off syscall tracing for set-id programs, except for
		 * root.  Record any set-id flags first to make sure that
		 * we do not regain any tracing during a possible block.
		 */
		setsugid(p);
#ifdef KTRACE
		kiop = ktrprocexec(p);
#endif
		/*
		 * Close any file descriptors 0..2 that reference procfs,
		 * then make sure file descriptors 0..2 are in use.
		 *
		 * Both fdsetugidsafety() and fdcheckstd() may call functions
		 * taking sleepable locks, so temporarily drop our locks.
		 */
		PROC_UNLOCK(p);
		VOP_UNLOCK(imgp->vp);
		fdsetugidsafety(td);
		error = fdcheckstd(td);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto exec_fail_dealloc;
		PROC_LOCK(p);
#ifdef MAC
		if (will_transition) {
			mac_vnode_execve_transition(oldcred, imgp->newcred,
			    imgp->vp, interpvplabel, imgp);
		}
#endif
	} else {
		if (oldcred->cr_uid == oldcred->cr_ruid &&
		    oldcred->cr_gid == oldcred->cr_rgid)
			p->p_flag &= ~P_SUGID;
	}
	/*
	 * Set the new credentials.
	 */
	if (imgp->newcred != NULL) {
		proc_set_cred(p, imgp->newcred);
		crfree(oldcred);
		oldcred = NULL;
	}

	/*
	 * Store the vp for use in kern.proc.pathname.  This vnode was
	 * referenced by namei() or by the fexecve variant of fname handling.
	 */
	oldtextvp = p->p_textvp;
	p->p_textvp = newtextvp;
	oldtextdvp = p->p_textdvp;
	p->p_textdvp = newtextdvp;
	newtextdvp = NULL;
	oldbinname = p->p_binname;
	p->p_binname = newbinname;
	newbinname = NULL;

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exec if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exec)
		dtrace_fasttrap_exec(p);
#endif

	/*
	 * Notify others that we exec'd, and clear the P_INEXEC flag
	 * as we're now a bona fide freshly-execed process.
	 */
	KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
	p->p_flag &= ~P_INEXEC;

	/* clear "fork but no exec" flag, as we _are_ execing */
	p->p_acflag &= ~AFORK;

	/*
	 * Free any previous argument cache and replace it with
	 * the new argument cache, if any.
	 */
	oldargs = p->p_args;
	p->p_args = newargs;
	newargs = NULL;

	PROC_UNLOCK(p);

#ifdef HWPMC_HOOKS
	/*
	 * Check if system-wide sampling is in effect or if the
	 * current process is using PMCs.  If so, do exec() time
	 * processing.  This processing needs to happen AFTER the
	 * P_INEXEC flag is cleared.
	 */
	if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
		VOP_UNLOCK(imgp->vp);
		pe.pm_credentialschanged = credential_changing;
		pe.pm_entryaddr = imgp->entry_addr;

		PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *)&pe);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
#endif

	/* Set values passed into the program in registers. */
	(*p->p_sysent->sv_setregs)(td, imgp, stack_base);

	VOP_MMAPPED(imgp->vp);

	SDT_PROBE1(proc, , , exec__success, args->fname);

exec_fail_dealloc:
	if (error != 0) {
		p->p_osrel = orig_osrel;
		p->p_fctl0 = orig_fctl0;
		p->p_elf_brandinfo = orig_brandinfo;
	}

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	if (imgp->vp != NULL) {
		if (imgp->opened)
			VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(imgp->vp);
		if (error != 0)
			vput(imgp->vp);
		else
			VOP_UNLOCK(imgp->vp);
		if (args->fname != NULL)
			NDFREE_PNBUF(&nd);
		if (newtextdvp != NULL)
			vrele(newtextdvp);
		free(newbinname, M_PARGS);
	}

	if (imgp->object != NULL)
		vm_object_deallocate(imgp->object);

	free(imgp->freepath, M_TEMP);

	if (error == 0) {
		if (p->p_ptevents & PTRACE_EXEC) {
			PROC_LOCK(p);
			if (p->p_ptevents & PTRACE_EXEC)
				td->td_dbgflags |= TDB_EXEC;
			PROC_UNLOCK(p);
		}
	} else {
exec_fail:
		/* we're done here, clear P_INEXEC */
		PROC_LOCK(p);
		p->p_flag &= ~P_INEXEC;
		PROC_UNLOCK(p);

		SDT_PROBE1(proc, , , exec__failure, error);
	}

	if (imgp->newcred != NULL && oldcred != NULL)
		crfree(imgp->newcred);

#ifdef MAC
	mac_execve_exit(imgp);
	mac_execve_interpreter_exit(interpvplabel);
#endif
	exec_free_args(args);

	/*
	 * Handle deferred decrement of ref counts.
	 */
	if (oldtextvp != NULL)
		vrele(oldtextvp);
	if (oldtextdvp != NULL)
		vrele(oldtextdvp);
	free(oldbinname, M_PARGS);
#ifdef KTRACE
	ktr_io_params_free(kiop);
#endif
	pargs_drop(oldargs);
	pargs_drop(newargs);
	if (oldsigacts != NULL)
		sigacts_free(oldsigacts);
	if (euip != NULL)
		uifree(euip);

	if (error && imgp->vmspace_destroyed) {
		/* Sorry, the process is gone; exit gracefully. */
		exec_cleanup(td, oldvmspace);
		exit1(td, 0, SIGABRT);
		/* NOT REACHED */
	}

#ifdef KTRACE
	if (error == 0)
		ktrprocctor(p);
#endif

	/*
	 * We don't want cpu_set_syscall_retval() to overwrite any of
	 * the register values put in place by exec_setregs().
	 * Implementations of cpu_set_syscall_retval() will leave
	 * registers unmodified when returning EJUSTRETURN.
	 */
	return (error == 0 ? EJUSTRETURN : error);
}

void
exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
{
	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
		KASSERT(td->td_proc->p_vmspace != oldvmspace,
		    ("oldvmspace still used"));
		vmspace_free(oldvmspace);
		td->td_pflags &= ~TDP_EXECVMSPC;
	}
}

int
exec_map_first_page(struct image_params *imgp)
{
	vm_object_t object;
	vm_page_t m;
	int error;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
#if VM_NRESERVLEVEL > 0
	if ((object->flags & OBJ_COLORED) == 0) {
		VM_OBJECT_WLOCK(object);
		vm_object_color(object, 0);
		VM_OBJECT_WUNLOCK(object);
	}
#endif
	error = vm_page_grab_valid_unlocked(&m, object, 0,
	    VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);

	if (error != VM_PAGER_OK)
		return (EIO);
	imgp->firstpage = sf_buf_alloc(m, 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}

void
exec_unmap_first_page(struct image_params *imgp)
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		vm_page_unwire(m, PQ_ACTIVE);
	}
}

void
exec_onexec_old(struct thread *td)
{
	sigfastblock_clear(td);
	umtx_exec(td->td_proc);
}

/*
 * This is an optimization which removes the unmanaged shared page
 * mapping.  In combination with pmap_remove_pages(), which cleans all
 * managed mappings in the process' vmspace pmap, no work will be left
 * for pmap_remove(min, max).
 */
void
exec_free_abi_mappings(struct proc *p)
{
	struct vmspace *vmspace;

	vmspace = p->p_vmspace;
	if (refcount_load(&vmspace->vm_refcnt) != 1)
		return;

	if (!PROC_HAS_SHP(p))
		return;

	pmap_remove(vmspace_pmap(vmspace), vmspace->vm_shp_base,
	    vmspace->vm_shp_base + p->p_sysent->sv_shared_page_len);
}

/*
 * Run down the current address space and install a new one.
 */
int
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
{
	int error;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	struct thread *td = curthread;
	vm_offset_t sv_minuser;
	vm_map_t map;

	imgp->vmspace_destroyed = true;
	imgp->sysent = sv;

	if (p->p_sysent->sv_onexec_old != NULL)
		p->p_sysent->sv_onexec_old(td);
	itimers_exec(p);

	EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);

	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted.
	 */
	map = &vmspace->vm_map;
	if (map_at_zero)
		sv_minuser = sv->sv_minuser;
	else
		sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
	if (refcount_load(&vmspace->vm_refcnt) == 1 &&
	    vm_map_min(map) == sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser &&
	    cpu_exec_vmspace_reuse(p, map)) {
		exec_free_abi_mappings(p);
		shmexit(vmspace);
		pmap_remove_pages(vmspace_pmap(vmspace));
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
		/*
		 * An exec terminates mlockall(MCL_FUTURE).
		 * ASLR and W^X states must be re-evaluated.
		 */
		vm_map_lock(map);
		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
		    MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
		vm_map_unlock(map);
	} else {
		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
		if (error)
			return (error);
		vmspace = p->p_vmspace;
		map = &vmspace->vm_map;
	}
	map->flags |= imgp->map_flags;

	return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
}

/*
 * Compute the stack size limit and map the main process stack.
 * Map the shared page.
 */
int
exec_map_stack(struct image_params *imgp)
{
	struct rlimit rlim_stack;
	struct sysentvec *sv;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vmspace;
	vm_offset_t stack_addr, stack_top;
	vm_offset_t sharedpage_addr;
	u_long ssiz;
	int error, find_space, stack_off;
	vm_prot_t stack_prot;
	vm_object_t obj;

	p = imgp->proc;
	sv = p->p_sysent;

	if (imgp->stack_sz != 0) {
		ssiz = trunc_page(imgp->stack_sz);
		PROC_LOCK(p);
		lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
		PROC_UNLOCK(p);
		if (ssiz > rlim_stack.rlim_max)
			ssiz = rlim_stack.rlim_max;
		if (ssiz > rlim_stack.rlim_cur) {
			rlim_stack.rlim_cur = ssiz;
			kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
		}
	} else if (sv->sv_maxssiz != NULL) {
		ssiz = *sv->sv_maxssiz;
	} else {
		ssiz = maxssiz;
	}

	vmspace = p->p_vmspace;
	map = &vmspace->vm_map;

	stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
	    imgp->stack_prot : sv->sv_stackprot;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));
		find_space = VMFS_ANY_SPACE;
	} else {
		stack_addr = sv->sv_usrstack - ssiz;
		find_space = VMFS_NO_SPACE;
	}
	error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
	    sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
	    MAP_STACK_GROWS_DOWN);
	if (error != KERN_SUCCESS) {
		uprintf("%s: mapping stack size %#jx prot %#x "
		    "failed, mach error %d errno %d\n", __func__,
		    (uintmax_t)ssiz, stack_prot, error,
		    vm_mmap_to_errno(error));
		return (vm_mmap_to_errno(error));
	}

	stack_top = stack_addr + ssiz;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		/* Randomize within the first page of the stack. */
		arc4rand(&stack_off, sizeof(stack_off), 0);
		stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
	}

	/* Map a shared page. */
	obj = sv->sv_shared_page_obj;
	if (obj == NULL) {
		sharedpage_addr = 0;
		goto out;
	}

	/*
	 * If randomization is disabled then the shared page will
	 * be mapped at the address specified in sysentvec.
	 * Otherwise any address above the .data section can be selected.
	 * The same logic is used for stack address randomization.
	 * If the address randomization is applied, map a guard page
	 * at the top of UVA.
	 */
	vm_object_reference(obj);
	if ((imgp->imgp_flags & IMGP_ASLR_SHARED_PAGE) != 0) {
		sharedpage_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));

		error = vm_map_fixed(map, NULL, 0,
		    sv->sv_maxuser - PAGE_SIZE, PAGE_SIZE,
		    VM_PROT_NONE, VM_PROT_NONE, MAP_CREATE_GUARD);
		if (error != KERN_SUCCESS) {
			/*
			 * This is not fatal, so let's just print a warning
			 * and continue.
			 */
			uprintf("%s: mapping guard page at the top of UVA "
			    "failed, mach error %d errno %d\n",
			    __func__, error, vm_mmap_to_errno(error));
		}

		error = vm_map_find(map, obj, 0,
		    &sharedpage_addr, sv->sv_shared_page_len,
		    sv->sv_maxuser, VMFS_ANY_SPACE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	} else {
		sharedpage_addr = sv->sv_shared_page_base;
		error = vm_map_fixed(map, obj, 0,
		    sharedpage_addr, sv->sv_shared_page_len,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	}
	if (error != KERN_SUCCESS) {
		uprintf("%s: mapping shared page at addr %p "
		    "failed, mach error %d errno %d\n", __func__,
		    (void *)sharedpage_addr, error, vm_mmap_to_errno(error));
		vm_object_deallocate(obj);
		return (vm_mmap_to_errno(error));
	}
out:
	/*
	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
	 * are still used to enforce the stack rlimit on the process stack.
	 */
	vmspace->vm_maxsaddr = (char *)stack_addr;
	vmspace->vm_stacktop = stack_top;
	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
	vmspace->vm_shp_base = sharedpage_addr;

	return (0);
}

/*
 * Copy in argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, const char *fname,
    enum uio_seg segflg, char **argv, char **envv)
{
	u_long arg, env;
	int error;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);

	/*
	 * Allocate demand-paged memory for the file name, argument, and
	 * environment strings.
	 */
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	/*
	 * Copy the file name.
	 */
	error = exec_args_add_fname(args, fname, segflg);
	if (error != 0)
		goto err_exit;

	/*
	 * extract arguments first
	 */
	for (;;) {
		error = fueword(argv++, &arg);
		if (error == -1) {
			error = EFAULT;
			goto err_exit;
		}
		if (arg == 0)
			break;
		error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
		    UIO_USERSPACE);
		if (error != 0)
			goto err_exit;
	}

	/*
	 * extract environment strings
	 */
	if (envv) {
		for (;;) {
			error = fueword(envv++, &env);
			if (error == -1) {
				error = EFAULT;
				goto err_exit;
			}
			if (env == 0)
				break;
			error = exec_args_add_env(args,
			    (char *)(uintptr_t)env, UIO_USERSPACE);
			if (error != 0)
				goto err_exit;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}

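/*
 * Descriptive note on the machinery below: exec argument buffers are
 * preallocated KVA ranges cached per-CPU (exec_args_kva), with a global
 * free list as fallback.  A generation counter lets the vm_lowmem
 * handler apply MADV_FREE to every range exactly once per low-memory
 * event, including ranges that are busy at the time and are only
 * released later.
 */
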
struct exec_args_kva {
	vm_offset_t addr;
	u_int gen;
	SLIST_ENTRY(exec_args_kva) next;
};

DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);

static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
static struct mtx exec_args_kva_mtx;
static u_int exec_args_gen;

static void
exec_prealloc_args_kva(void *arg __unused)
{
	struct exec_args_kva *argkva;
	u_int i;

	SLIST_INIT(&exec_args_kva_freelist);
	mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
	for (i = 0; i < exec_map_entries; i++) {
		argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
		argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
		argkva->gen = exec_args_gen;
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
	}
}
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);

static vm_offset_t
exec_alloc_args_kva(void **cookie)
{
	struct exec_args_kva *argkva;

	argkva = (void *)atomic_readandclear_ptr(
	    (uintptr_t *)DPCPU_PTR(exec_args_kva));
	if (argkva == NULL) {
		mtx_lock(&exec_args_kva_mtx);
		while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
			(void)mtx_sleep(&exec_args_kva_freelist,
			    &exec_args_kva_mtx, 0, "execkva", 0);
		SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
		mtx_unlock(&exec_args_kva_mtx);
	}
	kasan_mark((void *)argkva->addr, exec_map_entry_size,
	    exec_map_entry_size, 0);
	*(struct exec_args_kva **)cookie = argkva;
	return (argkva->addr);
}

static void
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
{
	vm_offset_t base;

	base = argkva->addr;
	kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
	    KASAN_EXEC_ARGS_FREED);
	if (argkva->gen != gen) {
		(void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
		    MADV_FREE);
		argkva->gen = gen;
	}
	if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
	    (uintptr_t)NULL, (uintptr_t)argkva)) {
		mtx_lock(&exec_args_kva_mtx);
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
		wakeup_one(&exec_args_kva_freelist);
		mtx_unlock(&exec_args_kva_mtx);
	}
}

static void
exec_free_args_kva(void *cookie)
{

	exec_release_args_kva(cookie, exec_args_gen);
}

static void
exec_args_kva_lowmem(void *arg __unused)
{
	SLIST_HEAD(, exec_args_kva) head;
	struct exec_args_kva *argkva;
	u_int gen;
	int i;

	gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;

	/*
	 * Force an madvise of each KVA range.  Any currently allocated ranges
	 * will have MADV_FREE applied once they are freed.
	 */
	SLIST_INIT(&head);
	mtx_lock(&exec_args_kva_mtx);
	SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
	mtx_unlock(&exec_args_kva_mtx);
	while ((argkva = SLIST_FIRST(&head)) != NULL) {
		SLIST_REMOVE_HEAD(&head, next);
		exec_release_args_kva(argkva, gen);
	}

	CPU_FOREACH(i) {
		argkva = (void *)atomic_readandclear_ptr(
		    (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
		if (argkva != NULL)
			exec_release_args_kva(argkva, gen);
	}
}
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
    EVENTHANDLER_PRI_ANY);

/*
 * Allocate temporary demand-paged, zero-filled memory for the file name,
 * argument, and environment strings.
 */
int
exec_alloc_args(struct image_args *args)
{

	args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
	return (0);
}

void
exec_free_args(struct image_args *args)
{

	if (args->buf != NULL) {
		exec_free_args_kva(args->bufkva);
		args->buf = NULL;
	}
	if (args->fname_buf != NULL) {
		free(args->fname_buf, M_TEMP);
		args->fname_buf = NULL;
	}
}

/*
 * A set of functions to fill struct image_args.
 *
 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
 * fname) before the other functions.  All exec_args_add_arg() calls must
 * be made before any exec_args_add_env() calls.  exec_args_adjust_args()
 * may be called any time after exec_args_add_fname().
 *
 * exec_args_add_fname() - install path to be executed
 * exec_args_add_arg() - append an argument string
 * exec_args_add_env() - append an env string
 * exec_args_adjust_args() - adjust location of the argument list to
 *                           allow new arguments to be prepended
 */

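/*
 * A minimal usage sketch (illustrative only; exec_copyin_args() above is
 * the real in-tree consumer of this API):
 *
 *	struct image_args args;
 *	int error;
 *
 *	bzero(&args, sizeof(args));
 *	exec_alloc_args(&args);
 *	error = exec_args_add_fname(&args, "/bin/sh", UIO_SYSSPACE);
 *	if (error == 0)
 *		error = exec_args_add_arg(&args, "sh", UIO_SYSSPACE);
 *	if (error == 0)
 *		error = exec_args_add_env(&args, "PATH=/bin", UIO_SYSSPACE);
 *	if (error != 0)
 *		exec_free_args(&args);
 */
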
int
exec_args_add_fname(struct image_args *args, const char *fname,
    enum uio_seg segflg)
{
	int error;
	size_t length;

	KASSERT(args->fname == NULL, ("fname already appended"));
	KASSERT(args->endp == NULL, ("already appending to args"));

	if (fname != NULL) {
		args->fname = args->buf;
		error = segflg == UIO_SYSSPACE ?
		    copystr(fname, args->fname, PATH_MAX, &length) :
		    copyinstr(fname, args->fname, PATH_MAX, &length);
		if (error != 0)
			return (error == ENAMETOOLONG ? E2BIG : error);
	} else
		length = 0;

	/* Set up for _arg_*()/_env_*() */
	args->endp = args->buf + length;
	/* begin_argv must be set and kept updated */
	args->begin_argv = args->endp;
	KASSERT(exec_map_entry_size - length >= ARG_MAX,
	    ("too little space remaining for arguments %zu < %zu",
	    exec_map_entry_size - length, (size_t)ARG_MAX));
	args->stringspace = ARG_MAX;

	return (0);
}

static int
exec_args_add_str(struct image_args *args, const char *str,
    enum uio_seg segflg, int *countp)
{
	int error;
	size_t length;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	error = (segflg == UIO_SYSSPACE) ?
	    copystr(str, args->endp, args->stringspace, &length) :
	    copyinstr(str, args->endp, args->stringspace, &length);
	if (error != 0)
		return (error == ENAMETOOLONG ? E2BIG : error);
	args->stringspace -= length;
	args->endp += length;
	(*countp)++;

	return (0);
}

int
exec_args_add_arg(struct image_args *args, const char *argp,
    enum uio_seg segflg)
{

	KASSERT(args->envc == 0, ("appending args after env"));

	return (exec_args_add_str(args, argp, segflg, &args->argc));
}

int
exec_args_add_env(struct image_args *args, const char *envp,
    enum uio_seg segflg)
{

	if (args->envc == 0)
		args->begin_envv = args->endp;

	return (exec_args_add_str(args, envp, segflg, &args->envc));
}

int
exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
{
	ssize_t offset;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	offset = extend - consume;
	if (args->stringspace < offset)
		return (E2BIG);
	memmove(args->begin_argv + extend, args->begin_argv + consume,
	    args->endp - args->begin_argv + consume);
	if (args->envc > 0)
		args->begin_envv += offset;
	args->endp += offset;
	args->stringspace -= offset;
	return (0);
}

char *
exec_args_get_begin_envv(struct image_args *args)
{

	KASSERT(args->endp != NULL, ("endp not initialized"));

	if (args->envc > 0)
		return (args->begin_envv);
	return (args->endp);
}

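/*
 * Descriptive note: exec_copyout_strings() below lays out the new
 * image's initial user stack, from higher to lower addresses: the
 * ps_strings structure, signal trampoline code (when no shared page
 * supplies it), the image path for rtld, the SSP canary, the page sizes
 * array, the argument and environment strings themselves, optional room
 * for the ELF auxargs vector, and finally the argv[]/envv[] pointer
 * tables that the returned stack base points at.
 */
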
/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables.  Return a pointer to the base so that it can be used
 * as the initial stack pointer.
 */
int
exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
{
	int argc, envc;
	char **vectp;
	char *stringp;
	uintptr_t destp, ustringp;
	struct ps_strings *arginfo;
	struct proc *p;
	struct sysentvec *sysent;
	size_t execpath_len;
	int error, szsigcode;
	char canary[sizeof(long) * 8];

	p = imgp->proc;
	sysent = p->p_sysent;

	destp = PROC_PS_STRINGS(p);
	arginfo = imgp->ps_strings = (void *)destp;

	/*
	 * Install sigcode.
	 */
	if (sysent->sv_shared_page_base == 0 && sysent->sv_szsigcode != NULL) {
		szsigcode = *(sysent->sv_szsigcode);
		destp -= szsigcode;
		destp = rounddown2(destp, sizeof(void *));
		error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
		if (error != 0)
			return (error);
	}

	/*
	 * Copy the image path for the rtld.
	 */
	if (imgp->execpath != NULL && imgp->auxargs != NULL) {
		execpath_len = strlen(imgp->execpath) + 1;
		destp -= execpath_len;
		destp = rounddown2(destp, sizeof(void *));
		imgp->execpathp = (void *)destp;
		error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
		if (error != 0)
			return (error);
	}

	/*
	 * Prepare the canary for SSP.
	 */
	arc4rand(canary, sizeof(canary), 0);
	destp -= sizeof(canary);
	imgp->canary = (void *)destp;
	error = copyout(canary, imgp->canary, sizeof(canary));
	if (error != 0)
		return (error);
	imgp->canarylen = sizeof(canary);

	/*
	 * Prepare the pagesizes array.
	 */
	imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
	destp -= imgp->pagesizeslen;
	destp = rounddown2(destp, sizeof(void *));
	imgp->pagesizes = (void *)destp;
	error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
	if (error != 0)
		return (error);

	/*
	 * Allocate room for the argument and environment strings.
	 */
	destp -= ARG_MAX - imgp->args->stringspace;
	destp = rounddown2(destp, sizeof(void *));
	ustringp = destp;

	if (imgp->auxargs) {
		/*
		 * Allocate room on the stack for the ELF auxargs
		 * array.  It has up to AT_COUNT entries.
		 */
		destp -= AT_COUNT * sizeof(Elf_Auxinfo);
		destp = rounddown2(destp, sizeof(void *));
	}

	vectp = (char **)destp;

	/*
	 * Allocate room for the argv[] and env vectors including the
	 * terminating NULL pointers.
	 */
	vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

	/*
	 * vectp also becomes our initial stack base.
	 */
	*stack_base = (uintptr_t)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	error = copyout(stringp, (void *)ustringp,
	    ARG_MAX - imgp->args->stringspace);
	if (error != 0)
		return (error);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	imgp->argv = vectp;
	if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nargvstr, argc) != 0)
		return (EFAULT);

	/*
	 * Fill in argument portion of vector table.
	 */
	for (; argc > 0; --argc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	if (suword(vectp++, 0) != 0)
		return (EFAULT);

	imgp->envv = vectp;
	if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nenvstr, envc) != 0)
		return (EFAULT);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* end of vector table is a null pointer */
	if (suword(vectp, 0) != 0)
		return (EFAULT);

	if (imgp->auxargs) {
		vectp++;
		error = imgp->sysent->sv_copyout_auxargs(imgp,
		    (uintptr_t)vectp);
		if (error != 0)
			return (error);
	}

	return (0);
}

/*
 * Check permissions of file to execute.
 * Called with imgp->vp locked.
 * Return 0 for success or error code on failure.
 */
int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->vp;
	struct vattr *attr = imgp->attr;
	struct thread *td;
	int error;

	td = curthread;

	/* Get file attributes */
	error = VOP_GETATTR(vp, attr, td->td_ucred);
	if (error)
		return (error);

#ifdef MAC
	error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
	if (error)
		return (error);
#endif

	/*
	 * 1) Check if file execution is disabled for the filesystem that
	 *    this file resides on.
	 * 2) Ensure that at least one execute bit is on.  Otherwise, a
	 *    privileged user will always succeed, and we don't want this
	 *    to happen unless the file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd.
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 * Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 *
	 * Add a text reference now so no one can write to the
	 * executable while we're activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		return (error);
	imgp->textset = true;

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error == 0)
		imgp->opened = true;
	return (error);
}

/*
 * Exec handler registration
 */
int
exec_register(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	u_int count = 2;	/* New slot and trailing NULL */

	if (execsw)
		for (es = execsw; *es; es++)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	if (execsw)
		for (es = execsw; *es; es++)
			*xs++ = *es;
	*xs++ = execsw_arg;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

int
exec_unregister(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 1;

	if (execsw == NULL)
		panic("unregister with no handlers left?\n");

	for (es = execsw; *es; es++) {
		if (*es == execsw_arg)
			break;
	}
	if (*es == NULL)
		return (ENOENT);
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			*xs++ = *es;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
{
	size_t chunk_len;
	int error;

	while (len > 0) {
		chunk_len = MIN(len, CORE_BUF_SIZE);

		/*
		 * We can get EFAULT error here.
		 * In that case zero out the current chunk of the segment.
		 */
		error = copyin(base, buf, chunk_len);
		if (error != 0)
			bzero(buf, chunk_len);
		error = compressor_write(cp->comp, buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

int
core_write(struct coredump_params *cp, const void *base, size_t len,
    off_t offset, enum uio_seg seg, size_t *resid)
{

	return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
	    len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
	    cp->active_cred, cp->file_cred, resid, cp->td));
}

int
core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
    void *tmpbuf)
{
	vm_map_t map;
	struct mount *mp;
	size_t resid, runlen;
	int error;
	bool success;

	KASSERT((uintptr_t)base % PAGE_SIZE == 0,
	    ("%s: user address %p is not page-aligned", __func__, base));

	if (cp->comp != NULL)
		return (compress_chunk(cp, base, tmpbuf, len));

	map = &cp->td->td_proc->p_vmspace->vm_map;
	for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
		/*
		 * Attempt to page in all virtual pages in the range.  If a
		 * virtual page is not backed by the pager, it is represented
		 * as a hole in the file.  This can occur with zero-filled
		 * anonymous memory or truncated files, for example.
		 */
		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
			if (core_dump_can_intr && curproc_sigkilled())
				return (EINTR);
			error = vm_fault(map, (uintptr_t)base + runlen,
			    VM_PROT_READ, VM_FAULT_NOFILL, NULL);
			if (runlen == 0)
				success = error == KERN_SUCCESS;
			else if ((error == KERN_SUCCESS) != success)
				break;
		}

		if (success) {
			error = core_write(cp, base, runlen, offset,
			    UIO_USERSPACE, &resid);
			if (error != 0) {
				if (error != EFAULT)
					break;

				/*
				 * EFAULT may be returned if the user mapping
				 * could not be accessed, e.g., because a mapped
				 * file has been truncated.  Skip the page if no
				 * progress was made, to protect against a
				 * hypothetical scenario where vm_fault() was
				 * successful but core_write() returns EFAULT
				 * anyway.
				 */
				runlen -= resid;
				if (runlen == 0) {
					success = false;
					runlen = PAGE_SIZE;
				}
			}
		}
		if (!success) {
			error = vn_start_write(cp->vp, &mp, V_WAIT);
			if (error != 0)
				break;
			vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
			error = vn_truncate_locked(cp->vp, offset + runlen,
			    false, cp->td->td_ucred);
			VOP_UNLOCK(cp->vp);
			vn_finished_write(mp);
			if (error != 0)
				break;
		}
	}
	return (error);
}

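/*
 * Descriptive note: sbuf_drain_core_output() follows the sbuf(9) drain
 * convention, returning the number of bytes consumed on success and a
 * negative errno on failure.
 */
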
/*
 * Drain into a core file.
 */
int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *cp;
	struct proc *p;
	int error, locked;

	cp = arg;
	p = cp->td->td_proc;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held.  Draining with the
	 * non-sleepable lock held is unsafe.  The lock is needed for
	 * those routines when dumping a live process.  In our case we
	 * can safely release the lock before draining and acquire it
	 * again after.
	 */
	locked = PROC_LOCKED(p);
	if (locked)
		PROC_UNLOCK(p);
	if (cp->comp != NULL)
		error = compressor_write(cp->comp, __DECONST(char *, data),
		    len);
	else
		error = core_write(cp, __DECONST(void *, data), len, cp->offset,
		    UIO_SYSSPACE, NULL);
	if (locked)
		PROC_LOCK(p);
	if (error != 0)
		return (-error);
	cp->offset += len;
	return (len);
}