/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/asan.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/umtxvar.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exec;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
    &coredump_pack_fileinfo, 0,
    "Enable file path packing in 'procstat -f' coredump notes");

int coredump_pack_vmmapinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
    &coredump_pack_vmmapinfo, 0,
    "Enable file path packing in 'procstat -v' coredump notes");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
    struct mac *mac_p, struct vmspace *oldvmspace);

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
    "Location of process' ps_strings structure");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
    "Top of process stack");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_stackprot, "I",
    "Stack memory permissions");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0,
    "Process' command line characters cache limit");

static int disallow_high_osrel;
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
    &disallow_high_osrel, 0,
    "Disallow execution of binaries built for higher version of the world");

static int map_at_zero = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
    "Permit processes to map an object at virtual address 0.");

static int core_dump_can_intr = 1;
SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
    &core_dump_can_intr, 0,
    "Core dumping interruptible with SIGKILL");

static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t ps_strings;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val;

		val = (unsigned int)PROC_PS_STRINGS(p);
		return (SYSCTL_OUT(req, &val, sizeof(val)));
	}
#endif
	ps_strings = PROC_PS_STRINGS(p);
	return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t val;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val32;

		val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
		return (SYSCTL_OUT(req, &val32, sizeof(val32)));
	}
#endif
	val = round_page(p->p_vmspace->vm_stacktop);
	return (SYSCTL_OUT(req, &val, sizeof(val)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;

	p = curproc;
	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
	    sizeof(p->p_sysent->sv_stackprot)));
}

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
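 * The NULL-terminated array is grown by exec_register() and shrunk by
 * exec_unregister() below.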
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;
	char **argv;
	char **envv;
};
#endif

int
sys_execve(struct thread *td, struct execve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, NULL, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
	int fd;
	char **argv;
	char **envv;
};
#endif
int
sys_fexecve(struct thread *td, struct fexecve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
	    uap->argv, uap->envv);
	if (error == 0) {
		args.fd = uap->fd;
		error = kern_execve(td, &args, NULL, oldvmspace);
	}
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
	char *fname;
	char **argv;
	char **envv;
	struct mac *mac_p;
};
#endif

int
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
{
#ifdef MAC
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, uap->mac_p, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
#else
	return (ENOSYS);
#endif
}

int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
	struct proc *p;
	int error;

	KASSERT(td == curthread, ("non-current thread %p", td));
	error = 0;
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		while (p->p_singlethr > 0) {
			error = msleep(&p->p_singlethr, &p->p_mtx,
			    PWAIT | PCATCH, "exec1t", 0);
			if (error != 0) {
				error = ERESTART;
				goto unlock;
			}
		}
		if (thread_single(p, SINGLE_BOUNDARY) != 0)
			error = ERESTART;
unlock:
		PROC_UNLOCK(p);
	}
	KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
	    ("nested execve"));
	*oldvmspace = p->p_vmspace;
	return (error);
}

void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
	struct proc *p;

	KASSERT(td == curthread, ("non-current thread %p", td));
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/*
		 * If success, we upgrade to SINGLE_EXIT state to
		 * force other threads to suicide.
		 */
		if (error == EJUSTRETURN)
			thread_single(p, SINGLE_EXIT);
		else
			thread_single_end(p, SINGLE_BOUNDARY);
		PROC_UNLOCK(p);
	}
	exec_cleanup(td, oldvmspace);
}

/*
 * kern_execve() has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{

	TSEXEC(td->td_proc->p_pid, args->begin_argv);
	AUDIT_ARG_ARGV(args->begin_argv, args->argc,
	    exec_args_get_begin_envv(args) - args->begin_argv);
	AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
	    args->endp - exec_args_get_begin_envv(args));

	/* Must have at least one argument. */
	if (args->argc == 0) {
		exec_free_args(args);
		return (EINVAL);
	}
	return (do_execve(td, args, mac_p, oldvmspace));
}

static void
execve_nosetid(struct image_params *imgp)
{
	imgp->credential_setid = false;
	if (imgp->newcred != NULL) {
		crfree(imgp->newcred);
		imgp->newcred = NULL;
	}
}

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 */
static int
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{
	struct proc *p = td->td_proc;
	struct nameidata nd;
	struct ucred *oldcred;
	struct uidinfo *euip = NULL;
	uintptr_t stack_base;
	struct image_params image_params, *imgp;
	struct vattr attr;
	int (*img_first)(struct image_params *);
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
#ifdef KTRACE
	struct ktr_io_params *kiop;
#endif
	struct vnode *oldtextvp, *newtextvp;
	struct vnode *oldtextdvp, *newtextdvp;
	char *oldbinname, *newbinname;
	bool credential_changing;
#ifdef MAC
	struct label *interpvplabel = NULL;
	bool will_transition;
#endif
#ifdef HWPMC_HOOKS
	struct pmckern_procexec pe;
#endif
	int error, i, orig_osrel;
	uint32_t orig_fctl0;
	Elf_Brandinfo *orig_brandinfo;
	size_t freepath_size;
	static const char fexecv_proc_title[] = "(fexecv)";

	imgp = &image_params;
	oldtextvp = oldtextdvp = NULL;
	newtextvp = newtextdvp = NULL;
	newbinname = oldbinname = NULL;
#ifdef KTRACE
	kiop = NULL;
#endif

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/*
	 * Initialize part of the common data
	 */
	bzero(imgp, sizeof(*imgp));
	imgp->proc = p;
	imgp->attr = &attr;
	imgp->args = args;
	oldcred = p->p_ucred;
	orig_osrel = p->p_osrel;
	orig_fctl0 = p->p_fctl0;
	orig_brandinfo = p->p_elf_brandinfo;

#ifdef MAC
	error = mac_execve_enter(imgp, mac_p);
	if (error)
		goto exec_fail;
#endif

	SDT_PROBE1(proc, , , exec, args->fname);

interpret:
	if (args->fname != NULL) {
#ifdef CAPABILITY_MODE
		/*
		 * While capability mode can't reach this point via direct
		 * path arguments to execve(), we also don't allow
		 * interpreters to be used in capability mode (for now).
		 * Catch indirect lookups and return a permissions error.
		 */
		if (IN_CAPABILITY_MODE(td)) {
			error = ECAPMODE;
			goto exec_fail;
		}
#endif

		/*
		 * Translate the file name.  namei() returns a vnode
		 * pointer in ni_vp among other things.
		 */
		NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
		    SAVENAME | AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
		    args->fname);

		error = namei(&nd);
		if (error)
			goto exec_fail;

		newtextvp = nd.ni_vp;
		newtextdvp = nd.ni_dvp;
		nd.ni_dvp = NULL;
		newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
		    M_WAITOK);
		memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
		newbinname[nd.ni_cnd.cn_namelen] = '\0';
		imgp->vp = newtextvp;

		/*
		 * Do our best to calculate the full path to the image file.
		 */
		if (args->fname[0] == '/') {
			imgp->execpath = args->fname;
		} else {
			VOP_UNLOCK(imgp->vp);
			freepath_size = MAXPATHLEN;
			if (vn_fullpath_hardlink(newtextvp, newtextdvp,
			    newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
			    &imgp->freepath, &freepath_size) != 0)
				imgp->execpath = args->fname;
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
	} else {
		AUDIT_ARG_FD(args->fd);

		/*
		 * If the descriptor was not opened with O_PATH, then
		 * we require that it was opened with O_EXEC or
		 * O_RDONLY.  In either case, exec_check_permissions()
		 * below checks the _current_ file access mode regardless
		 * of the permissions additionally checked at the
		 * open(2).
		 */
		error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
		    &newtextvp);
		if (error != 0)
			goto exec_fail;

		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	}

	/*
	 * Check file permissions.  Also 'opens' file and sets its vnode to
	 * text mode.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->object = imgp->vp->v_object;
	if (imgp->object != NULL)
		vm_object_reference(imgp->object);

	error = exec_map_first_page(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->proc->p_osrel = 0;
	imgp->proc->p_fctl0 = 0;
	imgp->proc->p_elf_brandinfo = NULL;

	/*
	 * Implement image setuid/setgid.
	 *
	 * Determine new credentials before attempting image activators
	 * so that it can be used by process_exec handlers to determine
	 * credential/setid changes.
	 *
	 * Don't honor setuid/setgid if the filesystem prohibits it or if
	 * the process is being traced.
	 *
	 * We disable setuid/setgid/etc in capability mode on the basis
	 * that most setugid applications are not written with that
	 * environment in mind, and will therefore almost certainly operate
	 * incorrectly.  In principle there's no reason that setugid
	 * applications might not be useful in capability mode, so we may want
	 * to reconsider this conservative design choice in the future.
	 *
	 * XXXMAC: For the time being, use NOSUID to also prohibit
	 * transitions on the file system.
	 */
	credential_changing = false;
	credential_changing |= (attr.va_mode & S_ISUID) &&
	    oldcred->cr_uid != attr.va_uid;
	credential_changing |= (attr.va_mode & S_ISGID) &&
	    oldcred->cr_gid != attr.va_gid;
#ifdef MAC
	will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
	    interpvplabel, imgp) != 0;
	credential_changing |= will_transition;
#endif

	/* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
	if (credential_changing)
		imgp->proc->p_pdeathsig = 0;

	if (credential_changing &&
#ifdef CAPABILITY_MODE
	    ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
#endif
	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
	    (p->p_flag & P_TRACED) == 0) {
		imgp->credential_setid = true;
		VOP_UNLOCK(imgp->vp);
		imgp->newcred = crdup(oldcred);
		if (attr.va_mode & S_ISUID) {
			euip = uifind(attr.va_uid);
			change_euid(imgp->newcred, euip);
		}
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (attr.va_mode & S_ISGID)
			change_egid(imgp->newcred, attr.va_gid);
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXXMAC: Note that the current logic will save the
		 * uid and gid if a MAC domain transition occurs, even
		 * though maybe it shouldn't.
		 */
		change_svuid(imgp->newcred, imgp->newcred->cr_uid);
		change_svgid(imgp->newcred, imgp->newcred->cr_gid);
	} else {
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXX: It's not clear that the existing behavior is
		 * POSIX-compliant.  A number of sources indicate that the
		 * saved uid/gid should only be updated if the new ruid is
		 * not equal to the old ruid, or the new euid is not equal
		 * to the old euid and the new euid is not equal to the old
		 * ruid.  The FreeBSD code always updates the saved uid/gid.
		 * Also, this code uses the new (replaced) euid and egid as
		 * the source, which may or may not be the right ones to use.
		 */
		if (oldcred->cr_svuid != oldcred->cr_uid ||
		    oldcred->cr_svgid != oldcred->cr_gid) {
			VOP_UNLOCK(imgp->vp);
			imgp->newcred = crdup(oldcred);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
			change_svuid(imgp->newcred, imgp->newcred->cr_uid);
			change_svgid(imgp->newcred, imgp->newcred->cr_gid);
		}
	}
	/* The new credentials are installed into the process later. */

	/*
	 * If the current process has a special image activator it
	 * wants to try first, call it.  For example, emulating shell
	 * scripts differently.
	 */
	error = -1;
	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
		error = img_first(imgp);

	/*
	 * Loop through the list of image activators, calling each one.
	 * An activator returns -1 if there is no match, 0 on success,
	 * and an error otherwise.
	 */
	for (i = 0; error == -1 && execsw[i]; ++i) {
		if (execsw[i]->ex_imgact == NULL ||
		    execsw[i]->ex_imgact == img_first) {
			continue;
		}
		error = (*execsw[i]->ex_imgact)(imgp);
	}

	if (error) {
		if (error == -1)
			error = ENOEXEC;
		goto exec_fail_dealloc;
	}

	/*
	 * Special interpreter operation, cleanup and loop up to try to
	 * activate the interpreter.
	 */
	if (imgp->interpreted) {
		exec_unmap_first_page(imgp);
		/*
		 * The text reference needs to be removed for scripts.
		 * There is a short period before we determine that
		 * something is a script where text reference is active.
		 * The vnode lock is held over this entire period
		 * so nothing should illegitimately be blocked.
		 */
		MPASS(imgp->textset);
		VOP_UNSET_TEXT_CHECKED(newtextvp);
		imgp->textset = false;
		/* free name buffer and old vnode */
#ifdef MAC
		mac_execve_interpreter_enter(newtextvp, &interpvplabel);
#endif
		if (imgp->opened) {
			VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
			imgp->opened = false;
		}
		vput(newtextvp);
		imgp->vp = newtextvp = NULL;
		if (args->fname != NULL) {
			if (newtextdvp != NULL) {
				vrele(newtextdvp);
				newtextdvp = NULL;
			}
			NDFREE_PNBUF(&nd);
			free(newbinname, M_PARGS);
			newbinname = NULL;
		}
		vm_object_deallocate(imgp->object);
		imgp->object = NULL;
		execve_nosetid(imgp);
		imgp->execpath = NULL;
		free(imgp->freepath, M_TEMP);
		imgp->freepath = NULL;
		/* set new name to that of the interpreter */
		args->fname = imgp->interpreter_name;
		goto interpret;
	}

	/*
	 * NB: We unlock the vnode here because it is believed that none
	 * of the sv_copyout_strings/sv_fixup operations require the vnode.
	 */
	VOP_UNLOCK(imgp->vp);

	if (disallow_high_osrel &&
	    P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
		error = ENOEXEC;
		uprintf("Osrel %d for image %s too high\n", p->p_osrel,
		    imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Copy out strings (args and env) and initialize stack base.
	 */
	error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Stack setup.
	 */
	error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * For security and other reasons, the file descriptor table cannot be
	 * shared after an exec.
	 */
	fdunshare(td);
	pdunshare(td);
	/* close files on exec */
	fdcloseexec(td);

	/*
	 * Malloc things before we need locks.
	 */
	i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
	/* Cache arguments if they fit inside our allowance */
	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
		newargs = pargs_alloc(i);
		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
	}

	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec.  The new process gets a copy of the old
	 * handlers.  In execsigs(), the new process will have its signals
	 * reset.
	 */
	if (sigacts_shared(p->p_sigacts)) {
		oldsigacts = p->p_sigacts;
		newsigacts = sigacts_alloc();
		sigacts_copy(newsigacts, oldsigacts);
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);

	PROC_LOCK(p);
	if (oldsigacts)
		p->p_sigacts = newsigacts;
	/* Stop profiling */
	stopprofclock(p);

	/* reset caught signals */
	execsigs(p);

	/* name this process - nameiexec(p, ndp) */
	bzero(p->p_comm, sizeof(p->p_comm));
	if (args->fname)
		bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
		    min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
	else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
		bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
	bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif

	/*
	 * mark as execed, wakeup the process that vforked (if any) and tell
	 * it that it now has its own resources back
	 */
	p->p_flag |= P_EXEC;
	if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
		p->p_flag2 &= ~P2_NOTRACE;
	if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
		p->p_flag2 &= ~P2_STKGAP_DISABLE;
	if (p->p_flag & P_PPWAIT) {
		p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
		cv_broadcast(&p->p_pwait);
		/* STOPs are no longer ignored, arrange for AST */
		signotify(td);
	}

	if ((imgp->sysent->sv_setid_allowed != NULL &&
	    !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
	    (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
		execve_nosetid(imgp);

	/*
	 * Implement image setuid/setgid installation.
	 */
	if (imgp->credential_setid) {
		/*
		 * Turn off syscall tracing for set-id programs, except for
		 * root.  Record any set-id flags first to make sure that
		 * we do not regain any tracing during a possible block.
		 */
		setsugid(p);
#ifdef KTRACE
		kiop = ktrprocexec(p);
#endif
		/*
		 * Close any file descriptors 0..2 that reference procfs,
		 * then make sure file descriptors 0..2 are in use.
		 *
		 * Both fdsetugidsafety() and fdcheckstd() may call functions
		 * taking sleepable locks, so temporarily drop our locks.
		 */
		PROC_UNLOCK(p);
		VOP_UNLOCK(imgp->vp);
		fdsetugidsafety(td);
		error = fdcheckstd(td);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto exec_fail_dealloc;
		PROC_LOCK(p);
#ifdef MAC
		if (will_transition) {
			mac_vnode_execve_transition(oldcred, imgp->newcred,
			    imgp->vp, interpvplabel, imgp);
		}
#endif
	} else {
		if (oldcred->cr_uid == oldcred->cr_ruid &&
		    oldcred->cr_gid == oldcred->cr_rgid)
			p->p_flag &= ~P_SUGID;
	}
	/*
	 * Set the new credentials.
	 */
	if (imgp->newcred != NULL) {
		proc_set_cred(p, imgp->newcred);
		crfree(oldcred);
		oldcred = NULL;
	}

	/*
	 * Store the vp for use in kern.proc.pathname.  This vnode was
	 * referenced by namei() or by the fexecve variant of fname handling.
	 */
	oldtextvp = p->p_textvp;
	p->p_textvp = newtextvp;
	oldtextdvp = p->p_textdvp;
	p->p_textdvp = newtextdvp;
	newtextdvp = NULL;
	oldbinname = p->p_binname;
	p->p_binname = newbinname;
	newbinname = NULL;

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exec if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exec)
		dtrace_fasttrap_exec(p);
#endif

	/*
	 * Notify others that we exec'd, and clear the P_INEXEC flag
	 * as we're now a bona fide freshly-execed process.
	 */
	KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
	p->p_flag &= ~P_INEXEC;

	/* clear "fork but no exec" flag, as we _are_ execing */
	p->p_acflag &= ~AFORK;

	/*
	 * Free any previous argument cache and replace it with
	 * the new argument cache, if any.
	 */
	oldargs = p->p_args;
	p->p_args = newargs;
	newargs = NULL;

	PROC_UNLOCK(p);

#ifdef HWPMC_HOOKS
	/*
	 * Check if system-wide sampling is in effect or if the
	 * current process is using PMCs.  If so, do exec() time
	 * processing.  This processing needs to happen AFTER the
	 * P_INEXEC flag is cleared.
	 */
	if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
		VOP_UNLOCK(imgp->vp);
		pe.pm_credentialschanged = credential_changing;
		pe.pm_entryaddr = imgp->entry_addr;

		PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
#endif

	/* Set values passed into the program in registers. */
	(*p->p_sysent->sv_setregs)(td, imgp, stack_base);

	VOP_MMAPPED(imgp->vp);

	SDT_PROBE1(proc, , , exec__success, args->fname);

exec_fail_dealloc:
	if (error != 0) {
		p->p_osrel = orig_osrel;
		p->p_fctl0 = orig_fctl0;
		p->p_elf_brandinfo = orig_brandinfo;
	}

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	if (imgp->vp != NULL) {
		if (imgp->opened)
			VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(imgp->vp);
		if (error != 0)
			vput(imgp->vp);
		else
			VOP_UNLOCK(imgp->vp);
		if (args->fname != NULL)
			NDFREE_PNBUF(&nd);
		if (newtextdvp != NULL)
			vrele(newtextdvp);
		free(newbinname, M_PARGS);
	}

	if (imgp->object != NULL)
		vm_object_deallocate(imgp->object);

	free(imgp->freepath, M_TEMP);

	if (error == 0) {
		if (p->p_ptevents & PTRACE_EXEC) {
			PROC_LOCK(p);
			if (p->p_ptevents & PTRACE_EXEC)
				td->td_dbgflags |= TDB_EXEC;
			PROC_UNLOCK(p);
		}
	} else {
exec_fail:
		/* we're done here, clear P_INEXEC */
		PROC_LOCK(p);
		p->p_flag &= ~P_INEXEC;
		PROC_UNLOCK(p);

		SDT_PROBE1(proc, , , exec__failure, error);
	}

	if (imgp->newcred != NULL && oldcred != NULL)
		crfree(imgp->newcred);

#ifdef MAC
	mac_execve_exit(imgp);
	mac_execve_interpreter_exit(interpvplabel);
#endif
	exec_free_args(args);

	/*
	 * Handle deferred decrement of ref counts.
	 */
	if (oldtextvp != NULL)
		vrele(oldtextvp);
	if (oldtextdvp != NULL)
		vrele(oldtextdvp);
	free(oldbinname, M_PARGS);
#ifdef KTRACE
	ktr_io_params_free(kiop);
#endif
	pargs_drop(oldargs);
	pargs_drop(newargs);
	if (oldsigacts != NULL)
		sigacts_free(oldsigacts);
	if (euip != NULL)
		uifree(euip);

	if (error && imgp->vmspace_destroyed) {
		/* sorry, no more process anymore. exit gracefully */
		exec_cleanup(td, oldvmspace);
		exit1(td, 0, SIGABRT);
		/* NOT REACHED */
	}

#ifdef KTRACE
	if (error == 0)
		ktrprocctor(p);
#endif

	/*
	 * We don't want cpu_set_syscall_retval() to overwrite any of
	 * the register values put in place by exec_setregs().
	 * Implementations of cpu_set_syscall_retval() will leave
	 * registers unmodified when returning EJUSTRETURN.
	 */
	return (error == 0 ? EJUSTRETURN : error);
}

void
exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
{
	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
		KASSERT(td->td_proc->p_vmspace != oldvmspace,
		    ("oldvmspace still used"));
		vmspace_free(oldvmspace);
		td->td_pflags &= ~TDP_EXECVMSPC;
	}
}

int
exec_map_first_page(struct image_params *imgp)
{
	vm_object_t object;
	vm_page_t m;
	int error;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
#if VM_NRESERVLEVEL > 0
	if ((object->flags & OBJ_COLORED) == 0) {
		VM_OBJECT_WLOCK(object);
		vm_object_color(object, 0);
		VM_OBJECT_WUNLOCK(object);
	}
#endif
	error = vm_page_grab_valid_unlocked(&m, object, 0,
	    VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);

	if (error != VM_PAGER_OK)
		return (EIO);
	imgp->firstpage = sf_buf_alloc(m, 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}

void
exec_unmap_first_page(struct image_params *imgp)
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		vm_page_unwire(m, PQ_ACTIVE);
	}
}

void
exec_onexec_old(struct thread *td)
{
	sigfastblock_clear(td);
	umtx_exec(td->td_proc);
}

/*
 * This is an optimization which removes the unmanaged shared page
 * mapping.  In combination with pmap_remove_pages(), which cleans all
 * managed mappings in the process' vmspace pmap, no work will be left
 * for pmap_remove(min, max).
 */
void
exec_free_abi_mappings(struct proc *p)
{
	struct vmspace *vmspace;
	struct sysentvec *sv;

	vmspace = p->p_vmspace;
	if (refcount_load(&vmspace->vm_refcnt) != 1)
		return;

	sv = p->p_sysent;
	if (sv->sv_shared_page_obj == NULL)
		return;

	pmap_remove(vmspace_pmap(vmspace), sv->sv_shared_page_base,
	    sv->sv_shared_page_base + sv->sv_shared_page_len);
}

/*
 * Run down the current address space and install a new one.  Map the shared
 * page.
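 * The existing address space is reused when it is private and its limits
 * match the new sysentvec's user address range; otherwise a fresh vmspace
 * is created with vmspace_exec().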
 */
int
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
{
	int error;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	struct thread *td = curthread;
	vm_object_t obj;
	vm_offset_t sv_minuser;
	vm_map_t map;

	imgp->vmspace_destroyed = true;
	imgp->sysent = sv;

	if (p->p_sysent->sv_onexec_old != NULL)
		p->p_sysent->sv_onexec_old(td);
	itimers_exec(p);

	EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);

	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted
	 */
	map = &vmspace->vm_map;
	if (map_at_zero)
		sv_minuser = sv->sv_minuser;
	else
		sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
	if (refcount_load(&vmspace->vm_refcnt) == 1 &&
	    vm_map_min(map) == sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser &&
	    cpu_exec_vmspace_reuse(p, map)) {
		exec_free_abi_mappings(p);
		shmexit(vmspace);
		pmap_remove_pages(vmspace_pmap(vmspace));
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
		/*
		 * An exec terminates mlockall(MCL_FUTURE).
		 * ASLR and W^X states must be re-evaluated.
		 */
		vm_map_lock(map);
		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
		    MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
		vm_map_unlock(map);
	} else {
		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
		if (error)
			return (error);
		vmspace = p->p_vmspace;
		map = &vmspace->vm_map;
	}
	map->flags |= imgp->map_flags;

	/* Map a shared page */
	obj = sv->sv_shared_page_obj;
	if (obj != NULL) {
		vm_object_reference(obj);
		error = vm_map_fixed(map, obj, 0,
		    sv->sv_shared_page_base, sv->sv_shared_page_len,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
		if (error != KERN_SUCCESS) {
			vm_object_deallocate(obj);
			return (vm_mmap_to_errno(error));
		}
	}

	return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
}

/*
 * Compute the stack size limit and map the main process stack.
 */
int
exec_map_stack(struct image_params *imgp)
{
	struct rlimit rlim_stack;
	struct sysentvec *sv;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vmspace;
	vm_offset_t stack_addr, stack_top;
	u_long ssiz;
	int error, find_space, stack_off;
	vm_prot_t stack_prot;

	p = imgp->proc;
	sv = p->p_sysent;

	if (imgp->stack_sz != 0) {
		ssiz = trunc_page(imgp->stack_sz);
		PROC_LOCK(p);
		lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
		PROC_UNLOCK(p);
		if (ssiz > rlim_stack.rlim_max)
			ssiz = rlim_stack.rlim_max;
		if (ssiz > rlim_stack.rlim_cur) {
			rlim_stack.rlim_cur = ssiz;
			kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
		}
	} else if (sv->sv_maxssiz != NULL) {
		ssiz = *sv->sv_maxssiz;
	} else {
		ssiz = maxssiz;
	}

	vmspace = p->p_vmspace;
	map = &vmspace->vm_map;

	stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
	    imgp->stack_prot : sv->sv_stackprot;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));
		find_space = VMFS_ANY_SPACE;
	} else {
		stack_addr = sv->sv_usrstack - ssiz;
		find_space = VMFS_NO_SPACE;
	}
	error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
	    sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
	    MAP_STACK_GROWS_DOWN);
	if (error != KERN_SUCCESS) {
		uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
		    "failed, mach error %d errno %d\n", (uintmax_t)ssiz,
		    stack_prot, error, vm_mmap_to_errno(error));
		return (vm_mmap_to_errno(error));
	}

	stack_top = stack_addr + ssiz;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		/* Randomize within the first page of the stack. */
		arc4rand(&stack_off, sizeof(stack_off), 0);
		stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
	}

	/*
	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
	 * are still used to enforce the stack rlimit on the process stack.
	 */
	vmspace->vm_maxsaddr = (char *)stack_addr;
	vmspace->vm_stacktop = stack_top;
	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;

	return (0);
}

/*
 * Copy out argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, const char *fname,
    enum uio_seg segflg, char **argv, char **envv)
{
	u_long arg, env;
	int error;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);

	/*
	 * Allocate demand-paged memory for the file name, argument, and
	 * environment strings.
	 */
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	/*
	 * Copy the file name.
	 */
	error = exec_args_add_fname(args, fname, segflg);
	if (error != 0)
		goto err_exit;

	/*
	 * extract arguments first
	 */
	for (;;) {
		error = fueword(argv++, &arg);
		if (error == -1) {
			error = EFAULT;
			goto err_exit;
		}
		if (arg == 0)
			break;
		error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
		    UIO_USERSPACE);
		if (error != 0)
			goto err_exit;
	}

	/*
	 * extract environment strings
	 */
	if (envv) {
		for (;;) {
			error = fueword(envv++, &env);
			if (error == -1) {
				error = EFAULT;
				goto err_exit;
			}
			if (env == 0)
				break;
			error = exec_args_add_env(args,
			    (char *)(uintptr_t)env, UIO_USERSPACE);
			if (error != 0)
				goto err_exit;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}

struct exec_args_kva {
	vm_offset_t addr;
	u_int gen;
	SLIST_ENTRY(exec_args_kva) next;
};

DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);

static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
static struct mtx exec_args_kva_mtx;
static u_int exec_args_gen;

static void
exec_prealloc_args_kva(void *arg __unused)
{
	struct exec_args_kva *argkva;
	u_int i;

	SLIST_INIT(&exec_args_kva_freelist);
	mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
	for (i = 0; i < exec_map_entries; i++) {
		argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
		argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
		argkva->gen = exec_args_gen;
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
	}
}
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);

static vm_offset_t
exec_alloc_args_kva(void **cookie)
{
	struct exec_args_kva *argkva;

	argkva = (void *)atomic_readandclear_ptr(
	    (uintptr_t *)DPCPU_PTR(exec_args_kva));
	if (argkva == NULL) {
		mtx_lock(&exec_args_kva_mtx);
		while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
			(void)mtx_sleep(&exec_args_kva_freelist,
			    &exec_args_kva_mtx, 0, "execkva", 0);
		SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
		mtx_unlock(&exec_args_kva_mtx);
	}
	kasan_mark((void *)argkva->addr, exec_map_entry_size,
	    exec_map_entry_size, 0);
	*(struct exec_args_kva **)cookie = argkva;
	return (argkva->addr);
}

static void
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
{
	vm_offset_t base;

	base = argkva->addr;
	kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
	    KASAN_EXEC_ARGS_FREED);
	if (argkva->gen != gen) {
		(void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
		    MADV_FREE);
		argkva->gen = gen;
	}
	if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
	    (uintptr_t)NULL, (uintptr_t)argkva)) {
		mtx_lock(&exec_args_kva_mtx);
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
		wakeup_one(&exec_args_kva_freelist);
		mtx_unlock(&exec_args_kva_mtx);
	}
}

static void
exec_free_args_kva(void *cookie)
{

	exec_release_args_kva(cookie, exec_args_gen);
}

static void
exec_args_kva_lowmem(void *arg __unused)
{
	SLIST_HEAD(, exec_args_kva) head;
	struct exec_args_kva *argkva;
	u_int gen;
	int i;

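	/*
	 * Advance the global generation number; any range later freed with
	 * a stale generation gets MADV_FREE applied in
	 * exec_release_args_kva().
	 */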
	gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;

	/*
	 * Force an madvise of each KVA range.  Any currently allocated ranges
	 * will have MADV_FREE applied once they are freed.
	 */
	SLIST_INIT(&head);
	mtx_lock(&exec_args_kva_mtx);
	SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
	mtx_unlock(&exec_args_kva_mtx);
	while ((argkva = SLIST_FIRST(&head)) != NULL) {
		SLIST_REMOVE_HEAD(&head, next);
		exec_release_args_kva(argkva, gen);
	}

	CPU_FOREACH(i) {
		argkva = (void *)atomic_readandclear_ptr(
		    (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
		if (argkva != NULL)
			exec_release_args_kva(argkva, gen);
	}
}
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
    EVENTHANDLER_PRI_ANY);

/*
 * Allocate temporary demand-paged, zero-filled memory for the file name,
 * argument, and environment strings.
 */
int
exec_alloc_args(struct image_args *args)
{

	args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
	return (0);
}

void
exec_free_args(struct image_args *args)
{

	if (args->buf != NULL) {
		exec_free_args_kva(args->bufkva);
		args->buf = NULL;
	}
	if (args->fname_buf != NULL) {
		free(args->fname_buf, M_TEMP);
		args->fname_buf = NULL;
	}
}

/*
 * A set of functions to fill struct image_args.
 *
 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
 * fname) before the other functions.  All exec_args_add_arg() calls must
 * be made before any exec_args_add_env() calls.  exec_args_adjust_args()
 * may be called any time after exec_args_add_fname().
 *
 * exec_args_add_fname() - install path to be executed
 * exec_args_add_arg() - append an argument string
 * exec_args_add_env() - append an env string
 * exec_args_adjust_args() - adjust location of the argument list to
 *                           allow new arguments to be prepended
 */
int
exec_args_add_fname(struct image_args *args, const char *fname,
    enum uio_seg segflg)
{
	int error;
	size_t length;

	KASSERT(args->fname == NULL, ("fname already appended"));
	KASSERT(args->endp == NULL, ("already appending to args"));

	if (fname != NULL) {
		args->fname = args->buf;
		error = segflg == UIO_SYSSPACE ?
		    copystr(fname, args->fname, PATH_MAX, &length) :
		    copyinstr(fname, args->fname, PATH_MAX, &length);
		if (error != 0)
			return (error == ENAMETOOLONG ? E2BIG : error);
	} else
		length = 0;

	/* Set up for _arg_*()/_env_*() */
	args->endp = args->buf + length;
	/* begin_argv must be set and kept updated */
	args->begin_argv = args->endp;
	KASSERT(exec_map_entry_size - length >= ARG_MAX,
	    ("too little space remaining for arguments %zu < %zu",
	    exec_map_entry_size - length, (size_t)ARG_MAX));
	args->stringspace = ARG_MAX;

	return (0);
}

static int
exec_args_add_str(struct image_args *args, const char *str,
    enum uio_seg segflg, int *countp)
{
	int error;
	size_t length;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	error = (segflg == UIO_SYSSPACE) ?
	    copystr(str, args->endp, args->stringspace, &length) :
	    copyinstr(str, args->endp, args->stringspace, &length);
	if (error != 0)
		return (error == ENAMETOOLONG ? E2BIG : error);
	args->stringspace -= length;
	args->endp += length;
	(*countp)++;

	return (0);
}

int
exec_args_add_arg(struct image_args *args, const char *argp,
    enum uio_seg segflg)
{

	KASSERT(args->envc == 0, ("appending args after env"));

	return (exec_args_add_str(args, argp, segflg, &args->argc));
}

int
exec_args_add_env(struct image_args *args, const char *envp,
    enum uio_seg segflg)
{

	if (args->envc == 0)
		args->begin_envv = args->endp;

	return (exec_args_add_str(args, envp, segflg, &args->envc));
}

int
exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
{
	ssize_t offset;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	offset = extend - consume;
	if (args->stringspace < offset)
		return (E2BIG);
	memmove(args->begin_argv + extend, args->begin_argv + consume,
	    args->endp - args->begin_argv + consume);
	if (args->envc > 0)
		args->begin_envv += offset;
	args->endp += offset;
	args->stringspace -= offset;
	return (0);
}

char *
exec_args_get_begin_envv(struct image_args *args)
{

	KASSERT(args->endp != NULL, ("endp not initialized"));

	if (args->envc > 0)
		return (args->begin_envv);
	return (args->endp);
}

/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables.  Return a pointer to the base so that it can be used
 * as the initial stack pointer.
 */
int
exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
{
	int argc, envc;
	char **vectp;
	char *stringp;
	uintptr_t destp, ustringp;
	struct ps_strings *arginfo;
	struct proc *p;
	struct sysentvec *sysent;
	size_t execpath_len;
	int error, szsigcode;
	char canary[sizeof(long) * 8];

	p = imgp->proc;
	sysent = p->p_sysent;

	destp = PROC_PS_STRINGS(p);
	arginfo = imgp->ps_strings = (void *)destp;

	/*
	 * Install sigcode.
	 */
	if (sysent->sv_sigcode_base == 0 && sysent->sv_szsigcode != NULL) {
		szsigcode = *(sysent->sv_szsigcode);
		destp -= szsigcode;
		destp = rounddown2(destp, sizeof(void *));
		error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
		if (error != 0)
			return (error);
	}

	/*
	 * Copy the image path for the rtld.
	 */
	if (imgp->execpath != NULL && imgp->auxargs != NULL) {
		execpath_len = strlen(imgp->execpath) + 1;
		destp -= execpath_len;
		destp = rounddown2(destp, sizeof(void *));
		imgp->execpathp = (void *)destp;
		error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
		if (error != 0)
			return (error);
	}

	/*
	 * Prepare the canary for SSP.
	 */
	arc4rand(canary, sizeof(canary), 0);
	destp -= sizeof(canary);
	imgp->canary = (void *)destp;
	error = copyout(canary, imgp->canary, sizeof(canary));
	if (error != 0)
		return (error);
	imgp->canarylen = sizeof(canary);

	/*
	 * Prepare the pagesizes array.
	 */
	imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
	destp -= imgp->pagesizeslen;
	destp = rounddown2(destp, sizeof(void *));
	imgp->pagesizes = (void *)destp;
	error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
	if (error != 0)
		return (error);

	/*
	 * Allocate room for the argument and environment strings.
	 */
	destp -= ARG_MAX - imgp->args->stringspace;
	destp = rounddown2(destp, sizeof(void *));
	ustringp = destp;

	if (imgp->auxargs) {
		/*
		 * Allocate room on the stack for the ELF auxargs
		 * array.  It has up to AT_COUNT entries.
		 */
		destp -= AT_COUNT * sizeof(Elf_Auxinfo);
		destp = rounddown2(destp, sizeof(void *));
	}

	vectp = (char **)destp;

	/*
	 * Allocate room for the argv[] and env vectors including the
	 * terminating NULL pointers.
	 */
	vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

	/*
	 * vectp also becomes our initial stack base
	 */
	*stack_base = (uintptr_t)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	error = copyout(stringp, (void *)ustringp,
	    ARG_MAX - imgp->args->stringspace);
	if (error != 0)
		return (error);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	imgp->argv = vectp;
	if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nargvstr, argc) != 0)
		return (EFAULT);

	/*
	 * Fill in argument portion of vector table.
	 */
	for (; argc > 0; --argc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	if (suword(vectp++, 0) != 0)
		return (EFAULT);

	imgp->envv = vectp;
	if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nenvstr, envc) != 0)
		return (EFAULT);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* end of vector table is a null pointer */
	if (suword(vectp, 0) != 0)
		return (EFAULT);

	if (imgp->auxargs) {
		vectp++;
		error = imgp->sysent->sv_copyout_auxargs(imgp,
		    (uintptr_t)vectp);
		if (error != 0)
			return (error);
	}

	return (0);
}

/*
 * Check permissions of file to execute.
 * Called with imgp->vp locked.
 * Return 0 for success or error code on failure.
 */
int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->vp;
	struct vattr *attr = imgp->attr;
	struct thread *td;
	int error;

	td = curthread;

	/* Get file attributes */
	error = VOP_GETATTR(vp, attr, td->td_ucred);
	if (error)
		return (error);

#ifdef MAC
	error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
	if (error)
		return (error);
#endif

	/*
	 * 1) Check if file execution is disabled for the filesystem that
	 *    this file resides on.
	 * 2) Ensure that at least one execute bit is on.  Otherwise, a
	 *    privileged user will always succeed, and we don't want this
	 *    to happen unless the file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 * Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 *
	 * Add a text reference now so no one can write to the
	 * executable while we're activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		return (error);
	imgp->textset = true;

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error == 0)
		imgp->opened = true;
	return (error);
}

/*
 * Exec handler registration
 */
int
exec_register(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	u_int count = 2;	/* New slot and trailing NULL */

	if (execsw)
		for (es = execsw; *es; es++)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	if (execsw)
		for (es = execsw; *es; es++)
			*xs++ = *es;
	*xs++ = execsw_arg;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

int
exec_unregister(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 1;

	if (execsw == NULL)
		panic("unregister with no handlers left?\n");

	for (es = execsw; *es; es++) {
		if (*es == execsw_arg)
			break;
	}
	if (*es == NULL)
		return (ENOENT);
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			*xs++ = *es;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
{
	size_t chunk_len;
	int error;

	while (len > 0) {
		chunk_len = MIN(len, CORE_BUF_SIZE);

		/*
		 * We can get EFAULT error here.
		 * In that case zero out the current chunk of the segment.
		 */
		error = copyin(base, buf, chunk_len);
		if (error != 0)
			bzero(buf, chunk_len);
		error = compressor_write(cp->comp, buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

int
core_write(struct coredump_params *cp, const void *base, size_t len,
    off_t offset, enum uio_seg seg, size_t *resid)
{

	return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
	    len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
	    cp->active_cred, cp->file_cred, resid, cp->td));
}

int
core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
    void *tmpbuf)
{
	vm_map_t map;
	struct mount *mp;
	size_t resid, runlen;
	int error;
	bool success;

	KASSERT((uintptr_t)base % PAGE_SIZE == 0,
	    ("%s: user address %p is not page-aligned", __func__, base));

	if (cp->comp != NULL)
		return (compress_chunk(cp, base, tmpbuf, len));

	map = &cp->td->td_proc->p_vmspace->vm_map;
	for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
		/*
		 * Attempt to page in all virtual pages in the range.  If a
		 * virtual page is not backed by the pager, it is represented as
		 * a hole in the file.  This can occur with zero-filled
		 * anonymous memory or truncated files, for example.
		 */
		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
			if (core_dump_can_intr && curproc_sigkilled())
				return (EINTR);
			error = vm_fault(map, (uintptr_t)base + runlen,
			    VM_PROT_READ, VM_FAULT_NOFILL, NULL);
			if (runlen == 0)
				success = error == KERN_SUCCESS;
			else if ((error == KERN_SUCCESS) != success)
				break;
		}

		if (success) {
			error = core_write(cp, base, runlen, offset,
			    UIO_USERSPACE, &resid);
			if (error != 0) {
				if (error != EFAULT)
					break;

				/*
				 * EFAULT may be returned if the user mapping
				 * could not be accessed, e.g., because a mapped
				 * file has been truncated.  Skip the page if no
				 * progress was made, to protect against a
				 * hypothetical scenario where vm_fault() was
				 * successful but core_write() returns EFAULT
				 * anyway.
				 */
				runlen -= resid;
				if (runlen == 0) {
					success = false;
					runlen = PAGE_SIZE;
				}
			}
		}
		if (!success) {
			error = vn_start_write(cp->vp, &mp, V_WAIT);
			if (error != 0)
				break;
			vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
			error = vn_truncate_locked(cp->vp, offset + runlen,
			    false, cp->td->td_ucred);
			VOP_UNLOCK(cp->vp);
			vn_finished_write(mp);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Drain into a core file.
 */
int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *cp;
	struct proc *p;
	int error, locked;

	cp = arg;
	p = cp->td->td_proc;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held.  Draining with the
	 * non-sleepable lock held is unsafe.  The lock is needed for
	 * those routines when dumping a live process.  In our case we
	 * can safely release the lock before draining and acquire
	 * again after.
	 */
	locked = PROC_LOCKED(p);
	if (locked)
		PROC_UNLOCK(p);
	if (cp->comp != NULL)
		error = compressor_write(cp->comp, __DECONST(char *, data),
		    len);
	else
		error = core_write(cp, __DECONST(void *, data), len, cp->offset,
		    UIO_SYSSPACE, NULL);
	if (locked)
		PROC_LOCK(p);
	if (error != 0)
		return (-error);
	cp->offset += len;
	return (len);
}