1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 39 * $FreeBSD$ 40 */ 41 42 #include "opt_compat.h" 43 #include "opt_ktrace.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/sysproto.h> 48 #include <sys/kernel.h> 49 #include <sys/malloc.h> 50 #include <sys/lock.h> 51 #include <sys/mutex.h> 52 #include <sys/proc.h> 53 #include <sys/pioctl.h> 54 #include <sys/tty.h> 55 #include <sys/wait.h> 56 #include <sys/vmmeter.h> 57 #include <sys/vnode.h> 58 #include <sys/resourcevar.h> 59 #include <sys/signalvar.h> 60 #include <sys/sx.h> 61 #include <sys/ptrace.h> 62 #include <sys/acct.h> /* for acct_process() function prototype */ 63 #include <sys/filedesc.h> 64 #include <sys/shm.h> 65 #include <sys/sem.h> 66 #include <sys/jail.h> 67 #ifdef KTRACE 68 #include <sys/ktrace.h> 69 #endif 70 71 #include <vm/vm.h> 72 #include <vm/vm_extern.h> 73 #include <vm/vm_param.h> 74 #include <vm/pmap.h> 75 #include <vm/vm_map.h> 76 #include <vm/uma.h> 77 #include <sys/user.h> 78 79 /* Required to be non-static for SysVR4 emulator */ 80 MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status"); 81 82 static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback"); 83 84 static int wait1(struct thread *, struct wait_args *, int); 85 86 /* 87 * callout list for things to do at exit time 88 */ 89 struct exitlist { 90 exitlist_fn function; 91 TAILQ_ENTRY(exitlist) next; 92 }; 93 94 TAILQ_HEAD(exit_list_head, exitlist); 95 static struct 
exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * exit --
 *	Death of process.
 *
 * System call entry point for exit(2).  Takes Giant and hands the full
 * wait(2)-encoded status off to exit1(), which never returns.
 *
 * MPSAFE
 */
void
sys_exit(td, uap)
	struct thread *td;
	struct sys_exit_args /* {
		int	rval;
	} */ *uap;
{

	mtx_lock(&Giant);
	/* W_EXITCODE() packs the user's return value with signal 0. */
	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * Called with Giant held.  "rv" is the full wait(2) status word (see
 * W_EXITCODE()).  Never returns: control ends in cpu_throw().
 */
void
exit1(td, rv)
	register struct thread *td;
	int rv;
{
	struct exitlist *ep;
	struct proc *p, *nq, *q;
	struct tty *tp;
	struct vnode *ttyvp;
	register struct vmspace *vm;
	struct vnode *vtmp;
#ifdef KTRACE
	struct vnode *tracevp;
#endif

	GIANT_REQUIRED;

	p = td->td_proc;
	/* init must never exit; losing it would orphan the whole system. */
	if (p == initproc) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * XXXKSE: MUST abort all other threads before proceeding past here.
	 */

	/* Are we a task leader? */
	PROC_LOCK(p);
	if (p == p->p_leader) {
		/* Kill every peer, then sleep until they have all left the
		 * peer list; each peer removes itself and wakes the leader
		 * (see the "Remove ourself from our leader's peer list"
		 * block below). */
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers)
			msleep((caddr_t)p, &p->p_mtx, PWAIT, "exit1", 0);
	}
	PROC_UNLOCK(p);

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(p);

	stopprofclock(p);

	/* Storage for the final rusage; freed by wait1() when harvested. */
	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
		M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_flag |= P_WEXIT;
	SIGEMPTYSET(p->p_siglist);
	PROC_UNLOCK(p);
	/* Stop the real interval timer if one was armed. */
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_itcallout);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td); /* XXXKSE *//* may not be the one in proc */

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	PROC_LOCK(p->p_leader);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}
	PROC_UNLOCK(p->p_leader);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 */
	if (--vm->vm_refcnt == 0) {
		if (vm->vm_shm)
			shmexit(p);
		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
		(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
		/* Record who freed the map (consumed in vm_waitproc()
		 * path -- NOTE(review): confirm against vm side). */
		vm->vm_freer = p;
	}

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		register struct session *sp;

		sp = p->p_session;
		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				tp = sp->s_ttyp;
				if (sp->s_ttyp->t_pgrp) {
					PGRP_LOCK(sp->s_ttyp->t_pgrp);
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
					PGRP_UNLOCK(sp->s_ttyp->t_pgrp);
				}
				/* XXX tp should be locked. */
				/* ttywait() may sleep, so drop the proctree
				 * lock across it and revalidate afterward. */
				sx_xunlock(&proctree_lock);
				(void) ttywait(tp);
				sx_xlock(&proctree_lock);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp) {
					ttyvp = sp->s_ttyvp;
					SESS_LOCK(p->p_session);
					sp->s_ttyvp = NULL;
					SESS_UNLOCK(p->p_session);
					sx_xunlock(&proctree_lock);
					VOP_REVOKE(ttyvp, REVOKEALL);
					vrele(ttyvp);
					sx_xlock(&proctree_lock);
				}
			}
			/* Drop any remaining reference to the tty vnode
			 * (e.g. if the tty belonged to another session). */
			if (sp->s_ttyvp) {
				ttyvp = sp->s_ttyvp;
				SESS_LOCK(p->p_session);
				sp->s_ttyvp = NULL;
				SESS_UNLOCK(p->p_session);
				vrele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		SESS_LOCK(p->p_session);
		sp->s_leader = NULL;
		SESS_UNLOCK(p->p_session);
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	p->p_traceflag = 0;	/* don't trace the vrele() */
	tracevp = p->p_tracep;
	p->p_tracep = NULL;
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	/* vrele() may sleep, so it is done outside the locks above. */
	if (tracevp != NULL)
		vrele(tracevp);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Release our limits structure.
	 */
	mtx_assert(&Giant, MA_OWNED);
	if (--p->p_limit->p_refcnt == 0) {
		FREE(p->p_limit, M_SUBPROC);
		p->p_limit = NULL;
	}

	/*
	 * Release this thread's reference to the ucred.  The actual proc
	 * reference will stay around until the proc is harvested by
	 * wait().  At this point the ucred is immutable (no other threads
	 * from this proc are around that can change it) so we leave the
	 * per-thread ucred pointer intact in case it is needed although
	 * in theory nothing should be using it at this point.
	 */
	crfree(td->td_ucred);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/* Reparent all of our children to init. */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	PROC_LOCK(p);
	p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	mtx_lock_spin(&sched_lock);
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	mtx_unlock_spin(&sched_lock);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	if (p->p_pptr->p_procsig->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp;

		pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		/* p_pptr is now initproc; relock the (new) parent. */
		PROC_LOCK(p->p_pptr);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	/* Deliver the death signal: p_sigparent if set (and we are not
	 * init's child), otherwise the default SIGCHLD. */
	if (p->p_sigparent && p->p_pptr != initproc)
		psignal(p->p_pptr, p->p_sigparent);
	else
		psignal(p->p_pptr, SIGCHLD);
	PROC_UNLOCK(p->p_pptr);

	/*
	 * If this is a kthread, then wakeup anyone waiting for it to exit.
	 */
	if (p->p_flag & P_KTHREAD)
		wakeup((caddr_t)p);
	PROC_UNLOCK(p);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space, the kernel stack and pcb.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	PROC_LOCK(p);
	PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);
	mtx_lock_spin(&sched_lock);
	/* Giant may be recursed; shed every hold before going zombie. */
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

	/*
	 * We have to wait until after releasing all locks before
	 * changing p_stat.  If we block on a mutex then we will be
	 * back at SRUN when we resume and our parent will never
	 * harvest us.
	 */
	p->p_stat = SZOMB;

	/* Let the parent (sleeping in wait1()) harvest us. */
	wakeup(p->p_pptr);
	PROC_UNLOCK(p->p_pptr);
	PROC_UNLOCK(p);

	cnt.v_swtch++;
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* Switch away for the last time; this context is never resumed. */
	cpu_throw();
	panic("exit1");
}

#ifdef COMPAT_43
/*
 * MPSAFE.  The dirty work is handled by wait1().
 */
int
owait(td, uap)
	struct thread *td;
	register struct owait_args /* {
		int	dummy;
	} */ *uap;
{
	struct wait_args w;

	/* Old 7th-edition wait(2): wait for any child, no options, status
	 * returned in the second return register (compat == 1 below). */
	w.options = 0;
	w.rusage = NULL;
	w.pid = WAIT_ANY;
	w.status = NULL;
	return (wait1(td, &w, 1));
}
#endif /* COMPAT_43 */

/*
 * MPSAFE.  The dirty work is handled by wait1().
 */
int
wait4(td, uap)
	struct thread *td;
	struct wait_args *uap;
{

	return (wait1(td, uap, 0));
}

/*
 * MPSAFE
 *
 * Common back end for wait4() and the COMPAT_43 owait() entry point.
 * When "compat" is non-zero the status word is returned in
 * td_retval[1] (historic calling convention) instead of being copied
 * out through uap->status.
 *
 * Returns 0 on success (pid in td_retval[0]), ECHILD if the caller has
 * no matching children, EINVAL on bad options, or a copyout()/msleep()
 * error.  Blocks in msleep() unless WNOHANG is given.
 */
static int
wait1(td, uap, compat)
	register struct thread *td;
	register struct wait_args /* {
		int pid;
		int *status;
		int options;
		struct rusage *rusage;
	} */ *uap;
	int compat;
{
	struct rusage ru;
	register int nfound;
	register struct proc *p, *q, *t;
	int status, error;

	q = td->td_proc;
	/* pid == 0 means "any child in my process group". */
	if (uap->pid == 0) {
		PROC_LOCK(q);
		uap->pid = -q->p_pgid;
		PROC_UNLOCK(q);
	}
	if (uap->options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
	mtx_lock(&Giant);
loop:
	/* Restarted after every msleep() wakeup below. */
	nfound = 0;
	sx_xlock(&proctree_lock);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		PROC_LOCK(p);
		/* Filter by explicit pid or (negated) process group. */
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid && p->p_pgid != -uap->pid) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((uap->options & WLINUXCLONE) != 0)) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * Charge child's scheduling cpu usage to parent.
			 * XXXKSE assume only one thread & kse & ksegrp
			 * keep estcpu in each ksegrp
			 * so charge it to the ksegrp that did the wait
			 * since process estcpu is sum of all ksegrps,
			 * this is strictly as expected.
			 * Assume that the child process aggregated all
			 * the estcpu into the 'built-in' ksegrp.
			 * XXXKSE
			 */
			if (curthread->td_proc->p_pid != 1) {
				mtx_lock_spin(&sched_lock);
				curthread->td_ksegrp->kg_estcpu =
				    ESTCPULIM(curthread->td_ksegrp->kg_estcpu +
				    p->p_ksegrp.kg_estcpu);
				mtx_unlock_spin(&sched_lock);
			}

			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat)
				td->td_retval[1] = p->p_xstat;
			else
#endif
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				/* Drop the proc lock across copyout(),
				 * which may fault and sleep. */
				PROC_UNLOCK(p);
				if ((error = copyout((caddr_t)&status,
				    (caddr_t)uap->status, sizeof(status)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
				PROC_LOCK(p);
			}
			if (uap->rusage) {
				bcopy(p->p_ru, &ru, sizeof(ru));
				PROC_UNLOCK(p);
				if ((error = copyout((caddr_t)&ru,
				    (caddr_t)uap->rusage,
				    sizeof (struct rusage)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
			} else
				PROC_UNLOCK(p);
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 * pfind() returns with t's proc lock held
			 * (released below); the child stays a zombie
			 * for the old parent to harvest.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PROC_LOCK(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				PROC_UNLOCK(p);
				psignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				PROC_UNLOCK(t);
				sx_xunlock(&proctree_lock);
				mtx_unlock(&Giant);
				return (0);
			}
			/*
			 * Remove other references to this process to ensure
			 * we have an exclusive reference.
			 */
			leavepgrp(p);

			sx_xlock(&allproc_lock);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			sx_xunlock(&allproc_lock);

			LIST_REMOVE(p, p_sibling);
			sx_xunlock(&proctree_lock);

			/*
			 * As a side effect of this lock, we know that
			 * all other writes to this proc are visible now, so
			 * no more locking is needed for p.
			 */
			PROC_LOCK(p);
			p->p_xstat = 0;		/* XXX: why? */
			PROC_UNLOCK(p);
			/* Fold the child's final rusage into our own
			 * child-rusage totals, then free it. */
			PROC_LOCK(q);
			ruadd(&q->p_stats->p_cru, p->p_ru);
			PROC_UNLOCK(q);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;	/* XXX: why? */

			/*
			 * Remove unused arguments
			 */
			pargs_drop(p->p_args);
			p->p_args = NULL;

			/* Last reference drops the shared signal state;
			 * sigacts embedded in the u-area is not malloc'd
			 * and must not be FREE'd. */
			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_uarea->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			/*
			 * Give vm and machine-dependent layer a chance
			 * to free anything that cpu_exit couldn't
			 * release while still running in process context.
			 */
			vm_waitproc(p);
			mtx_destroy(&p->p_mtx);
			uma_zfree(proc_zone, p);
			sx_xlock(&allproc_lock);
			nprocs--;
			sx_xunlock(&allproc_lock);
			mtx_unlock(&Giant);
			return (0);
		}
		/* Report a stopped child once (P_WAITED) when traced or
		 * when the caller asked with WUNTRACED. */
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat) {
				td->td_retval[1] = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = 0;
			} else
#endif
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = copyout((caddr_t)&status,
					(caddr_t)uap->status, sizeof(status));
			} else {
				PROC_UNLOCK(p);
				error = 0;
			}
			mtx_unlock(&Giant);
			return (error);
		}
		/* Report a continued child and clear the flag so it is
		 * only reported once. */
		if (uap->options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			p->p_flag &= ~P_CONTINUED;
			PROC_UNLOCK(p);

			if (uap->status) {
				status = SIGCONT;
				error = copyout((caddr_t)&status,
				    (caddr_t)uap->status, sizeof(status));
			} else
				error = 0;

			mtx_unlock(&Giant);
			return (error);
		}
		PROC_UNLOCK(p);
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		mtx_unlock(&Giant);
		return (ECHILD);
	}
	if (uap->options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		mtx_unlock(&Giant);
		return (0);
	}
	/* Nothing reportable yet: sleep on our own proc (exit1() does
	 * wakeup(p->p_pptr)) and rescan the children from the top. */
	PROC_LOCK(q);
	sx_xunlock(&proctree_lock);
	error = msleep((caddr_t)q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
	PROC_UNLOCK(q);
	if (error) {
		mtx_unlock(&Giant);
		return (error);
	}
	goto loop;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 * Must be called with an exclusive hold of proctree lock.
739 */ 740 void 741 proc_reparent(child, parent) 742 register struct proc *child; 743 register struct proc *parent; 744 { 745 746 sx_assert(&proctree_lock, SX_XLOCKED); 747 PROC_LOCK_ASSERT(child, MA_OWNED); 748 if (child->p_pptr == parent) 749 return; 750 751 LIST_REMOVE(child, p_sibling); 752 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); 753 child->p_pptr = parent; 754 } 755 756 /* 757 * The next two functions are to handle adding/deleting items on the 758 * exit callout list 759 * 760 * at_exit(): 761 * Take the arguments given and put them onto the exit callout list, 762 * However first make sure that it's not already there. 763 * returns 0 on success. 764 */ 765 766 int 767 at_exit(function) 768 exitlist_fn function; 769 { 770 struct exitlist *ep; 771 772 #ifdef INVARIANTS 773 /* Be noisy if the programmer has lost track of things */ 774 if (rm_at_exit(function)) 775 printf("WARNING: exit callout entry (%p) already present\n", 776 function); 777 #endif 778 ep = malloc(sizeof(*ep), M_ATEXIT, M_NOWAIT); 779 if (ep == NULL) 780 return (ENOMEM); 781 ep->function = function; 782 TAILQ_INSERT_TAIL(&exit_list, ep, next); 783 return (0); 784 } 785 786 /* 787 * Scan the exit callout list for the given item and remove it. 788 * Returns the number of items removed (0 or 1) 789 */ 790 int 791 rm_at_exit(function) 792 exitlist_fn function; 793 { 794 struct exitlist *ep; 795 796 TAILQ_FOREACH(ep, &exit_list, next) { 797 if (ep->function == function) { 798 TAILQ_REMOVE(&exit_list, ep, next); 799 free(ep, M_ATEXIT); 800 return (1); 801 } 802 } 803 return (0); 804 } 805