1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 39 * $FreeBSD$ 40 */ 41 42 #include "opt_compat.h" 43 #include "opt_ktrace.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/sysproto.h> 48 #include <sys/kernel.h> 49 #include <sys/malloc.h> 50 #include <sys/lock.h> 51 #include <sys/mutex.h> 52 #include <sys/proc.h> 53 #include <sys/pioctl.h> 54 #include <sys/tty.h> 55 #include <sys/wait.h> 56 #include <sys/vnode.h> 57 #include <sys/vmmeter.h> 58 #include <sys/resourcevar.h> 59 #include <sys/signalvar.h> 60 #include <sys/sx.h> 61 #include <sys/ptrace.h> 62 #include <sys/acct.h> /* for acct_process() function prototype */ 63 #include <sys/filedesc.h> 64 #include <sys/shm.h> 65 #include <sys/sem.h> 66 #include <sys/jail.h> 67 68 #include <vm/vm.h> 69 #include <vm/vm_param.h> 70 #include <vm/vm_extern.h> 71 #include <vm/pmap.h> 72 #include <vm/vm_map.h> 73 #include <vm/uma.h> 74 #include <sys/user.h> 75 76 /* Required to be non-static for SysVR4 emulator */ 77 MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status"); 78 79 static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback"); 80 81 static int wait1(struct thread *, struct wait_args *, int); 82 83 /* 84 * callout list for things to do at exit time 85 */ 86 struct exitlist { 87 exitlist_fn function; 88 TAILQ_ENTRY(exitlist) next; 89 }; 90 91 TAILQ_HEAD(exit_list_head, exitlist); 92 static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list); 93 
/*
 * exit --
 *	Death of process: syscall entry point.
 *
 * MPSAFE
 */
void
sys_exit(td, uap)
	struct thread *td;
	struct sys_exit_args /* {
		int	rval;
	} */ *uap;
{

	/* Giant is taken but never released here: exit1() does not return. */
	mtx_lock(&Giant);
	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * 'rv' is a full wait(2)-style status word (see W_EXITCODE()), not a raw
 * exit code.  Never returns; ends by switching away from the zombie via
 * cpu_throw().
 */
void
exit1(td, rv)
	register struct thread *td;
	int rv;
{
	struct proc *p = td->td_proc;
	register struct proc *q, *nq;
	register struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	struct vnode *ttyvp;
	struct tty *tp;
#ifdef KTRACE
	struct vnode *tracevp;
#endif

	GIANT_REQUIRED;

	/* init (pid 1) exiting is unrecoverable: the system cannot continue. */
	if (p->p_pid == 1) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/* XXXXKSE */
	/* MUST abort all other threads before proceeding past this point */

	/* are we a task leader? */
	PROC_LOCK(p);
	if (p == p->p_leader) {
		/*
		 * Kill every peer in the task, then sleep until the last
		 * dying peer removes itself from p_peers and wakes us
		 * (see the peer-unlink code further down).
		 */
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers)
			msleep((caddr_t)p, &p->p_mtx, PWAIT, "exit1", 0);
	}
	PROC_UNLOCK(p);

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(p);

	stopprofclock(p);

	/*
	 * Allocate the rusage snapshot that wait1() will later hand to the
	 * parent and free (M_ZOMBIE).  Done before P_WEXIT is set so the
	 * M_WAITOK sleep happens while we are still an ordinary process.
	 */
	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
		M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_flag |= P_WEXIT;
	SIGEMPTYSET(p->p_siglist);
	PROC_UNLOCK(p);
	/* Cancel any pending real-interval timer. */
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_itcallout);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	SIGIO_LOCK();
	PROC_LOCK(p);
	funsetownlst(&p->p_sigiolst);
	PROC_UNLOCK(p);
	SIGIO_UNLOCK();

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td); /* XXXKSE *//* may not be the one in proc */

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 * (The leader may be sleeping in the "exit1" msleep above.)
	 */
	PROC_LOCK(p->p_leader);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}
	PROC_UNLOCK(p->p_leader);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 */
	if (--vm->vm_refcnt == 0) {
		if (vm->vm_shm)
			shmexit(p);
		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
		(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
		/*
		 * Record who dropped the last ref; the rest of the vmspace
		 * is torn down later via vmspace_exitfree() in vm_waitproc().
		 */
		vm->vm_freer = p;
	}

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		register struct session *sp;

		sp = p->p_session;
		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				tp = sp->s_ttyp;
				if (sp->s_ttyp->t_pgrp) {
					PGRP_LOCK(sp->s_ttyp->t_pgrp);
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
					PGRP_UNLOCK(sp->s_ttyp->t_pgrp);
				}
				/* XXX tp should be locked. */
				/* ttywait() may sleep; drop proctree_lock around it. */
				sx_xunlock(&proctree_lock);
				(void) ttywait(tp);
				sx_xlock(&proctree_lock);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp) {
					ttyvp = sp->s_ttyvp;
					SESS_LOCK(p->p_session);
					sp->s_ttyvp = NULL;
					SESS_UNLOCK(p->p_session);
					sx_xunlock(&proctree_lock);
					VOP_REVOKE(ttyvp, REVOKEALL);
					vrele(ttyvp);
					sx_xlock(&proctree_lock);
				}
			}
			/* Re-check: the revoke path above may have cleared it. */
			if (sp->s_ttyvp) {
				ttyvp = sp->s_ttyvp;
				SESS_LOCK(p->p_session);
				sp->s_ttyvp = NULL;
				SESS_UNLOCK(p->p_session);
				vrele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		SESS_LOCK(p->p_session);
		sp->s_leader = NULL;
		SESS_UNLOCK(p->p_session);
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	PROC_LOCK(p);
	p->p_traceflag = 0;	/* don't trace the vrele() */
	tracevp = p->p_tracep;
	p->p_tracep = NULL;
	PROC_UNLOCK(p);
	if (tracevp != NULL)
		vrele(tracevp);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Release our limits structure.
	 */
	mtx_assert(&Giant, MA_OWNED);
	if (--p->p_limit->p_refcnt == 0) {
		FREE(p->p_limit, M_SUBPROC);
		p->p_limit = NULL;
	}

	/*
	 * Release this thread's reference to the ucred. The actual proc
	 * reference will stay around until the proc is harvested by
	 * wait(). At this point the ucred is immutable (no other threads
	 * from this proc are around that can change it) so we leave the
	 * per-thread ucred pointer intact in case it is needed although
	 * in theory nothing should be using it at this point.
	 */
	crfree(td->td_ucred);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Reparent all of our children to init.  proctree_lock is held
	 * from here until after the SZOMB transition below.
	 */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	PROC_LOCK(p);
	p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	mtx_lock_spin(&sched_lock);
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	mtx_unlock_spin(&sched_lock);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	if (p->p_pptr->p_procsig->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		/* p_pptr is now initproc; lock the new parent. */
		PROC_LOCK(p->p_pptr);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc)
		psignal(p->p_pptr, p->p_sigparent);
	else
		psignal(p->p_pptr, SIGCHLD);
	PROC_UNLOCK(p->p_pptr);

	/*
	 * If this is a kthread, then wakeup anyone waiting for it to exit.
	 */
	if (p->p_flag & P_KTHREAD)
		wakeup((caddr_t)p);
	PROC_UNLOCK(p);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space, the kernel stack and pcb.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	PROC_LOCK(p);
	PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);
	mtx_lock_spin(&sched_lock);
	/* Giant may be recursively held; shed every recursion level. */
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

	/*
	 * We have to wait until after releasing all locks before
	 * changing p_stat.  If we block on a mutex then we will be
	 * back at SRUN when we resume and our parent will never
	 * harvest us.
	 */
	p->p_stat = SZOMB;

	wakeup(p->p_pptr);
	PROC_UNLOCK(p->p_pptr);
	PROC_UNLOCK(p);

	/* Account for the final context switch away from this process. */
	cnt.v_swtch++;
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* Switch away for the last time; cpu_throw() never returns. */
	cpu_throw();
	panic("exit1");
}

#ifdef COMPAT_43
/*
 * owait -- old 4.3BSD wait(2): no pid/options/rusage arguments; the
 * status is returned in the second return register (td_retval[1]).
 *
 * MPSAFE, the dirty work is handled by wait1().
 */
int
owait(td, uap)
	struct thread *td;
	register struct owait_args /* {
		int	dummy;
	} */ *uap;
{
	struct wait_args w;

	w.options = 0;
	w.rusage = NULL;
	w.pid = WAIT_ANY;
	w.status = NULL;
	return (wait1(td, &w, 1));
}
#endif /* COMPAT_43 */

/*
 * MPSAFE, the dirty work is handled by wait1().
 */
int
wait4(td, uap)
	struct thread *td;
	struct wait_args *uap;
{

	return (wait1(td, uap, 0));
}

/*
 * wait1 -- common worker for wait4() and (COMPAT_43) owait().
 *
 * Scans the caller's children for one matching uap->pid that is either a
 * zombie (reap it) or stopped and reportable; otherwise sleeps on the
 * parent proc and retries, unless WNOHANG was given.  On success the
 * child's pid is returned via td->td_retval[0] and 0 is returned; errors
 * are returned as errno values (EINVAL, ECHILD, or a copyout error).
 * 'compat' selects the old owait() return convention (status in
 * td_retval[1] instead of copyout to uap->status).
 *
 * MPSAFE
 */
static int
wait1(td, uap, compat)
	register struct thread *td;
	register struct wait_args /* {
		int pid;
		int *status;
		int options;
		struct rusage *rusage;
	} */ *uap;
	int compat;
{
	register int nfound;
	register struct proc *q, *p, *t;
	int status, error;

	q = td->td_proc;
	/* pid == 0 means "any child in my process group" (encoded as -pgid). */
	if (uap->pid == 0) {
		PROC_LOCK(q);
		uap->pid = -q->p_pgid;
		PROC_UNLOCK(q);
	}
	if (uap->options &~ (WUNTRACED|WNOHANG|WLINUXCLONE))
		return (EINVAL);
	mtx_lock(&Giant);
loop:
	nfound = 0;
	sx_xlock(&proctree_lock);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		PROC_LOCK(p);
		/* pid > 0: exact child; pid < 0: any child in pgid -pid. */
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid && p->p_pgid != -uap->pid) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((uap->options & WLINUXCLONE) != 0)) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * charge childs scheduling cpu usage to parent
			 * XXXKSE assume only one thread & kse & ksegrp
			 * keep estcpu in each ksegrp
			 * so charge it to the ksegrp that did the wait
			 * since process estcpu is sum of all ksegrps,
			 * this is strictly as expected.
			 * Assume that the child process aggregated all
			 * tke estcpu into the 'build-in' ksegrp.
			 * XXXKSE
			 */
			if (curthread->td_proc->p_pid != 1) {
				mtx_lock_spin(&sched_lock);
				curthread->td_ksegrp->kg_estcpu =
				    ESTCPULIM(curthread->td_ksegrp->kg_estcpu +
				    p->p_ksegrp.kg_estcpu);
				mtx_unlock_spin(&sched_lock);
			}

			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat)
				td->td_retval[1] = p->p_xstat;
			else
#endif
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				/* Drop the proc lock across the copyout (may fault/sleep). */
				PROC_UNLOCK(p);
				if ((error = copyout((caddr_t)&status,
				    (caddr_t)uap->status, sizeof(status)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
				PROC_LOCK(p);
			}
			if (uap->rusage) {
				struct rusage ru;

				/* Snapshot first so the copyout runs unlocked. */
				bcopy(p->p_ru, &ru, sizeof(ru));
				PROC_UNLOCK(p);
				if ((error = copyout((caddr_t)&ru,
				    (caddr_t)uap->rusage,
				    sizeof (struct rusage)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
			} else
				PROC_UNLOCK(p);
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 * NOTE(review): pfind() presumably returns 't'
			 * locked, hence the bare PROC_UNLOCK(t) below —
			 * confirm against pfind()'s contract.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PROC_LOCK(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				PROC_UNLOCK(p);
				psignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				PROC_UNLOCK(t);
				sx_xunlock(&proctree_lock);
				mtx_unlock(&Giant);
				/* The old parent, not us, will reap the zombie. */
				return (0);
			}
			/*
			 * Remove other references to this process to ensure
			 * we have an exclusive reference.
			 */
			leavepgrp(p);

			sx_xlock(&allproc_lock);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			sx_xunlock(&allproc_lock);

			LIST_REMOVE(p, p_sibling);
			sx_xunlock(&proctree_lock);

			/*
			 * As a side effect of this lock, we know that
			 * all other writes to this proc are visible now, so
			 * no more locking is needed for p.
			 */
			PROC_LOCK(p);
			p->p_xstat = 0;		/* XXX: why? */
			PROC_UNLOCK(p);
			PROC_LOCK(q);
			/* Fold the child's rusage into our accumulated child totals. */
			ruadd(&q->p_stats->p_cru, p->p_ru);
			PROC_UNLOCK(q);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;	/* XXX: why? */

			/*
			 * Remove unused arguments
			 */
			pargs_drop(p->p_args);
			p->p_args = NULL;

			/*
			 * Drop the shared signal state; p_sigacts lives in
			 * the u-area unless it was allocated separately.
			 */
			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_uarea->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			/*
			 * Give vm and machine-dependent layer a chance
			 * to free anything that cpu_exit couldn't
			 * release while still running in process context.
			 */
			vm_waitproc(p);
			mtx_destroy(&p->p_mtx);
			uma_zfree(proc_zone, p);
			sx_xlock(&allproc_lock);
			nprocs--;
			sx_xunlock(&allproc_lock);
			mtx_unlock(&Giant);
			return (0);
		}
		/*
		 * Report a stopped child once (P_WAITED) if it is traced
		 * or the caller asked for stopped children (WUNTRACED).
		 */
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat) {
				td->td_retval[1] = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = 0;
			} else
#endif
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = copyout((caddr_t)&status,
				    (caddr_t)uap->status, sizeof(status));
			} else {
				PROC_UNLOCK(p);
				error = 0;
			}
			mtx_unlock(&Giant);
			return (error);
		}
		PROC_UNLOCK(p);
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		mtx_unlock(&Giant);
		return (ECHILD);
	}
	if (uap->options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		mtx_unlock(&Giant);
		return (0);
	}
	/*
	 * No reportable child yet: sleep on our own proc (exit1() and the
	 * stop code wake the parent), then rescan from the top.
	 */
	PROC_LOCK(q);
	sx_xunlock(&proctree_lock);
	error = msleep((caddr_t)q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
	PROC_UNLOCK(q);
	if (error) {
		mtx_unlock(&Giant);
		return (error);
	}
	goto loop;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 * Must be called with an exclusive hold of proctree lock,
 * and with 'child' locked.  No-op if already the parent.
 */
void
proc_reparent(child, parent)
	register struct proc *child;
	register struct proc *parent;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(child, MA_OWNED);
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list,
 * However first make sure that it's not already there.
 * returns 0 on success.
 *
 * NOTE(review): exit_list is read by exit1() and modified here with no
 * visible locking; presumably registration only happens at module
 * load/unload time — confirm callers.
 */
int
at_exit(function)
	exitlist_fn function;
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		printf("WARNING: exit callout entry (%p) already present\n",
		    function);
#endif
	/* M_NOWAIT: allocation may fail rather than sleep. */
	ep = malloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(function)
	exitlist_fn function;
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			free(ep, M_ATEXIT);
			return(1);
		}
	}
	return (0);
}