/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

#ifdef KSE
int virtual_cpu;

#endif
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#ifdef KSE
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;
        int def_val;

        def_val = mp_ncpus;
        if (virtual_cpu == 0)
                new_val = def_val;
        else
                new_val = virtual_cpu;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < 0)
                return (EINVAL);
        virtual_cpu = new_val;
        return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");
#endif

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

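/*
 * Illustrative sketch (not part of the original file): the tunables
 * exported above can be read from userland with sysctlbyname(3).  A
 * minimal, stand-alone consumer might look like this:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int maxthr;
 *		size_t len = sizeof(maxthr);
 *
 *		if (sysctlbyname("kern.threads.max_threads_per_proc",
 *		    &maxthr, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("max threads per proc: %d\n", maxthr);
 *		return (0);
 *	}
 */
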
/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_oncpu = NOCPU;

        td->td_tid = alloc_unr(tid_unrhdr);
        td->td_syscalls = 0;

        /*
         * Note that td_critnest begins life as 1 because the thread is not
         * running and is thereby implicitly waiting to be on the receiving
         * end of a context switch.
         */
        td->td_critnest = 1;

#ifdef AUDIT
        audit_thread_alloc(td);
#endif
        umtx_thread_alloc(td);
        return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
#ifdef AUDIT
        audit_thread_free(td);
#endif
        free_unr(tid_unrhdr, td->td_tid);
        sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;

        td->td_sleepqueue = sleepq_alloc();
        td->td_turnstile = turnstile_alloc();
        td->td_sched = (struct td_sched *)&td[1];
        sched_newthread(td);
        umtx_thread_init(td);
        td->td_kstack = 0;
        return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        turnstile_free(td->td_turnstile);
        sleepq_free(td->td_sleepqueue);
        umtx_thread_fini(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
        TAILQ_INIT(&p->p_threads);           /* all threads in proc */
        proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

#ifdef KSE
        TAILQ_INIT(&p->p_upcalls);           /* upcall list */
#endif
        sigqueue_init(&p->p_sigqueue, p);
        p->p_ksi = ksiginfo_alloc(1);
        if (p->p_ksi != NULL) {
                /* XXX p_ksi may be null if ksiginfo zone is not ready */
                p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
        }
        bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
        LIST_INIT(&p->p_mqnotifier);
        p->p_numthreads = 0;
        thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

        mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
        tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            16 - 1, 0);
#ifdef KSE
        kseinit();      /* set up kse specific stuff e.g. upcall zone */
#endif
}

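/*
 * Illustrative sketch (not part of the original file): the callbacks
 * registered with uma_zcreate() above fire at different points in an
 * item's life, and a consumer of thread_zone only ever sees the
 * allocate/free half:
 *
 *	struct thread *td;
 *
 *	td = uma_zalloc(thread_zone, M_WAITOK);	(thread_ctor runs here)
 *	...
 *	uma_zfree(thread_zone, td);		(thread_dtor runs here)
 *
 * thread_init() and thread_fini() run only when UMA populates or
 * reclaims the backing slab, which is why the sleepqueue and turnstile
 * allocated in thread_init() survive reuse of the item ("type-stable"
 * storage).
 */
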
/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
        mtx_lock_spin(&zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
        mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
        atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
        thread_zombie(td);
}

/*
 * Reap zombie resources (threads and, under KSE, upcalls).
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;

        /*
         * Don't even bother to lock if none at this instant;
         * we really don't care about the next instant.
         */
        if (!TAILQ_EMPTY(&zombie_threads)) {
                mtx_lock_spin(&zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                mtx_unlock_spin(&zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_slpq);
                        if (td_first->td_ucred)
                                crfree(td_first->td_ucred);
                        thread_free(td_first);
                        td_first = td_next;
                }
        }
#ifdef KSE
        upcall_reap();
#endif
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
        struct thread *td;

        thread_reap(); /* check if any zombies to get */

        td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
        KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
        if (!vm_thread_new(td, 0)) {
                uma_zfree(thread_zone, td);
                return (NULL);
        }
        cpu_thread_alloc(td);
        return (td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        cpu_thread_free(td);
        if (td->td_altkstack != 0)
                vm_thread_dispose_altkstack(td);
        if (td->td_kstack != 0)
                vm_thread_dispose(td);
        uma_zfree(thread_zone, td);
}

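/*
 * Illustrative sketch (not part of the original file; the caller and
 * error handling shown are hypothetical): consumers such as thr_create()
 * pair thread_alloc() with thread_free() on failure paths, and hand the
 * thread to thread_link() (with the process locked as that path requires)
 * once it is fully set up:
 *
 *	struct thread *newtd;
 *
 *	newtd = thread_alloc();
 *	if (newtd == NULL)
 *		return (ENOMEM);
 *	...				(set up stack, pcb, ucred, etc.)
 *	if (error != 0) {
 *		thread_free(newtd);	(never linked, so safe to free)
 *		return (error);
 *	}
 *	thread_link(newtd, p);
 */
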
368 * 369 * called from: 370 * exit1() 371 * kse_exit() 372 * thr_exit() 373 * ifdef KSE 374 * thread_user_enter() 375 * thread_userret() 376 * endif 377 * thread_suspend_check() 378 */ 379 void 380 thread_exit(void) 381 { 382 uint64_t new_switchtime; 383 struct thread *td; 384 struct thread *td2; 385 struct proc *p; 386 387 td = curthread; 388 p = td->td_proc; 389 390 PROC_SLOCK_ASSERT(p, MA_OWNED); 391 mtx_assert(&Giant, MA_NOTOWNED); 392 393 PROC_LOCK_ASSERT(p, MA_OWNED); 394 KASSERT(p != NULL, ("thread exiting without a process")); 395 CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, 396 (long)p->p_pid, td->td_name); 397 KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); 398 399 #ifdef AUDIT 400 AUDIT_SYSCALL_EXIT(0, td); 401 #endif 402 403 #ifdef KSE 404 if (td->td_standin != NULL) { 405 /* 406 * Note that we don't need to free the cred here as it 407 * is done in thread_reap(). 408 */ 409 thread_zombie(td->td_standin); 410 td->td_standin = NULL; 411 } 412 #endif 413 414 umtx_thread_exit(td); 415 416 /* 417 * drop FPU & debug register state storage, or any other 418 * architecture specific resources that 419 * would not be on a new untouched process. 420 */ 421 cpu_thread_exit(td); /* XXXSMP */ 422 423 /* Do the same timestamp bookkeeping that mi_switch() would do. */ 424 new_switchtime = cpu_ticks(); 425 p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime)); 426 PCPU_SET(switchtime, new_switchtime); 427 PCPU_SET(switchticks, ticks); 428 PCPU_INC(cnt.v_swtch); 429 /* Save our resource usage in our process. */ 430 td->td_ru.ru_nvcsw++; 431 rucollect(&p->p_ru, &td->td_ru); 432 /* 433 * The last thread is left attached to the process 434 * So that the whole bundle gets recycled. Skip 435 * all this stuff if we never had threads. 436 * EXIT clears all sign of other threads when 437 * it goes to single threading, so the last thread always 438 * takes the short path. 439 */ 440 if (p->p_flag & P_HADTHREADS) { 441 if (p->p_numthreads > 1) { 442 thread_lock(td); 443 #ifdef KSE 444 kse_unlink(td); 445 #else 446 thread_unlink(td); 447 #endif 448 thread_unlock(td); 449 td2 = FIRST_THREAD_IN_PROC(p); 450 sched_exit_thread(td2, td); 451 452 /* 453 * The test below is NOT true if we are the 454 * sole exiting thread. P_STOPPED_SNGL is unset 455 * in exit1() after it is the only survivor. 456 */ 457 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 458 if (p->p_numthreads == p->p_suspcount) { 459 thread_lock(p->p_singlethread); 460 thread_unsuspend_one(p->p_singlethread); 461 thread_unlock(p->p_singlethread); 462 } 463 } 464 465 atomic_add_int(&td->td_proc->p_exitthreads, 1); 466 PCPU_SET(deadthread, td); 467 } else { 468 /* 469 * The last thread is exiting.. but not through exit() 470 * what should we do? 
471 * Theoretically this can't happen 472 * exit1() - clears threading flags before coming here 473 * kse_exit() - treats last thread specially 474 * thr_exit() - treats last thread specially 475 * ifdef KSE 476 * thread_user_enter() - only if more exist 477 * thread_userret() - only if more exist 478 * endif 479 * thread_suspend_check() - only if more exist 480 */ 481 panic ("thread_exit: Last thread exiting on its own"); 482 } 483 } 484 PROC_UNLOCK(p); 485 thread_lock(td); 486 /* Save our tick information with both the thread and proc locked */ 487 ruxagg(&p->p_rux, td); 488 PROC_SUNLOCK(p); 489 td->td_state = TDS_INACTIVE; 490 CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); 491 sched_throw(td); 492 panic("I'm a teapot!"); 493 /* NOTREACHED */ 494 } 495 496 /* 497 * Do any thread specific cleanups that may be needed in wait() 498 * called with Giant, proc and schedlock not held. 499 */ 500 void 501 thread_wait(struct proc *p) 502 { 503 struct thread *td; 504 505 mtx_assert(&Giant, MA_NOTOWNED); 506 KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()")); 507 td = FIRST_THREAD_IN_PROC(p); 508 #ifdef KSE 509 if (td->td_standin != NULL) { 510 if (td->td_standin->td_ucred != NULL) { 511 crfree(td->td_standin->td_ucred); 512 td->td_standin->td_ucred = NULL; 513 } 514 thread_free(td->td_standin); 515 td->td_standin = NULL; 516 } 517 #endif 518 /* Lock the last thread so we spin until it exits cpu_throw(). */ 519 thread_lock(td); 520 thread_unlock(td); 521 /* Wait for any remaining threads to exit cpu_throw(). */ 522 while (p->p_exitthreads) 523 sched_relinquish(curthread); 524 cpu_thread_clean(td); 525 crfree(td->td_ucred); 526 thread_reap(); /* check for zombie threads etc. */ 527 } 528 529 /* 530 * Link a thread to a process. 531 * set up anything that needs to be initialized for it to 532 * be used by the process. 533 * 534 * Note that we do not link to the proc's ucred here. 535 * The thread is linked as if running but no KSE assigned. 536 * Called from: 537 * proc_linkup() 538 * thread_schedule_upcall() 539 * thr_create() 540 */ 541 void 542 thread_link(struct thread *td, struct proc *p) 543 { 544 545 /* 546 * XXX This can't be enabled because it's called for proc0 before 547 * it's spinlock has been created. 548 * PROC_SLOCK_ASSERT(p, MA_OWNED); 549 */ 550 td->td_state = TDS_INACTIVE; 551 td->td_proc = p; 552 td->td_flags = TDF_INMEM; 553 554 LIST_INIT(&td->td_contested); 555 sigqueue_init(&td->td_sigqueue, p); 556 callout_init(&td->td_slpcallout, CALLOUT_MPSAFE); 557 TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist); 558 p->p_numthreads++; 559 } 560 561 /* 562 * Convert a process with one thread to an unthreaded process. 
563 * Called from: 564 * thread_single(exit) (called from execve and exit) 565 * kse_exit() XXX may need cleaning up wrt KSE stuff 566 */ 567 void 568 thread_unthread(struct thread *td) 569 { 570 struct proc *p = td->td_proc; 571 572 KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads")); 573 #ifdef KSE 574 thread_lock(td); 575 upcall_remove(td); 576 thread_unlock(td); 577 p->p_flag &= ~(P_SA|P_HADTHREADS); 578 td->td_mailbox = NULL; 579 td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND); 580 if (td->td_standin != NULL) { 581 thread_zombie(td->td_standin); 582 td->td_standin = NULL; 583 } 584 #else 585 p->p_flag &= ~P_HADTHREADS; 586 #endif 587 } 588 589 /* 590 * Called from: 591 * thread_exit() 592 */ 593 void 594 thread_unlink(struct thread *td) 595 { 596 struct proc *p = td->td_proc; 597 598 PROC_SLOCK_ASSERT(p, MA_OWNED); 599 TAILQ_REMOVE(&p->p_threads, td, td_plist); 600 p->p_numthreads--; 601 /* could clear a few other things here */ 602 /* Must NOT clear links to proc! */ 603 } 604 605 /* 606 * Enforce single-threading. 607 * 608 * Returns 1 if the caller must abort (another thread is waiting to 609 * exit the process or similar). Process is locked! 610 * Returns 0 when you are successfully the only thread running. 611 * A process has successfully single threaded in the suspend mode when 612 * There are no threads in user mode. Threads in the kernel must be 613 * allowed to continue until they get to the user boundary. They may even 614 * copy out their return values and data before suspending. They may however be 615 * accelerated in reaching the user boundary as we will wake up 616 * any sleeping threads that are interruptable. (PCATCH). 617 */ 618 int 619 thread_single(int mode) 620 { 621 struct thread *td; 622 struct thread *td2; 623 struct proc *p; 624 int remaining; 625 626 td = curthread; 627 p = td->td_proc; 628 mtx_assert(&Giant, MA_NOTOWNED); 629 PROC_LOCK_ASSERT(p, MA_OWNED); 630 KASSERT((td != NULL), ("curthread is NULL")); 631 632 if ((p->p_flag & P_HADTHREADS) == 0) 633 return (0); 634 635 /* Is someone already single threading? */ 636 if (p->p_singlethread != NULL && p->p_singlethread != td) 637 return (1); 638 639 if (mode == SINGLE_EXIT) { 640 p->p_flag |= P_SINGLE_EXIT; 641 p->p_flag &= ~P_SINGLE_BOUNDARY; 642 } else { 643 p->p_flag &= ~P_SINGLE_EXIT; 644 if (mode == SINGLE_BOUNDARY) 645 p->p_flag |= P_SINGLE_BOUNDARY; 646 else 647 p->p_flag &= ~P_SINGLE_BOUNDARY; 648 } 649 p->p_flag |= P_STOPPED_SINGLE; 650 PROC_SLOCK(p); 651 p->p_singlethread = td; 652 if (mode == SINGLE_EXIT) 653 remaining = p->p_numthreads; 654 else if (mode == SINGLE_BOUNDARY) 655 remaining = p->p_numthreads - p->p_boundary_count; 656 else 657 remaining = p->p_numthreads - p->p_suspcount; 658 while (remaining != 1) { 659 if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) 660 goto stopme; 661 FOREACH_THREAD_IN_PROC(p, td2) { 662 if (td2 == td) 663 continue; 664 thread_lock(td2); 665 td2->td_flags |= TDF_ASTPENDING; 666 if (TD_IS_INHIBITED(td2)) { 667 switch (mode) { 668 case SINGLE_EXIT: 669 if (td->td_flags & TDF_DBSUSPEND) 670 td->td_flags &= ~TDF_DBSUSPEND; 671 if (TD_IS_SUSPENDED(td2)) 672 thread_unsuspend_one(td2); 673 if (TD_ON_SLEEPQ(td2) && 674 (td2->td_flags & TDF_SINTR)) 675 sleepq_abort(td2, EINTR); 676 break; 677 case SINGLE_BOUNDARY: 678 break; 679 default: 680 if (TD_IS_SUSPENDED(td2)) { 681 thread_unlock(td2); 682 continue; 683 } 684 /* 685 * maybe other inhibited states too? 
686 */ 687 if ((td2->td_flags & TDF_SINTR) && 688 (td2->td_inhibitors & 689 (TDI_SLEEPING | TDI_SWAPPED))) 690 thread_suspend_one(td2); 691 break; 692 } 693 } 694 #ifdef SMP 695 else if (TD_IS_RUNNING(td2) && td != td2) { 696 forward_signal(td2); 697 } 698 #endif 699 thread_unlock(td2); 700 } 701 if (mode == SINGLE_EXIT) 702 remaining = p->p_numthreads; 703 else if (mode == SINGLE_BOUNDARY) 704 remaining = p->p_numthreads - p->p_boundary_count; 705 else 706 remaining = p->p_numthreads - p->p_suspcount; 707 708 /* 709 * Maybe we suspended some threads.. was it enough? 710 */ 711 if (remaining == 1) 712 break; 713 714 stopme: 715 /* 716 * Wake us up when everyone else has suspended. 717 * In the mean time we suspend as well. 718 */ 719 thread_suspend_switch(td); 720 if (mode == SINGLE_EXIT) 721 remaining = p->p_numthreads; 722 else if (mode == SINGLE_BOUNDARY) 723 remaining = p->p_numthreads - p->p_boundary_count; 724 else 725 remaining = p->p_numthreads - p->p_suspcount; 726 } 727 if (mode == SINGLE_EXIT) { 728 /* 729 * We have gotten rid of all the other threads and we 730 * are about to either exit or exec. In either case, 731 * we try our utmost to revert to being a non-threaded 732 * process. 733 */ 734 p->p_singlethread = NULL; 735 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT); 736 thread_unthread(td); 737 } 738 PROC_SUNLOCK(p); 739 return (0); 740 } 741 742 /* 743 * Called in from locations that can safely check to see 744 * whether we have to suspend or at least throttle for a 745 * single-thread event (e.g. fork). 746 * 747 * Such locations include userret(). 748 * If the "return_instead" argument is non zero, the thread must be able to 749 * accept 0 (caller may continue), or 1 (caller must abort) as a result. 750 * 751 * The 'return_instead' argument tells the function if it may do a 752 * thread_exit() or suspend, or whether the caller must abort and back 753 * out instead. 754 * 755 * If the thread that set the single_threading request has set the 756 * P_SINGLE_EXIT bit in the process flags then this call will never return 757 * if 'return_instead' is false, but will exit. 758 * 759 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 760 *---------------+--------------------+--------------------- 761 * 0 | returns 0 | returns 0 or 1 762 * | when ST ends | immediatly 763 *---------------+--------------------+--------------------- 764 * 1 | thread exits | returns 1 765 * | | immediatly 766 * 0 = thread_exit() or suspension ok, 767 * other = return error instead of stopping the thread. 768 * 769 * While a full suspension is under effect, even a single threading 770 * thread would be suspended if it made this call (but it shouldn't). 771 * This call should only be made from places where 772 * thread_exit() would be safe as that may be the outcome unless 773 * return_instead is set. 774 */ 775 int 776 thread_suspend_check(int return_instead) 777 { 778 struct thread *td; 779 struct proc *p; 780 781 td = curthread; 782 p = td->td_proc; 783 mtx_assert(&Giant, MA_NOTOWNED); 784 PROC_LOCK_ASSERT(p, MA_OWNED); 785 while (P_SHOULDSTOP(p) || 786 ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) { 787 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 788 KASSERT(p->p_singlethread != NULL, 789 ("singlethread not set")); 790 /* 791 * The only suspension in action is a 792 * single-threading. Single threader need not stop. 793 * XXX Should be safe to access unlocked 794 * as it can only be set to be true by us. 
void
thread_suspend_switch(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        /*
         * We implement thread_suspend_one in stages here to avoid
         * dropping the proc lock while the thread lock is owned.
         */
        thread_stopped(p);
        p->p_suspcount++;
        PROC_UNLOCK(p);
        thread_lock(td);
        sched_sleep(td);
        TD_SET_SUSPENDED(td);
        PROC_SUNLOCK(p);
        DROP_GIANT();
        mi_switch(SW_VOL, NULL);
        thread_unlock(td);
        PICKUP_GIANT();
        PROC_LOCK(p);
        PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        p->p_suspcount++;
        sched_sleep(td);
        TD_SET_SUSPENDED(td);
}

void
thread_unsuspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
        TD_CLR_SUSPENDED(td);
        p->p_suspcount--;
        setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        if (!P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                thread_unsuspend_one(td);
                        }
                        thread_unlock(td);
                }
        } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
            (p->p_numthreads == p->p_suspcount)) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request. Now we've downgraded to single-threaded,
                 * let it continue.
                 */
                thread_lock(p->p_singlethread);
                thread_unsuspend_one(p->p_singlethread);
                thread_unlock(p->p_singlethread);
        }
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
        PROC_SLOCK(p);
        p->p_singlethread = NULL;
        /*
         * If there are other threads they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process. The single threader must be allowed
         * to continue however as this is a bad place to stop.
         */
        if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                thread_unsuspend_one(td);
                        }
                        thread_unlock(td);
                }
        }
        PROC_SUNLOCK(p);
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK(p);
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_tid == tid)
                        break;
        }
        PROC_SUNLOCK(p);
        return (td);
}

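/*
 * Illustrative sketch (not part of the original file; the argument names
 * are hypothetical): callers such as the thr_*() system calls resolve a
 * user-supplied thread id while holding the process lock, and keep it
 * held while acting on the thread so it cannot exit underneath them:
 *
 *	struct thread *ttd;
 *
 *	PROC_LOCK(p);
 *	ttd = thread_find(p, uap->id);
 *	if (ttd == NULL) {
 *		PROC_UNLOCK(p);
 *		return (ESRCH);
 *	}
 *	...			(act on ttd while p stays locked)
 *	PROC_UNLOCK(p);
 */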