/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_oncpu = NOCPU;

        td->td_tid = alloc_unr(tid_unrhdr);
        td->td_syscalls = 0;

        /*
         * Note that td_critnest begins life as 1 because the thread is not
         * running and is thereby implicitly waiting to be on the receiving
         * end of a context switch.
         */
        td->td_critnest = 1;
        EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
        audit_thread_alloc(td);
#endif
        umtx_thread_alloc(td);
        return (0);
}
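
/*
 * Illustrative sketch (not part of this file): the EVENTHANDLER_INVOKE()
 * calls in the ctor/dtor/init/fini routines let other kernel code hook
 * thread lifecycle events via eventhandler(9).  A module could, for
 * example, register for the thread_ctor event roughly like this
 * ("mymod_thread_ctor" is a hypothetical name):
 *
 *	static void
 *	mymod_thread_ctor(void *arg, struct thread *td)
 *	{
 *		printf("thread %ld created\n", (long)td->td_tid);
 *	}
 *	EVENTHANDLER_REGISTER(thread_ctor, mymod_thread_ctor, NULL,
 *	    EVENTHANDLER_PRI_ANY);
 */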

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
#ifdef AUDIT
        audit_thread_free(td);
#endif
        EVENTHANDLER_INVOKE(thread_dtor, td);
        free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;

        td->td_sleepqueue = sleepq_alloc();
        td->td_turnstile = turnstile_alloc();
        EVENTHANDLER_INVOKE(thread_init, td);
        td->td_sched = (struct td_sched *)&td[1];
        umtx_thread_init(td);
        td->td_kstack = 0;
        return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        EVENTHANDLER_INVOKE(thread_fini, td);
        turnstile_free(td->td_turnstile);
        sleepq_free(td->td_sleepqueue);
        umtx_thread_fini(td);
        seltdfini(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * Called from:
 * {arch}/{arch}/machdep.c ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
        TAILQ_INIT(&p->p_threads);           /* all threads in proc */
        proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

        sigqueue_init(&p->p_sigqueue, p);
        p->p_ksi = ksiginfo_alloc(1);
        if (p->p_ksi != NULL) {
                /* XXX p_ksi may be null if ksiginfo zone is not ready */
                p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
        }
        LIST_INIT(&p->p_mqnotifier);
        p->p_numthreads = 0;
        thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

        mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
        /* leave one number for thread0 */
        tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            16 - 1, 0);
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
        mtx_lock_spin(&zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
        mtx_unlock_spin(&zombie_lock);
}
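
/*
 * Note: threads placed on zombie_threads above keep their kernel stack
 * and other resources until thread_reap() (below) drains the list; the
 * reap is driven lazily from thread_alloc() and thread_wait() rather
 * than by a dedicated reaper.
 */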

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
        atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
        thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;

        /*
         * Don't even bother to lock if none at this instant;
         * we really don't care about the next instant.
         */
        if (!TAILQ_EMPTY(&zombie_threads)) {
                mtx_lock_spin(&zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                mtx_unlock_spin(&zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_slpq);
                        if (td_first->td_ucred)
                                crfree(td_first->td_ucred);
                        thread_free(td_first);
                        td_first = td_next;
                }
        }
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
        struct thread *td;

        thread_reap();          /* check if any zombies to get */

        td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
        KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
        if (!vm_thread_new(td, 0)) {
                uma_zfree(thread_zone, td);
                return (NULL);
        }
        cpu_thread_alloc(td);
        return (td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
        if (td->td_cpuset)
                cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_free(td);
        if (td->td_altkstack != 0)
                vm_thread_dispose_altkstack(td);
        if (td->td_kstack != 0)
                vm_thread_dispose(td);
        uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        uint64_t new_switchtime;
        struct thread *td;
        struct thread *td2;
        struct proc *p;

        td = curthread;
        p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&Giant, MA_NOTOWNED);

        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
            (long)p->p_pid, td->td_name);
        KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
        AUDIT_SYSCALL_EXIT(0, td);
#endif
        umtx_thread_exit(td);
        /*
         * Drop FPU & debug register state storage, or any other
         * architecture-specific resources that
         * would not be on a new untouched process.
         */
        cpu_thread_exit(td);    /* XXXSMP */

        /* Do the same timestamp bookkeeping that mi_switch() would do. */
        new_switchtime = cpu_ticks();
        p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
        PCPU_SET(switchtime, new_switchtime);
        PCPU_SET(switchticks, ticks);
        PCPU_INC(cnt.v_swtch);
        /* Save our resource usage in our process. */
        td->td_ru.ru_nvcsw++;
        rucollect(&p->p_ru, &td->td_ru);
        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled.  Skip
         * all this stuff if we never had threads.
         * exit1() clears all signs of other threads when
         * it goes to single threading, so the last thread always
         * takes the short path.
         */
        if (p->p_flag & P_HADTHREADS) {
                if (p->p_numthreads > 1) {
                        thread_unlink(td);
                        td2 = FIRST_THREAD_IN_PROC(p);
                        sched_exit_thread(td2, td);

                        /*
                         * The test below is NOT true if we are the
                         * sole exiting thread.  P_STOPPED_SINGLE is unset
                         * in exit1() after it is the only survivor.
                         */
                        if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                                if (p->p_numthreads == p->p_suspcount) {
                                        thread_lock(p->p_singlethread);
                                        thread_unsuspend_one(p->p_singlethread);
                                        thread_unlock(p->p_singlethread);
                                }
                        }

                        atomic_add_int(&td->td_proc->p_exitthreads, 1);
                        PCPU_SET(deadthread, td);
                } else {
                        /*
                         * The last thread is exiting.. but not through exit().
                         */
                        panic("thread_exit: Last thread exiting on its own");
                }
        }
        PROC_UNLOCK(p);
        thread_lock(td);
        /* Save our tick information with both the thread and proc locked. */
        ruxagg(&p->p_rux, td);
        PROC_SUNLOCK(p);
        td->td_state = TDS_INACTIVE;
        CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
        sched_throw(td);
        panic("I'm a teapot!");
        /* NOTREACHED */
}
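
/*
 * Summary of the exit handshake implemented above: thread_exit() adds
 * the departing thread to p_exitthreads and parks it in the per-CPU
 * deadthread slot; the scheduler later hands the dead thread to
 * thread_stash(), which drops the count and places it on the zombie
 * list; thread_wait() spins until p_exitthreads reaches zero and then
 * calls thread_reap().
 */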

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
        struct thread *td;

        mtx_assert(&Giant, MA_NOTOWNED);
        KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
        td = FIRST_THREAD_IN_PROC(p);
        /* Lock the last thread so we spin until it exits cpu_throw(). */
        thread_lock(td);
        thread_unlock(td);
        /* Wait for any remaining threads to exit cpu_throw(). */
        while (p->p_exitthreads)
                sched_relinquish(curthread);
        cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_clean(td);
        crfree(td->td_ucred);
        thread_reap();  /* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

        /*
         * XXX This can't be enabled because it's called for proc0 before
         * its lock has been created.
         * PROC_LOCK_ASSERT(p, MA_OWNED);
         */
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_flags = TDF_INMEM;

        LIST_INIT(&td->td_contested);
        LIST_INIT(&td->td_lprof[0]);
        LIST_INIT(&td->td_lprof[1]);
        sigqueue_init(&td->td_sigqueue, p);
        callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
        TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
        p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 */
void
thread_unthread(struct thread *td)
{
        struct proc *p = td->td_proc;

        KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
        p->p_flag &= ~P_HADTHREADS;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
        struct proc *p = td->td_proc;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        TAILQ_REMOVE(&p->p_threads, td, td_plist);
        p->p_numthreads--;
        /* could clear a few other things here */
        /* Must NOT clear links to proc! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
        struct thread *td;
        struct thread *td2;
        struct proc *p;
        int remaining, wakeup_swapper;

        td = curthread;
        p = td->td_proc;
        mtx_assert(&Giant, MA_NOTOWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT((td != NULL), ("curthread is NULL"));

        if ((p->p_flag & P_HADTHREADS) == 0)
                return (0);

        /* Is someone already single threading? */
        if (p->p_singlethread != NULL && p->p_singlethread != td)
                return (1);

        if (mode == SINGLE_EXIT) {
                p->p_flag |= P_SINGLE_EXIT;
                p->p_flag &= ~P_SINGLE_BOUNDARY;
        } else {
                p->p_flag &= ~P_SINGLE_EXIT;
                if (mode == SINGLE_BOUNDARY)
                        p->p_flag |= P_SINGLE_BOUNDARY;
                else
                        p->p_flag &= ~P_SINGLE_BOUNDARY;
        }
        p->p_flag |= P_STOPPED_SINGLE;
        PROC_SLOCK(p);
        p->p_singlethread = td;
        if (mode == SINGLE_EXIT)
                remaining = p->p_numthreads;
        else if (mode == SINGLE_BOUNDARY)
                remaining = p->p_numthreads - p->p_boundary_count;
        else
                remaining = p->p_numthreads - p->p_suspcount;
        while (remaining != 1) {
                if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
                        goto stopme;
                wakeup_swapper = 0;
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        thread_lock(td2);
                        td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
                        if (TD_IS_INHIBITED(td2)) {
                                switch (mode) {
                                case SINGLE_EXIT:
                                        if (td->td_flags & TDF_DBSUSPEND)
                                                td->td_flags &= ~TDF_DBSUSPEND;
                                        if (TD_IS_SUSPENDED(td2))
                                                thread_unsuspend_one(td2);
                                        if (TD_ON_SLEEPQ(td2) &&
                                            (td2->td_flags & TDF_SINTR))
                                                wakeup_swapper =
                                                    sleepq_abort(td2, EINTR);
                                        break;
                                case SINGLE_BOUNDARY:
                                        break;
                                default:
                                        if (TD_IS_SUSPENDED(td2)) {
                                                thread_unlock(td2);
                                                continue;
                                        }
                                        /*
                                         * maybe other inhibited states too?
                                         */
                                        if ((td2->td_flags & TDF_SINTR) &&
                                            (td2->td_inhibitors &
                                            (TDI_SLEEPING | TDI_SWAPPED)))
                                                thread_suspend_one(td2);
                                        break;
                                }
                        }
#ifdef SMP
                        else if (TD_IS_RUNNING(td2) && td != td2) {
                                forward_signal(td2);
                        }
#endif
                        thread_unlock(td2);
                }
                if (wakeup_swapper)
                        kick_proc0();
                if (mode == SINGLE_EXIT)
                        remaining = p->p_numthreads;
                else if (mode == SINGLE_BOUNDARY)
                        remaining = p->p_numthreads - p->p_boundary_count;
                else
                        remaining = p->p_numthreads - p->p_suspcount;

                /*
                 * Maybe we suspended some threads.. was it enough?
                 */
                if (remaining == 1)
                        break;

stopme:
                /*
                 * Wake us up when everyone else has suspended.
                 * In the meantime we suspend as well.
                 */
                thread_suspend_switch(td);
                if (mode == SINGLE_EXIT)
                        remaining = p->p_numthreads;
                else if (mode == SINGLE_BOUNDARY)
                        remaining = p->p_numthreads - p->p_boundary_count;
                else
                        remaining = p->p_numthreads - p->p_suspcount;
        }
        if (mode == SINGLE_EXIT) {
                /*
                 * We have gotten rid of all the other threads and we
                 * are about to either exit or exec.  In either case,
                 * we try our utmost to revert to being a non-threaded
                 * process.
                 */
                p->p_singlethread = NULL;
                p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
                thread_unthread(td);
        }
        PROC_SUNLOCK(p);
        return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        mtx_assert(&Giant, MA_NOTOWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        while (P_SHOULDSTOP(p) ||
            ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        KASSERT(p->p_singlethread != NULL,
                            ("singlethread not set"));
                        /*
                         * The only suspension in action is a
                         * single-threading.  Single threader need not stop.
                         * XXX Should be safe to access unlocked
                         * as it can only be set to be true by us.
                         */
                        if (p->p_singlethread == td)
                                return (0);     /* Exempt from stopping. */
                }
                if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
                        return (EINTR);

                /* Should we goto user boundary if we didn't come from there? */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
                    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
                        return (ERESTART);

                /* If thread will exit, flush its pending signals */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
                        sigqueue_flush(&td->td_sigqueue);

                PROC_SLOCK(p);
                thread_stopped(p);
                /*
                 * If the process is waiting for us to exit,
                 * this thread should just suicide.
                 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
                 */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
                        thread_exit();
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount + 1) {
                                thread_lock(p->p_singlethread);
                                thread_unsuspend_one(p->p_singlethread);
                                thread_unlock(p->p_singlethread);
                        }
                }
                PROC_UNLOCK(p);
                thread_lock(td);
                /*
                 * When a thread suspends, it just
                 * gets taken off all queues.
                 */
                thread_suspend_one(td);
                if (return_instead == 0) {
                        p->p_boundary_count++;
                        td->td_flags |= TDF_BOUNDARY;
                }
                PROC_SUNLOCK(p);
                mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
                if (return_instead == 0)
                        td->td_flags &= ~TDF_BOUNDARY;
                thread_unlock(td);
                PROC_LOCK(p);
                if (return_instead == 0)
                        p->p_boundary_count--;
        }
        return (0);
}
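
/*
 * Usage sketch (illustrative only; the real call sites live elsewhere):
 * a kernel path that must not park here would check and bail out with
 * the error from the table above (EINTR or ERESTART):
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);
 *
 * while userret()-style code calls thread_suspend_check(0) and simply
 * suspends (or exits) in place.
 */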

void
thread_suspend_switch(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        /*
         * We implement thread_suspend_one in stages here to avoid
         * dropping the proc lock while the thread lock is owned.
         */
        thread_stopped(p);
        p->p_suspcount++;
        PROC_UNLOCK(p);
        thread_lock(td);
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
        PROC_SUNLOCK(p);
        DROP_GIANT();
        mi_switch(SW_VOL | SWT_SUSPEND, NULL);
        thread_unlock(td);
        PICKUP_GIANT();
        PROC_LOCK(p);
        PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        p->p_suspcount++;
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
}

void
thread_unsuspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
        TD_CLR_SUSPENDED(td);
        p->p_suspcount--;
        if (setrunnable(td)) {
#ifdef INVARIANTS
                panic("not waking up swapper");
#endif
        }
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        if (!P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                thread_unsuspend_one(td);
                        }
                        thread_unlock(td);
                }
        } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
            (p->p_numthreads == p->p_suspcount)) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request.  Now we've downgraded to single-threaded,
                 * let it continue.
                 */
                thread_lock(p->p_singlethread);
                thread_unsuspend_one(p->p_singlethread);
                thread_unlock(p->p_singlethread);
        }
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(void)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
        PROC_SLOCK(p);
        p->p_singlethread = NULL;
        /*
         * If there are other threads they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process.  The single threader must be allowed
         * to continue however as this is a bad place to stop.
         */
        if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                thread_unsuspend_one(td);
                        }
                        thread_unlock(td);
                }
        }
        PROC_SUNLOCK(p);
}
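
/*
 * Usage sketch for the single-threading API above (illustrative only):
 * an exec-like caller typically brackets its critical region with
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_BOUNDARY)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);
 *	}
 *	... act while all other threads are held at the boundary ...
 *	thread_single_end();
 *	PROC_UNLOCK(p);
 *
 * SINGLE_EXIT callers do not call thread_single_end(): thread_single()
 * already reverted the process to the unthreaded state via
 * thread_unthread().
 */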

/*
 * Find the thread with the given tid in process 'p'.
 * The proc lock must be held and the result is only valid as long as
 * it remains held.  Returns NULL if no such tid exists.
 */
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_tid == tid)
                        break;
        }
        return (td);
}
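
/*
 * Usage sketch (illustrative only): thr_*()-style system calls resolve
 * a thread id under the process lock and must finish using the thread
 * before dropping it, e.g.
 *
 *	PROC_LOCK(p);
 *	ttd = thread_find(p, tid);
 *	if (ttd == NULL) {
 *		PROC_UNLOCK(p);
 *		return (ESRCH);
 *	}
 *	... operate on ttd ...
 *	PROC_UNLOCK(p);
 */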