/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_oncpu = NOCPU;

        td->td_tid = alloc_unr(tid_unrhdr);

        /*
         * Note that td_critnest begins life as 1 because the thread is not
         * running and is thereby implicitly waiting to be on the receiving
         * end of a context switch.
         */
        td->td_critnest = 1;
        EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
        audit_thread_alloc(td);
#endif
        umtx_thread_alloc(td);
        return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
#ifdef AUDIT
        audit_thread_free(td);
#endif
        /* Free all OSD associated with this thread. */
        osd_thread_exit(td);

        EVENTHANDLER_INVOKE(thread_dtor, td);
        free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;

        td->td_sleepqueue = sleepq_alloc();
        td->td_turnstile = turnstile_alloc();
        EVENTHANDLER_INVOKE(thread_init, td);
        td->td_sched = (struct td_sched *)&td[1];
        umtx_thread_init(td);
        td->td_kstack = 0;
        return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        EVENTHANDLER_INVOKE(thread_fini, td);
        turnstile_free(td->td_turnstile);
        sleepq_free(td->td_sleepqueue);
        umtx_thread_fini(td);
        seltdfini(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
        TAILQ_INIT(&p->p_threads);           /* all threads in proc */
        proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

        sigqueue_init(&p->p_sigqueue, p);
        p->p_ksi = ksiginfo_alloc(1);
        if (p->p_ksi != NULL) {
                /* XXX p_ksi may be null if ksiginfo zone is not ready */
                p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
        }
        LIST_INIT(&p->p_mqnotifier);
        p->p_numthreads = 0;
        thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

        mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
        /* leave one number for thread0 */
        tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            16 - 1, 0);
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
        mtx_lock_spin(&zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
        mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
        atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
        thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;

        /*
         * Don't even bother to lock if none at this instant,
         * we really don't care about the next instant..
         */
        if (!TAILQ_EMPTY(&zombie_threads)) {
                mtx_lock_spin(&zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                mtx_unlock_spin(&zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_slpq);
                        if (td_first->td_ucred)
                                crfree(td_first->td_ucred);
                        thread_free(td_first);
                        td_first = td_next;
                }
        }
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
        struct thread *td;

        thread_reap(); /* check if any zombies to get */

        td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
        KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
        if (!vm_thread_new(td, pages)) {
                uma_zfree(thread_zone, td);
                return (NULL);
        }
        cpu_thread_alloc(td);
        return (td);
}

/*
 * Allocate a kernel stack for a thread that does not yet have one.
 * Returns 1 on success and 0 if the stack could not be allocated.
 */
int
thread_alloc_stack(struct thread *td, int pages)
{

        KASSERT(td->td_kstack == 0,
            ("thread_alloc_stack called on a thread with kstack"));
        if (!vm_thread_new(td, pages))
                return (0);
        cpu_thread_alloc(td);
        return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        lock_profile_thread_exit(td);
        if (td->td_cpuset)
                cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_free(td);
        if (td->td_kstack != 0)
                vm_thread_dispose(td);
        uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        uint64_t new_switchtime;
        struct thread *td;
        struct thread *td2;
        struct proc *p;
        int wakeup_swapper;

        td = curthread;
        p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&Giant, MA_NOTOWNED);

        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
            (long)p->p_pid, td->td_name);
        KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
        AUDIT_SYSCALL_EXIT(0, td);
#endif
        umtx_thread_exit(td);
        /*
         * Drop FPU & debug register state storage, or any other
         * architecture specific resources that
         * would not be on a new untouched process.
         */
        cpu_thread_exit(td);    /* XXXSMP */

        /* Do the same timestamp bookkeeping that mi_switch() would do. */
        new_switchtime = cpu_ticks();
        p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
        PCPU_SET(switchtime, new_switchtime);
        PCPU_SET(switchticks, ticks);
        PCPU_INC(cnt.v_swtch);
        /* Save our resource usage in our process. */
        td->td_ru.ru_nvcsw++;
        rucollect(&p->p_ru, &td->td_ru);
        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled. Skip
         * all this stuff if we never had threads.
         * EXIT clears all signs of other threads when
         * it goes to single threading, so the last thread always
         * takes the short path.
         */
        if (p->p_flag & P_HADTHREADS) {
                if (p->p_numthreads > 1) {
                        thread_unlink(td);
                        td2 = FIRST_THREAD_IN_PROC(p);
                        sched_exit_thread(td2, td);

                        /*
                         * The test below is NOT true if we are the
                         * sole exiting thread. P_STOPPED_SINGLE is unset
                         * in exit1() after it is the only survivor.
                         */
                        if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                                if (p->p_numthreads == p->p_suspcount) {
                                        thread_lock(p->p_singlethread);
                                        wakeup_swapper = thread_unsuspend_one(
                                            p->p_singlethread);
                                        thread_unlock(p->p_singlethread);
                                        if (wakeup_swapper)
                                                kick_proc0();
                                }
                        }

                        atomic_add_int(&td->td_proc->p_exitthreads, 1);
                        PCPU_SET(deadthread, td);
                } else {
                        /*
                         * The last thread is exiting.. but not through exit()
                         */
                        panic("thread_exit: Last thread exiting on its own");
                }
        }
#ifdef HWPMC_HOOKS
        /*
         * If this thread is part of a process that is being tracked by hwpmc(4),
         * inform the module of the thread's impending exit.
         */
        if (PMC_PROC_IS_USING_PMCS(td->td_proc))
                PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
        PROC_UNLOCK(p);
        ruxagg(p, td);
        thread_lock(td);
        PROC_SUNLOCK(p);
        td->td_state = TDS_INACTIVE;
#ifdef WITNESS
        witness_thread_exit(td);
#endif
        CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
        sched_throw(td);
        panic("I'm a teapot!");
        /* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
        struct thread *td;

        mtx_assert(&Giant, MA_NOTOWNED);
        KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
        td = FIRST_THREAD_IN_PROC(p);
        /* Lock the last thread so we spin until it exits cpu_throw(). */
        thread_lock(td);
        thread_unlock(td);
        /* Wait for any remaining threads to exit cpu_throw(). */
        while (p->p_exitthreads)
                sched_relinquish(curthread);
        lock_profile_thread_exit(td);
        cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_clean(td);
        crfree(td->td_ucred);
        thread_reap();  /* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

        /*
         * XXX This can't be enabled because it's called for proc0 before
         * its lock has been created.
         * PROC_LOCK_ASSERT(p, MA_OWNED);
         */
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_flags = TDF_INMEM;

        LIST_INIT(&td->td_contested);
        LIST_INIT(&td->td_lprof[0]);
        LIST_INIT(&td->td_lprof[1]);
        sigqueue_init(&td->td_sigqueue, p);
        callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
        TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
        p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
500 */ 501 void 502 thread_unthread(struct thread *td) 503 { 504 struct proc *p = td->td_proc; 505 506 KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads")); 507 p->p_flag &= ~P_HADTHREADS; 508 } 509 510 /* 511 * Called from: 512 * thread_exit() 513 */ 514 void 515 thread_unlink(struct thread *td) 516 { 517 struct proc *p = td->td_proc; 518 519 PROC_LOCK_ASSERT(p, MA_OWNED); 520 TAILQ_REMOVE(&p->p_threads, td, td_plist); 521 p->p_numthreads--; 522 /* could clear a few other things here */ 523 /* Must NOT clear links to proc! */ 524 } 525 526 static int 527 calc_remaining(struct proc *p, int mode) 528 { 529 int remaining; 530 531 if (mode == SINGLE_EXIT) 532 remaining = p->p_numthreads; 533 else if (mode == SINGLE_BOUNDARY) 534 remaining = p->p_numthreads - p->p_boundary_count; 535 else if (mode == SINGLE_NO_EXIT) 536 remaining = p->p_numthreads - p->p_suspcount; 537 else 538 panic("calc_remaining: wrong mode %d", mode); 539 return (remaining); 540 } 541 542 /* 543 * Enforce single-threading. 544 * 545 * Returns 1 if the caller must abort (another thread is waiting to 546 * exit the process or similar). Process is locked! 547 * Returns 0 when you are successfully the only thread running. 548 * A process has successfully single threaded in the suspend mode when 549 * There are no threads in user mode. Threads in the kernel must be 550 * allowed to continue until they get to the user boundary. They may even 551 * copy out their return values and data before suspending. They may however be 552 * accelerated in reaching the user boundary as we will wake up 553 * any sleeping threads that are interruptable. (PCATCH). 554 */ 555 int 556 thread_single(int mode) 557 { 558 struct thread *td; 559 struct thread *td2; 560 struct proc *p; 561 int remaining, wakeup_swapper; 562 563 td = curthread; 564 p = td->td_proc; 565 mtx_assert(&Giant, MA_NOTOWNED); 566 PROC_LOCK_ASSERT(p, MA_OWNED); 567 KASSERT((td != NULL), ("curthread is NULL")); 568 569 if ((p->p_flag & P_HADTHREADS) == 0) 570 return (0); 571 572 /* Is someone already single threading? 
        if (p->p_singlethread != NULL && p->p_singlethread != td)
                return (1);

        if (mode == SINGLE_EXIT) {
                p->p_flag |= P_SINGLE_EXIT;
                p->p_flag &= ~P_SINGLE_BOUNDARY;
        } else {
                p->p_flag &= ~P_SINGLE_EXIT;
                if (mode == SINGLE_BOUNDARY)
                        p->p_flag |= P_SINGLE_BOUNDARY;
                else
                        p->p_flag &= ~P_SINGLE_BOUNDARY;
        }
        p->p_flag |= P_STOPPED_SINGLE;
        PROC_SLOCK(p);
        p->p_singlethread = td;
        remaining = calc_remaining(p, mode);
        while (remaining != 1) {
                if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
                        goto stopme;
                wakeup_swapper = 0;
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        thread_lock(td2);
                        td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
                        if (TD_IS_INHIBITED(td2)) {
                                switch (mode) {
                                case SINGLE_EXIT:
                                        if (TD_IS_SUSPENDED(td2))
                                                wakeup_swapper |=
                                                    thread_unsuspend_one(td2);
                                        if (TD_ON_SLEEPQ(td2) &&
                                            (td2->td_flags & TDF_SINTR))
                                                wakeup_swapper |=
                                                    sleepq_abort(td2, EINTR);
                                        break;
                                case SINGLE_BOUNDARY:
                                        if (TD_IS_SUSPENDED(td2) &&
                                            !(td2->td_flags & TDF_BOUNDARY))
                                                wakeup_swapper |=
                                                    thread_unsuspend_one(td2);
                                        if (TD_ON_SLEEPQ(td2) &&
                                            (td2->td_flags & TDF_SINTR))
                                                wakeup_swapper |=
                                                    sleepq_abort(td2, ERESTART);
                                        break;
                                case SINGLE_NO_EXIT:
                                        if (TD_IS_SUSPENDED(td2) &&
                                            !(td2->td_flags & TDF_BOUNDARY))
                                                wakeup_swapper |=
                                                    thread_unsuspend_one(td2);
                                        if (TD_ON_SLEEPQ(td2) &&
                                            (td2->td_flags & TDF_SINTR))
                                                wakeup_swapper |=
                                                    sleepq_abort(td2, ERESTART);
                                        break;
                                default:
                                        break;
                                }
                        }
#ifdef SMP
                        else if (TD_IS_RUNNING(td2) && td != td2) {
                                forward_signal(td2);
                        }
#endif
                        thread_unlock(td2);
                }
                if (wakeup_swapper)
                        kick_proc0();
                remaining = calc_remaining(p, mode);

                /*
                 * Maybe we suspended some threads.. was it enough?
                 */
                if (remaining == 1)
                        break;

stopme:
                /*
                 * Wake us up when everyone else has suspended.
                 * In the mean time we suspend as well.
                 */
                thread_suspend_switch(td);
                remaining = calc_remaining(p, mode);
        }
        if (mode == SINGLE_EXIT) {
                /*
                 * We have gotten rid of all the other threads and we
                 * are about to either exit or exec. In either case,
                 * we try our utmost to revert to being a non-threaded
                 * process.
                 */
                p->p_singlethread = NULL;
                p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
                thread_unthread(td);
        }
        PROC_SUNLOCK(p);
        return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
690 * 691 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 692 *---------------+--------------------+--------------------- 693 * 0 | returns 0 | returns 0 or 1 694 * | when ST ends | immediatly 695 *---------------+--------------------+--------------------- 696 * 1 | thread exits | returns 1 697 * | | immediatly 698 * 0 = thread_exit() or suspension ok, 699 * other = return error instead of stopping the thread. 700 * 701 * While a full suspension is under effect, even a single threading 702 * thread would be suspended if it made this call (but it shouldn't). 703 * This call should only be made from places where 704 * thread_exit() would be safe as that may be the outcome unless 705 * return_instead is set. 706 */ 707 int 708 thread_suspend_check(int return_instead) 709 { 710 struct thread *td; 711 struct proc *p; 712 int wakeup_swapper; 713 714 td = curthread; 715 p = td->td_proc; 716 mtx_assert(&Giant, MA_NOTOWNED); 717 PROC_LOCK_ASSERT(p, MA_OWNED); 718 while (P_SHOULDSTOP(p) || 719 ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) { 720 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 721 KASSERT(p->p_singlethread != NULL, 722 ("singlethread not set")); 723 /* 724 * The only suspension in action is a 725 * single-threading. Single threader need not stop. 726 * XXX Should be safe to access unlocked 727 * as it can only be set to be true by us. 728 */ 729 if (p->p_singlethread == td) 730 return (0); /* Exempt from stopping. */ 731 } 732 if ((p->p_flag & P_SINGLE_EXIT) && return_instead) 733 return (EINTR); 734 735 /* Should we goto user boundary if we didn't come from there? */ 736 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && 737 (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) 738 return (ERESTART); 739 740 /* If thread will exit, flush its pending signals */ 741 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) 742 sigqueue_flush(&td->td_sigqueue); 743 744 PROC_SLOCK(p); 745 thread_stopped(p); 746 /* 747 * If the process is waiting for us to exit, 748 * this thread should just suicide. 749 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. 750 */ 751 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) 752 thread_exit(); 753 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 754 if (p->p_numthreads == p->p_suspcount + 1) { 755 thread_lock(p->p_singlethread); 756 wakeup_swapper = 757 thread_unsuspend_one(p->p_singlethread); 758 thread_unlock(p->p_singlethread); 759 if (wakeup_swapper) 760 kick_proc0(); 761 } 762 } 763 PROC_UNLOCK(p); 764 thread_lock(td); 765 /* 766 * When a thread suspends, it just 767 * gets taken off all queues. 768 */ 769 thread_suspend_one(td); 770 if (return_instead == 0) { 771 p->p_boundary_count++; 772 td->td_flags |= TDF_BOUNDARY; 773 } 774 PROC_SUNLOCK(p); 775 mi_switch(SW_INVOL | SWT_SUSPEND, NULL); 776 if (return_instead == 0) 777 td->td_flags &= ~TDF_BOUNDARY; 778 thread_unlock(td); 779 PROC_LOCK(p); 780 if (return_instead == 0) 781 p->p_boundary_count--; 782 } 783 return (0); 784 } 785 786 void 787 thread_suspend_switch(struct thread *td) 788 { 789 struct proc *p; 790 791 p = td->td_proc; 792 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 793 PROC_LOCK_ASSERT(p, MA_OWNED); 794 PROC_SLOCK_ASSERT(p, MA_OWNED); 795 /* 796 * We implement thread_suspend_one in stages here to avoid 797 * dropping the proc lock while the thread lock is owned. 
798 */ 799 thread_stopped(p); 800 p->p_suspcount++; 801 PROC_UNLOCK(p); 802 thread_lock(td); 803 td->td_flags &= ~TDF_NEEDSUSPCHK; 804 TD_SET_SUSPENDED(td); 805 sched_sleep(td, 0); 806 PROC_SUNLOCK(p); 807 DROP_GIANT(); 808 mi_switch(SW_VOL | SWT_SUSPEND, NULL); 809 thread_unlock(td); 810 PICKUP_GIANT(); 811 PROC_LOCK(p); 812 PROC_SLOCK(p); 813 } 814 815 void 816 thread_suspend_one(struct thread *td) 817 { 818 struct proc *p = td->td_proc; 819 820 PROC_SLOCK_ASSERT(p, MA_OWNED); 821 THREAD_LOCK_ASSERT(td, MA_OWNED); 822 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 823 p->p_suspcount++; 824 td->td_flags &= ~TDF_NEEDSUSPCHK; 825 TD_SET_SUSPENDED(td); 826 sched_sleep(td, 0); 827 } 828 829 int 830 thread_unsuspend_one(struct thread *td) 831 { 832 struct proc *p = td->td_proc; 833 834 PROC_SLOCK_ASSERT(p, MA_OWNED); 835 THREAD_LOCK_ASSERT(td, MA_OWNED); 836 KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); 837 TD_CLR_SUSPENDED(td); 838 p->p_suspcount--; 839 return (setrunnable(td)); 840 } 841 842 /* 843 * Allow all threads blocked by single threading to continue running. 844 */ 845 void 846 thread_unsuspend(struct proc *p) 847 { 848 struct thread *td; 849 int wakeup_swapper; 850 851 PROC_LOCK_ASSERT(p, MA_OWNED); 852 PROC_SLOCK_ASSERT(p, MA_OWNED); 853 wakeup_swapper = 0; 854 if (!P_SHOULDSTOP(p)) { 855 FOREACH_THREAD_IN_PROC(p, td) { 856 thread_lock(td); 857 if (TD_IS_SUSPENDED(td)) { 858 wakeup_swapper |= thread_unsuspend_one(td); 859 } 860 thread_unlock(td); 861 } 862 } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) && 863 (p->p_numthreads == p->p_suspcount)) { 864 /* 865 * Stopping everything also did the job for the single 866 * threading request. Now we've downgraded to single-threaded, 867 * let it continue. 868 */ 869 thread_lock(p->p_singlethread); 870 wakeup_swapper = thread_unsuspend_one(p->p_singlethread); 871 thread_unlock(p->p_singlethread); 872 } 873 if (wakeup_swapper) 874 kick_proc0(); 875 } 876 877 /* 878 * End the single threading mode.. 879 */ 880 void 881 thread_single_end(void) 882 { 883 struct thread *td; 884 struct proc *p; 885 int wakeup_swapper; 886 887 td = curthread; 888 p = td->td_proc; 889 PROC_LOCK_ASSERT(p, MA_OWNED); 890 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY); 891 PROC_SLOCK(p); 892 p->p_singlethread = NULL; 893 wakeup_swapper = 0; 894 /* 895 * If there are other threads they may now run, 896 * unless of course there is a blanket 'stop order' 897 * on the process. The single threader must be allowed 898 * to continue however as this is a bad place to stop. 899 */ 900 if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) { 901 FOREACH_THREAD_IN_PROC(p, td) { 902 thread_lock(td); 903 if (TD_IS_SUSPENDED(td)) { 904 wakeup_swapper |= thread_unsuspend_one(td); 905 } 906 thread_unlock(td); 907 } 908 } 909 PROC_SUNLOCK(p); 910 if (wakeup_swapper) 911 kick_proc0(); 912 } 913 914 struct thread * 915 thread_find(struct proc *p, lwpid_t tid) 916 { 917 struct thread *td; 918 919 PROC_LOCK_ASSERT(p, MA_OWNED); 920 FOREACH_THREAD_IN_PROC(p, td) { 921 if (td->td_tid == tid) 922 break; 923 } 924 return (td); 925 } 926