/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct	tidhashhead *tidhashtbl;
u_long	tidhash;
struct	rwlock tidhash_lock;

static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);

	mtx_init(&td->td_slpmutex, "td_slpmutex", NULL, MTX_SPIN);
	callout_init_mtx(&td->td_slpcallout, &td->td_slpmutex, 0);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

	/* make sure to drain any use of the "td->td_slpcallout" */
	callout_drain(&td->td_slpcallout);
	mtx_destroy(&td->td_slpmutex);

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}
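/*
 * Example (illustrative sketch, not a caller in this file): the ctor/dtor
 * pair above runs on every allocation and free of a thread, while the
 * type-stable init/fini pair below runs only when UMA creates or reclaims
 * the backing slab.  A round trip through the zone therefore behaves like:
 *
 *	struct thread *td;
 *
 *	td = uma_zalloc(thread_zone, M_WAITOK);	// thread_init() only if the
 *						// item is fresh, then
 *						// thread_ctor()
 *	...
 *	uma_zfree(thread_zone, td);		// thread_dtor() immediately;
 *						// thread_fini() deferred
 *						// until slab reclaim
 */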
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c  {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * leave one number for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
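/*
 * Example (illustrative sketch of the zombie hand-off): an exiting thread
 * cannot free the kernel stack it is still running on, so teardown is
 * split across contexts, roughly:
 *
 *	thread_exit()			// on the dying thread's own stack
 *	    PCPU_SET(deadthread, td);
 *	    sched_throw(td);		// switch away for the last time
 *	...
 *	thread_stash(td);		// from the next thread to run, which
 *					// finds td in PCPU(deadthread)
 *	...
 *	thread_reap();			// later, e.g. from thread_alloc();
 *					// finally frees the stashed zombies
 */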
/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}
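/*
 * Example (illustrative sketch): a typical consumer, such as a kernel
 * thread creation or thr_create()-style path, allocates with the default
 * stack size and must be prepared for failure:
 *
 *	struct thread *newtd;
 *
 *	newtd = thread_alloc(0);	// 0 selects the default KSTACK_PAGES
 *	if (newtd == NULL)
 *		return (ENOMEM);	// kstack allocation failed
 *	...
 *	thread_free(newtd);		// on any later error
 */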
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait();
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}
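/*
 * Worked example (illustrative): with p_numthreads == 4, p_suspcount == 3
 * and p_boundary_count == 0, a SINGLE_NO_EXIT request yields
 * remaining = 4 - 3 = 1, which matches remain_for_mode() below (just the
 * requesting thread), so the loop in thread_single() terminates.  Under
 * SINGLE_EXIT the target is also 1, but remaining counts all threads, so
 * the other three must actually exit, not merely suspend, before the
 * request is satisfied.
 */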
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}
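/*
 * Example (illustrative sketch): exit1() and execve() are the classic
 * consumers of thread_single() below.  A boundary-style caller looks
 * roughly like:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY)) {
 *		// Another thread is already single-threading; back out.
 *		PROC_UNLOCK(p);
 *		return (ERESTART);
 *	}
 *	// ...work that requires no other threads running...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */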
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}
/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests for stop signals if they
		 * are deferred.
		 */
		if ((P_SHOULDSTOP(p) == P_STOPPED_SIG ||
		    (p->p_flag & P_TOTAL_STOP) != 0) &&
		    (td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread, p);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0) {
			PROC_SLOCK(p);
			p->p_boundary_count--;
			PROC_SUNLOCK(p);
		}
	}
	return (0);
}
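/*
 * Example (illustrative sketch; real callers vary): a kernel path that
 * must not suspend in place polls roughly as follows, while the
 * user-boundary path named in the comment above passes
 * return_instead == 0 and accepts suspension or thread_exit() inside
 * the call:
 *
 *	int error;
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);	// never suspends here;
 *	PROC_UNLOCK(p);				// returns 0, EINTR or ERESTART
 *	if (error != 0)
 *		return (error);			// back out toward userret()
 */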
void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

int
thread_unsuspend_one(struct thread *td, struct proc *p)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}
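/*
 * Illustrative invariant (a sketch, not compiled in): every suspension
 * increments p_suspcount and every resume decrements it, so with the
 * proc spinlock held one could assert
 *
 *	KASSERT(p->p_suspcount >= 0 && p->p_suspcount <= p->p_numthreads,
 *	    ("p_suspcount out of range"));
 *
 * Accordingly, thread_exit() tests p_numthreads == p_suspcount, while
 * thread_suspend_check() tests p_numthreads == p_suspcount + 1 just
 * before adding its own suspension, to decide when to wake the
 * single-threading requester.
 */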
/*
 * End the single threading mode..
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
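/*
 * Example (illustrative sketch): a caller resolving a thread ID, e.g. a
 * thr_kill()-style path, must unlock the process that tdfind() returned
 * locked:
 *
 *	struct thread *ttd;
 *
 *	ttd = tdfind(tid, p->p_pid);	// pid of -1 accepts any process
 *	if (ttd == NULL)
 *		return (ESRCH);
 *	// ...use ttd; its process is held via PROC_LOCK...
 *	PROC_UNLOCK(ttd->td_proc);
 */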