/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * The asserts below verify the stability of the struct thread and
 * struct proc layouts, as exposed by KBI to modules.  On head, the KBI
 * is allowed to drift, but changes to the structures must be
 * accompanied by updates to the asserts.
 *
 * On stable branches after the KBI freeze, the conditions must not be
 * violated.  Typically new fields are added to the end of the
 * structures.
 */
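/*
 * For example (illustrative, not from the tree): adding a new field to
 * struct thread ahead of td_flags on amd64 would push td_flags past
 * offset 0xfc and trip the matching assert at compile time, catching
 * the KBI change before any module is built against it.
 */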
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xfc,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4a0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3b8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3d0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4b0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x98,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x300,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x344,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x268,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x27c,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x308,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);
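/*
 * The probe above is exported to DTrace as proc:::lwp-exit (SDT
 * translates the "__" in probe names to "-"), so thread exits can be
 * observed with, e.g., dtrace -n 'proc:::lwp-exit { trace(pid); }'.
 */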
/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

static __exclusive_cache_line struct thread *thread_zombies;

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);
static void thread_free_batched(struct thread *td);

static __exclusive_cache_line struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static __exclusive_cache_line int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long tidhash;
static u_long tidhashlock;
static struct rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static bool
thread_count_inc(void)
{
	static struct timeval lastfail;
	static int curfail;
	int nthreads_new;

	thread_reap();

	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
	if (nthreads_new >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads_new >= maxthread) {
			atomic_subtract_int(&nthreads, 1);
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				printf("maxthread limit exceeded by uid %u "
				    "(pid %d); consider increasing kern.maxthread\n",
				    curthread->td_ucred->cr_ruid, curproc->p_pid);
			}
			return (false);
		}
	}
	return (true);
}

static void
thread_count_sub(int n)
{

	atomic_subtract_int(&nthreads, n);
}

static void
thread_count_dec(void)
{

	thread_count_sub(1);
}

static lwpid_t
tid_alloc(void)
{
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs.  If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}

static void
tid_free_locked(lwpid_t rtid)
{
	lwpid_t tid;

	mtx_assert(&tid_lock, MA_OWNED);
	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
}

static void
tid_free(lwpid_t rtid)
{

	mtx_lock(&tid_lock);
	tid_free_locked(rtid);
	mtx_unlock(&tid_lock);
}

static void
tid_free_batch(lwpid_t *batch, int n)
{
	int i;

	mtx_lock(&tid_lock);
	for (i = 0; i < n; i++) {
		tid_free_locked(batch[i]);
	}
	mtx_unlock(&tid_lock);
}

/*
 * Batching for thread reaping.
 */
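/*
 * A consumer is expected to drive a batch as tidbatch_prep(), then
 * tidbatch_add() and tidbatch_process() for each thread, and a closing
 * tidbatch_final(), so that tid_lock is taken once per nitems(tab)
 * frees rather than once per thread; thread_reap() below follows this
 * pattern.
 */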
struct tidbatch {
	lwpid_t tab[16];
	int n;
};

static void
tidbatch_prep(struct tidbatch *tb)
{

	tb->n = 0;
}

static void
tidbatch_add(struct tidbatch *tb, struct thread *td)
{

	KASSERT(tb->n < nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	tb->tab[tb->n] = td->td_tid;
	tb->n++;
}

static void
tidbatch_process(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n == nitems(tb->tab)) {
		tid_free_batch(tb->tab, tb->n);
		tb->n = 0;
	}
}

static void
tidbatch_final(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n != 0) {
		tid_free_batch(tb->tab, tb->n);
	}
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}
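/*
 * Note the split above: thread_init()/thread_fini() run once per item
 * created or destroyed by the zone, while thread_ctor()/thread_dtor()
 * run on every uma_zalloc()/uma_zfree().  The type-stable state (sleep
 * queue, turnstile) therefore persists across reuses of the same item.
 */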
/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * Called from:
 * {arch}/{arch}/machdep.c  {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

extern int max_threads_per_proc;

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;
	uint32_t flags;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	/*
	 * Handle thread0.
	 */
	thread_count_inc();
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	flags = UMA_ZONE_NOFREE;
#ifdef __aarch64__
	/*
	 * Force thread structures to be allocated from the direct map.
	 * Otherwise, superpage promotions and demotions may temporarily
	 * invalidate thread structure mappings.  For most dynamically
	 * allocated structures this is not a problem, but translation
	 * faults cannot be handled without accessing curthread.
	 */
	flags |= UMA_ZONE_CONTIG;
#endif
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, flags);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 */
void
thread_zombie(struct thread *td)
{
	struct thread *ztd;

	ztd = atomic_load_ptr(&thread_zombies);
	for (;;) {
		td->td_zombie = ztd;
		if (atomic_fcmpset_rel_ptr((uintptr_t *)&thread_zombies,
		    (uintptr_t *)&ztd, (uintptr_t)td))
			break;
	}
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}
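/*
 * thread_zombie() above is a lock-free LIFO push (compare-and-swap on
 * the list head), so exiting threads can be queued without taking any
 * locks, e.g. from thread_stash() after cpu_throw(); thread_reap()
 * below drains the whole list with a single atomic swap.
 */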
/*
 * Reap zombie threads.
 */
void
thread_reap(void)
{
	struct thread *itd, *ntd;
	struct tidbatch tidbatch;
	struct credbatch credbatch;
	int tdcount;
	struct plimit *lim;
	int limcount;

	/*
	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
	 * but most of the time the list is empty.
	 */
	if (thread_zombies == NULL)
		return;

	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&thread_zombies,
	    (uintptr_t)NULL);
	if (itd == NULL)
		return;

	tidbatch_prep(&tidbatch);
	credbatch_prep(&credbatch);
	tdcount = 0;
	lim = NULL;
	limcount = 0;
	while (itd != NULL) {
		ntd = itd->td_zombie;
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
		tidbatch_add(&tidbatch, itd);
		credbatch_add(&credbatch, itd);
		MPASS(itd->td_limit != NULL);
		if (lim != itd->td_limit) {
			if (limcount != 0) {
				lim_freen(lim, limcount);
				limcount = 0;
			}
		}
		lim = itd->td_limit;
		limcount++;
		thread_free_batched(itd);
		tidbatch_process(&tidbatch);
		credbatch_process(&credbatch);
		tdcount++;
		if (tdcount == 32) {
			thread_count_sub(tdcount);
			tdcount = 0;
		}
		itd = ntd;
	}

	tidbatch_final(&tidbatch);
	credbatch_final(&credbatch);
	if (tdcount != 0) {
		thread_count_sub(tdcount);
	}
	MPASS(limcount != 0);
	lim_freen(lim, limcount);
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}

	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}
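/*
 * Note that thread_alloc() above returns NULL both when the thread
 * limit is hit and when kernel stack allocation fails; callers must
 * be prepared for either case.
 */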
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
static void
thread_free_batched(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	/*
	 * Freeing of the tid and the thread count is handled by the caller.
	 */
	td->td_tid = -1;
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid = td->td_tid;
	thread_free_batched(td);
	tid_free(tid);
	thread_count_dec();
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldlimit = NULL;
	PROC_LOCK(p);
	oldcred = crcowsync();
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}
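/*
 * On the COW scheme above: td_ucred and td_limit are per-thread
 * references to per-process data.  The process side bumps p_cowgen
 * when it replaces those structures, and a thread whose td_cowgen
 * lags behind resynchronizes via thread_cow_update(), typically at
 * the kernel/user boundary.
 */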
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that would not be on a
	 * new untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all sign of other threads when
	 * it goes to single-threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting... but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}
/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must  NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
	 */
restart:
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2)) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, EINTR);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_BOUNDARY) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
				return (wakeup_swapper);
			}
		}
		break;
	default:
		break;
	}
	thread_unlock(td2);
	return (wakeup_swapper);
}
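/*
 * weed_inhib() above is the per-thread nudge used while single
 * threading: depending on the mode it either aborts an interruptible
 * sleep (sleepq_abort() with EINTR or ERESTART) or unsuspends the
 * thread so it can make its own way to the required stopping point.
 */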
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
				thread_unlock(td2);
#endif
			} else
				thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  SINGLE_EXIT
		 * is requested by exit1() or execve(); in both cases the
		 * other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}
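/*
 * A caller-side sketch (illustrative, not quoted from a real caller):
 * code that may block while a single-threading request can arrive
 * typically does
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);
 *
 * so that EINTR/ERESTART backs the thread out towards the user
 * boundary, where the stop or exit request is handled.
 */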
/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND);
		PROC_LOCK(p);
	}
	return (0);
}
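/*
 * Note that p_boundary_count above is only bumped when the thread
 * suspends at the user boundary (return_instead == 0); SINGLE_BOUNDARY
 * single-threading uses it, via calc_remaining(), to tell how many
 * threads are parked there.
 */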
/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or it should return ERESTART and let the request be
 * processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to casueword(9) failure (rv == 1), we
 * should handle the stop requests there, with exception of cases when
 * the thread owns a kernel resource, for instance busied the umtx
 * key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, when retrying the whole lock
 * operation, we better not stop there but delegate the handling to ast.
 *
 * If the request is for thread termination P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
 */
int
thread_check_susp(struct thread *td, bool sleep)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (p->p_flag & P_SINGLE_EXIT)
		error = EINTR;
	else if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
		error = sleep ? thread_suspend_check(0) : ERESTART;
	PROC_UNLOCK(p);
	return (error);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td, 0));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			} else
				thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}
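/*
 * thread_single_end() below undoes thread_single(); the mode passed
 * in must match the one used to enter single-threading, which the
 * KASSERTs cross-check against the p_flag state.
 */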
/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			} else
				thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}
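/*
 * tdfind() below is the kernel-wide tid lookup used by, e.g., the
 * thr_kill(2) path; pid == -1 means "match any process".
 */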
/*
 * Locate a thread by number and return with the proc lock held.
 *
 * Thread exit establishes proc -> tidhash lock ordering, but lookup
 * takes tidhash first and needs to return a locked proc.
 *
 * The problem is worked around by relying on type-safety of both
 * structures and doing the work in 2 steps:
 * - tidhash-locked lookup which saves both thread and proc pointers
 * - proc-locked verification that the found thread still matches
 */
static bool
tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
{
#define RUN_THRESH	16
	struct proc *p;
	struct thread *td;
	int run;
	bool locked;

	run = 0;
	rw_rlock(TIDHASHLOCK(tid));
	locked = true;
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid != tid) {
			run++;
			continue;
		}
		p = td->td_proc;
		if (pid != -1 && p->p_pid != pid) {
			td = NULL;
			break;
		}
		/*
		 * If the lookup had to walk past RUN_THRESH entries, try
		 * to move the found thread to the front of its chain.
		 */
		if (run > RUN_THRESH) {
			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
				LIST_REMOVE(td, td_hash);
				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
				    td, td_hash);
				rw_wunlock(TIDHASHLOCK(tid));
				locked = false;
				break;
			}
		}
		break;
	}
	if (locked)
		rw_runlock(TIDHASHLOCK(tid));
	if (td == NULL)
		return (false);
	*pp = p;
	*tdp = td;
	return (true);
}

struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	if (td->td_tid == tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	}

	for (;;) {
		if (!tdfind_hash(tid, pid, &p, &td))
			return (NULL);
		PROC_LOCK(p);
		if (td->td_tid != tid) {
			PROC_UNLOCK(p);
			continue;
		}
		if (td->td_proc != p) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			return (NULL);
		}
		return (td);
	}
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}

void
tidhash_remove(struct thread *td)
{

	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_REMOVE(td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}