/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/dtrace_bsd.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/taskqueue.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtxvar.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_phys.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift, change to the structures must be accompanied by the
 * assert update.
 *
 * On the stable branches after KBI freeze, conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0x108,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x114,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4b8,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xc4,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4d0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x9c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x314,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x358,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x6c,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x78,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x270,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x284,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x318,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

struct thread_domain_data {
	struct thread	*tdd_zombies;
	int		tdd_reapticks;
} __aligned(CACHE_LINE_SIZE);

static struct thread_domain_data thread_domain_data[MAXMEMDOM];

static struct task	thread_reap_task;
static struct callout	thread_reap_callout;

static void thread_zombie(struct thread *);
static void thread_reap(void);
static void thread_reap_all(void);
static void thread_reap_task_cb(void *, int);
static void thread_reap_callout_cb(void *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);
static void thread_free_batched(struct thread *td);

static __exclusive_cache_line struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static __exclusive_cache_line int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long	tidhash;
static u_long	tidhashlock;
static struct	rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

/*
 * Bump the global thread count, failing if the new value would exceed the
 * maxthread limit.  The last 100 slots are reserved for privileged users.
 */
static bool
thread_count_inc_try(void)
{
	int nthreads_new;

	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
	if (nthreads_new >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads_new >= maxthread) {
			atomic_subtract_int(&nthreads, 1);
			return (false);
		}
	}
	return (true);
}

static bool
thread_count_inc(void)
{
	static struct timeval lastfail;
	static int curfail;

	thread_reap();
	if (thread_count_inc_try()) {
		return (true);
	}

	thread_reap_all();
	if (thread_count_inc_try()) {
		return (true);
	}

	if (ppsratecheck(&lastfail, &curfail, 1)) {
		printf("maxthread limit exceeded by uid %u "
		    "(pid %d); consider increasing kern.maxthread\n",
		    curthread->td_ucred->cr_ruid, curproc->p_pid);
	}
	return (false);
}

static void
thread_count_sub(int n)
{

	atomic_subtract_int(&nthreads, n);
}

static void
thread_count_dec(void)
{

	thread_count_sub(1);
}
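
/*
 * Allocate a thread ID from the global bitmap.  The search starts at the
 * position following the last successful allocation and wraps around at
 * most once; the result is offset by NO_PID, keeping thread IDs above the
 * range used for process IDs.
 */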
static lwpid_t
tid_alloc(void)
{
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs. If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}

static void
tid_free_locked(lwpid_t rtid)
{
	lwpid_t tid;

	mtx_assert(&tid_lock, MA_OWNED);
	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
}

static void
tid_free(lwpid_t rtid)
{

	mtx_lock(&tid_lock);
	tid_free_locked(rtid);
	mtx_unlock(&tid_lock);
}

static void
tid_free_batch(lwpid_t *batch, int n)
{
	int i;

	mtx_lock(&tid_lock);
	for (i = 0; i < n; i++) {
		tid_free_locked(batch[i]);
	}
	mtx_unlock(&tid_lock);
}

/*
 * Batching for thread reaping.
 */
struct tidbatch {
	lwpid_t tab[16];
	int n;
};

static void
tidbatch_prep(struct tidbatch *tb)
{

	tb->n = 0;
}

static void
tidbatch_add(struct tidbatch *tb, struct thread *td)
{

	KASSERT(tb->n < nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	tb->tab[tb->n] = td->td_tid;
	tb->n++;
}

static void
tidbatch_process(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n == nitems(tb->tab)) {
		tid_free_batch(tb->tab, tb->n);
		tb->n = 0;
	}
}

static void
tidbatch_final(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n != 0) {
		tid_free_batch(tb->tab, tb->n);
	}
}

/*
 * Batching thread count free, for consistency
 */
struct tdcountbatch {
	int n;
};

static void
tdcountbatch_prep(struct tdcountbatch *tb)
{

	tb->n = 0;
}

static void
tdcountbatch_add(struct tdcountbatch *tb, struct thread *td __unused)
{

	tb->n++;
}

static void
tdcountbatch_process(struct tdcountbatch *tb)
{

	if (tb->n == 32) {
		thread_count_sub(tb->n);
		tb->n = 0;
	}
}

static void
tdcountbatch_final(struct tdcountbatch *tb)
{

	if (tb->n != 0) {
		thread_count_sub(tb->n);
	}
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_ctor(td);
#endif
	umtx_thread_alloc(td);
	MPASS(td->td_sel == NULL);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (TD_GET_STATE(td)) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_dtor(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);
	ast_kclear(td);
	seltdfini(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_allocdomain = vm_phys_domain(vtophys(td));
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	MPASS(td->td_sel == NULL);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(M_WAITOK);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

static void
ast_suspend(struct thread *td, int tda __unused)
{
	struct proc *p;

	p = td->td_proc;
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	PROC_LOCK(p);
	thread_suspend_check(0);
	PROC_UNLOCK(p);
}

extern int max_threads_per_proc;
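
/*
 * maxthread is computed in threadinit() below unless it was set via the
 * kern.maxthread loader tunable (the sysctl is CTLFLAG_RDTUN, i.e. it is
 * read-only at run time).
 */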

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	/*
	 * Handle thread0.
	 */
	thread_count_inc();
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");

	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
	callout_init(&thread_reap_callout, 1);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
	ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
}

/*
 * Place an unused thread on the zombie list.
 */
void
thread_zombie(struct thread *td)
{
	struct thread_domain_data *tdd;
	struct thread *ztd;

	tdd = &thread_domain_data[td->td_allocdomain];
	ztd = atomic_load_ptr(&tdd->tdd_zombies);
	for (;;) {
		td->td_zombie = ztd;
		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
		    (uintptr_t *)&ztd, (uintptr_t)td))
			break;
		continue;
	}
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombies from passed domain.
 */
static void
thread_reap_domain(struct thread_domain_data *tdd)
{
	struct thread *itd, *ntd;
	struct tidbatch tidbatch;
	struct credbatch credbatch;
	struct limbatch limbatch;
	struct tdcountbatch tdcountbatch;

	/*
	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
	 * but most of the time the list is empty.
	 */
	if (tdd->tdd_zombies == NULL)
		return;

	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
	    (uintptr_t)NULL);
	if (itd == NULL)
		return;

	/*
	 * Multiple CPUs can get here, the race is fine as ticks is only
	 * advisory.
	 */
	tdd->tdd_reapticks = ticks;

	tidbatch_prep(&tidbatch);
	credbatch_prep(&credbatch);
	limbatch_prep(&limbatch);
	tdcountbatch_prep(&tdcountbatch);

	while (itd != NULL) {
		ntd = itd->td_zombie;
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);

		tidbatch_add(&tidbatch, itd);
		credbatch_add(&credbatch, itd);
		limbatch_add(&limbatch, itd);
		tdcountbatch_add(&tdcountbatch, itd);

		thread_free_batched(itd);

		tidbatch_process(&tidbatch);
		credbatch_process(&credbatch);
		limbatch_process(&limbatch);
		tdcountbatch_process(&tdcountbatch);

		itd = ntd;
	}

	tidbatch_final(&tidbatch);
	credbatch_final(&credbatch);
	limbatch_final(&limbatch);
	tdcountbatch_final(&tdcountbatch);
}

/*
 * Reap zombies from all domains.
 */
static void
thread_reap_all(void)
{
	struct thread_domain_data *tdd;
	int i, domain;

	domain = PCPU_GET(domain);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
		thread_reap_domain(tdd);
	}
}

/*
 * Reap zombies from local domain.
 */
static void
thread_reap(void)
{
	struct thread_domain_data *tdd;
	int domain;

	domain = PCPU_GET(domain);
	tdd = &thread_domain_data[domain];

	thread_reap_domain(tdd);
}

static void
thread_reap_task_cb(void *arg __unused, int pending __unused)
{

	thread_reap_all();
}

static void
thread_reap_callout_cb(void *arg __unused)
{
	struct thread_domain_data *tdd;
	int i, cticks, lticks;
	bool wantreap;

	wantreap = false;
	cticks = atomic_load_int(&ticks);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[i];
		lticks = tdd->tdd_reapticks;
		if (tdd->tdd_zombies != NULL &&
		    (u_int)(cticks - lticks) > 5 * hz) {
			wantreap = true;
			break;
		}
	}

	if (wantreap)
		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}

/*
 * Calling this function guarantees that any thread that exited before
 * the call is reaped when the function returns.  By 'exited' we mean
 * a thread removed from the process linkage with thread_unlink().
 * Practically this means that the caller must lock/unlock the corresponding
 * process lock before the call, to synchronize with thread_exit().
 */
void
thread_reap_barrier(void)
{
	struct task *t;

	/*
	 * First do context switches to each CPU to ensure that all
	 * PCPU pc_deadthreads are moved to zombie list.
	 */
	quiesce_all_cpus("", PDROP);

	/*
	 * Second, fire the task in the same thread as normal
	 * thread_reap() is done, to serialize reaping.
	 */
	t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
	TASK_INIT(t, 0, thread_reap_task_cb, t);
	taskqueue_enqueue(taskqueue_thread, t);
	taskqueue_drain(taskqueue_thread, t);
	free(t, M_TEMP);
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}

	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	bzero(&td->td_sa.args, sizeof(td->td_sa.args));
	kmsan_thread_alloc(td);
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}
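
/*
 * Usage sketch for thread_alloc() above (hypothetical caller; the error
 * value and the elided setup are illustrative only):
 *
 *	newtd = thread_alloc(0);
 *	if (newtd == NULL)
 *		return (EPROCLIM);
 *	...
 *	thread_link(newtd, p);
 */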

/*
 * Deallocate a thread.
 */
static void
thread_free_batched(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	/*
	 * Freeing handled by the caller.
	 */
	td->td_tid = -1;
	kmsan_thread_free(td);
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid = td->td_tid;
	thread_free_batched(td);
	tid_free(tid);
	thread_count_dec();
}
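
/*
 * Per-thread credentials and resource limits are shared with the process
 * copy-on-write: td_realucred/td_ucred and td_limit hold references, and
 * td_cowgen records the p_cowgen generation they were taken at.  The
 * helpers below acquire, release and refresh those references.
 */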
void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcowsync();
	oldlimit = lim_cowsync();
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

void
thread_cow_synced(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS(td->td_cowgen != p->p_cowgen);
	MPASS(td->td_ucred == p->p_ucred);
	MPASS(td->td_limit == p->p_limit);
	td->td_cowgen = p->p_cowgen;
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	TD_SET_STATE(td, TDS_INACTIVE);
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
	 */
restart:
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2)) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, EINTR);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_BOUNDARY) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptible, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not necessarily performed at the user
		 * boundary, TDF_ALLPROCSUSP is used to avoid immediate
		 * un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_ALLPROCSUSP) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			td2->td_flags |= TDF_ALLPROCSUSP;
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	default:
		break;
	}
	thread_unlock(td2);
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Is someone already single threading?
	 * Or maybe singlethreading is not needed at all.
	 */
	if (mode == SINGLE_ALLPROC) {
		while ((p->p_flag & P_STOPPED_SINGLE) != 0) {
			if ((p->p_flag2 & P2_WEXIT) != 0)
				return (1);
			msleep(&p->p_flag, &p->p_mtx, PCATCH, "thrsgl", 0);
		}
	} else if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			ast_sched_locked(td2, TDA_SUSPEND);
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2)) {
				forward_signal(td2);
				thread_unlock(td2);
#endif
			} else
				thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.
		 * The SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       |  immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND);
		PROC_LOCK(p);
	}
	return (0);
}

/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or it should return ERESTART and the request is
 * processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to casueword(9) failure (rv == 1), we
 * should handle the stop requests there, with the exception of cases when
 * the thread owns a kernel resource, for instance busied the umtx
 * key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, retrying the whole lock
 * operation, we better not stop there but delegate the handling to
 * ast.
 *
 * If the request is for thread termination P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
 */
int
thread_check_susp(struct thread *td, bool sleep)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDA_SUSPEND is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if (!td_ast_pending(td, TDA_SUSPEND))
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (p->p_flag & P_SINGLE_EXIT)
		error = EINTR;
	else if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
		error = sleep ? thread_suspend_check(0) : ERESTART;
	PROC_UNLOCK(p);
	return (error);
}
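
/*
 * Sketch of the retry pattern described above (hypothetical caller; the
 * variable names and casueword32() arguments are purely illustrative):
 *
 *	for (;;) {
 *		rv = casueword32(uaddr, oldval, &oldval, newval);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}
 */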

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	ast_unsched_locked(td, TDA_SUSPEND);
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	ast_unsched_locked(td, TDA_SUSPEND);
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td, 0));
}

void
thread_run_flash(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (TD_ON_SLEEPQ(td))
		sleepq_remove_nested(td);
	else
		thread_lock(td);

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));

	TD_CLR_SUSPENDED(td);
	PROC_SLOCK(p);
	MPASS(p->p_suspcount > 0);
	p->p_suspcount--;
	PROC_SUNLOCK(p);
	if (setrunnable(td, 0))
		kick_proc0();
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td))
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			else
				thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			} else
				thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
	wakeup(&p->p_flag);
}

/*
 * Locate a thread by number and return with proc lock held.
 *
 * thread exit establishes proc -> tidhash lock ordering, but lookup
 * takes tidhash first and needs to return locked proc.
 *
 * The problem is worked around by relying on type-safety of both
 * structures and doing the work in 2 steps:
 * - tidhash-locked lookup which saves both thread and proc pointers
 * - proc-locked verification that the found thread still matches
 */
static bool
tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
{
#define RUN_THRESH	16
	struct proc *p;
	struct thread *td;
	int run;
	bool locked;

	run = 0;
	rw_rlock(TIDHASHLOCK(tid));
	locked = true;
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid != tid) {
			run++;
			continue;
		}
		p = td->td_proc;
		if (pid != -1 && p->p_pid != pid) {
			td = NULL;
			break;
		}
		if (run > RUN_THRESH) {
			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
				LIST_REMOVE(td, td_hash);
				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
				    td, td_hash);
				rw_wunlock(TIDHASHLOCK(tid));
				locked = false;
				break;
			}
		}
		break;
	}
	if (locked)
		rw_runlock(TIDHASHLOCK(tid));
	if (td == NULL)
		return (false);
	*pp = p;
	*tdp = td;
	return (true);
}

struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	if (td->td_tid == tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	}

	for (;;) {
		if (!tdfind_hash(tid, pid, &p, &td))
			return (NULL);
		PROC_LOCK(p);
		if (td->td_tid != tid) {
			PROC_UNLOCK(p);
			continue;
		}
		if (td->td_proc != p) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			return (NULL);
		}
		return (td);
	}
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}

void
tidhash_remove(struct thread *td)
{

	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_REMOVE(td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}