/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/dtrace_bsd.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/taskqueue.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtxvar.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_phys.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift; changes to the structures must be accompanied by updates
 * to the asserts.
 *
 * On the stable branches after KBI freeze, conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0x108,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x110,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4a8,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xc4,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x9c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa4,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x308,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x34c,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x6c,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x78,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x270,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x284,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x310,
    "struct proc KBI p_emuldata");
#endif
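
/*
 * Illustrative sketch only: when a field has to be added on a stable
 * branch, it is appended at the end of the structure and covered by a
 * matching assert.  The field name and offset below are hypothetical,
 * not part of the real KBI:
 *
 *	_Static_assert(offsetof(struct thread, td_newfield) == 0x6c8,
 *	    "struct thread KBI td_newfield");
 */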

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

struct thread_domain_data {
	struct thread	*tdd_zombies;
	int		tdd_reapticks;
} __aligned(CACHE_LINE_SIZE);

static struct thread_domain_data thread_domain_data[MAXMEMDOM];

static struct task	thread_reap_task;
static struct callout	thread_reap_callout;

static void thread_zombie(struct thread *);
static void thread_reap(void);
static void thread_reap_all(void);
static void thread_reap_task_cb(void *, int);
static void thread_reap_callout_cb(void *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);
static void thread_free_batched(struct thread *td);

static __exclusive_cache_line struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static __exclusive_cache_line int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long	tidhash;
static u_long	tidhashlock;
static struct	rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static bool
thread_count_inc_try(void)
{
	int nthreads_new;

	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
	if (nthreads_new >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads_new >= maxthread) {
			atomic_subtract_int(&nthreads, 1);
			return (false);
		}
	}
	return (true);
}

static bool
thread_count_inc(void)
{
	static struct timeval lastfail;
	static int curfail;

	thread_reap();
	if (thread_count_inc_try()) {
		return (true);
	}

	thread_reap_all();
	if (thread_count_inc_try()) {
		return (true);
	}

	if (ppsratecheck(&lastfail, &curfail, 1)) {
		printf("maxthread limit exceeded by uid %u "
		    "(pid %d); consider increasing kern.maxthread\n",
		    curthread->td_ucred->cr_ruid, curproc->p_pid);
	}
	return (false);
}

static void
thread_count_sub(int n)
{

	atomic_subtract_int(&nthreads, n);
}

static void
thread_count_dec(void)
{

	thread_count_sub(1);
}
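
/*
 * Worked example of the headroom above (added commentary, not from the
 * original source): the last 100 thread slots are reserved for privileged
 * callers.  With the 64-bit default limit clamped to 1000000 (see
 * threadinit() below), an unprivileged allocation starts failing once
 * nthreads reaches 999900, while privileged callers may consume the
 * remaining slots.
 */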

static lwpid_t
tid_alloc(void)
{
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs.  If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}

static void
tid_free_locked(lwpid_t rtid)
{
	lwpid_t tid;

	mtx_assert(&tid_lock, MA_OWNED);
	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
}

static void
tid_free(lwpid_t rtid)
{

	mtx_lock(&tid_lock);
	tid_free_locked(rtid);
	mtx_unlock(&tid_lock);
}

static void
tid_free_batch(lwpid_t *batch, int n)
{
	int i;

	mtx_lock(&tid_lock);
	for (i = 0; i < n; i++) {
		tid_free_locked(batch[i]);
	}
	mtx_unlock(&tid_lock);
}

/*
 * Batching for thread reaping.
 */
struct tidbatch {
	lwpid_t tab[16];
	int n;
};

static void
tidbatch_prep(struct tidbatch *tb)
{

	tb->n = 0;
}

static void
tidbatch_add(struct tidbatch *tb, struct thread *td)
{

	KASSERT(tb->n < nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	tb->tab[tb->n] = td->td_tid;
	tb->n++;
}

static void
tidbatch_process(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n == nitems(tb->tab)) {
		tid_free_batch(tb->tab, tb->n);
		tb->n = 0;
	}
}

static void
tidbatch_final(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n != 0) {
		tid_free_batch(tb->tab, tb->n);
	}
}
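
/*
 * Illustrative lifecycle of the batching helpers above (a sketch of how
 * thread_reap_domain() drives them, not additional API; next_zombie() is
 * a hypothetical iterator):
 *
 *	struct tidbatch tb;
 *
 *	tidbatch_prep(&tb);
 *	while ((td = next_zombie()) != NULL) {
 *		tidbatch_add(&tb, td);
 *		tidbatch_process(&tb);	flushes when 16 IDs are queued
 *	}
 *	tidbatch_final(&tb);		flushes the remainder
 *
 * Batching amortizes tid_lock acquisition to one per 16 freed IDs.
 */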

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_ctor(td);
#endif
	umtx_thread_alloc(td);
	MPASS(td->td_sel == NULL);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (TD_GET_STATE(td)) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_dtor(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);
	seltdfini(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_allocdomain = vm_phys_domain(vtophys(td));
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	MPASS(td->td_sel == NULL);
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads etc.
 * Called from:
 *	{arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

extern int max_threads_per_proc;

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;
	uint32_t flags;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	/*
	 * Handle thread0.
	 */
	thread_count_inc();
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	flags = UMA_ZONE_NOFREE;
#ifdef __aarch64__
	/*
	 * Force thread structures to be allocated from the direct map.
	 * Otherwise, superpage promotions and demotions may temporarily
	 * invalidate thread structure mappings.  For most dynamically
	 * allocated structures this is not a problem, but translation
	 * faults cannot be handled without accessing curthread.
	 */
	flags |= UMA_ZONE_CONTIG;
#endif
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, flags);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");

	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
	callout_init(&thread_reap_callout, 1);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}
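
/*
 * Tuning note (a suggestion, not from the original source): kern.maxthread
 * is a read-only tunable (CTLFLAG_RDTUN), so raising the limit beyond the
 * computed default must happen at boot, e.g. in /boot/loader.conf:
 *
 *	kern.maxthread="2000000"
 *
 * The effective value can be inspected at runtime with
 * sysctl kern.maxthread.
 */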

/*
 * Place an unused thread on the zombie list.
 */
void
thread_zombie(struct thread *td)
{
	struct thread_domain_data *tdd;
	struct thread *ztd;

	tdd = &thread_domain_data[td->td_allocdomain];
	ztd = atomic_load_ptr(&tdd->tdd_zombies);
	for (;;) {
		td->td_zombie = ztd;
		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
		    (uintptr_t *)&ztd, (uintptr_t)td))
			break;
		continue;
	}
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}
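
/*
 * Implementation note (added commentary): the zombie list is a per-domain
 * lock-free LIFO.  thread_zombie() pushes with a release compare-and-swap
 * loop, while the consumer, thread_reap_domain(), detaches the entire
 * chain at once with atomic_swap_ptr(..., NULL).  Because nodes are only
 * ever drained in bulk and never unlinked individually, the classic ABA
 * hazard of lock-free stack pops does not arise here.
 */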

/*
 * Reap zombies from the passed domain.
 */
static void
thread_reap_domain(struct thread_domain_data *tdd)
{
	struct thread *itd, *ntd;
	struct tidbatch tidbatch;
	struct credbatch credbatch;
	int tdcount;
	struct plimit *lim;
	int limcount;

	/*
	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
	 * but most of the time the list is empty.
	 */
	if (tdd->tdd_zombies == NULL)
		return;

	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
	    (uintptr_t)NULL);
	if (itd == NULL)
		return;

	/*
	 * Multiple CPUs can get here; the race is fine, as ticks is only
	 * advisory.
	 */
	tdd->tdd_reapticks = ticks;

	tidbatch_prep(&tidbatch);
	credbatch_prep(&credbatch);
	tdcount = 0;
	lim = NULL;
	limcount = 0;

	while (itd != NULL) {
		ntd = itd->td_zombie;
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
		tidbatch_add(&tidbatch, itd);
		credbatch_add(&credbatch, itd);
		MPASS(itd->td_limit != NULL);
		if (lim != itd->td_limit) {
			if (limcount != 0) {
				lim_freen(lim, limcount);
				limcount = 0;
			}
		}
		lim = itd->td_limit;
		limcount++;
		thread_free_batched(itd);
		tidbatch_process(&tidbatch);
		credbatch_process(&credbatch);
		tdcount++;
		if (tdcount == 32) {
			thread_count_sub(tdcount);
			tdcount = 0;
		}
		itd = ntd;
	}

	tidbatch_final(&tidbatch);
	credbatch_final(&credbatch);
	if (tdcount != 0) {
		thread_count_sub(tdcount);
	}
	MPASS(limcount != 0);
	lim_freen(lim, limcount);
}

/*
 * Reap zombies from all domains.
 */
static void
thread_reap_all(void)
{
	struct thread_domain_data *tdd;
	int i, domain;

	domain = PCPU_GET(domain);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
		thread_reap_domain(tdd);
	}
}

/*
 * Reap zombies from the local domain.
 */
static void
thread_reap(void)
{
	struct thread_domain_data *tdd;
	int domain;

	domain = PCPU_GET(domain);
	tdd = &thread_domain_data[domain];

	thread_reap_domain(tdd);
}

static void
thread_reap_task_cb(void *arg __unused, int pending __unused)
{

	thread_reap_all();
}

static void
thread_reap_callout_cb(void *arg __unused)
{
	struct thread_domain_data *tdd;
	int i, cticks, lticks;
	bool wantreap;

	wantreap = false;
	cticks = atomic_load_int(&ticks);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[i];
		lticks = tdd->tdd_reapticks;
		if (tdd->tdd_zombies != NULL &&
		    (u_int)(cticks - lticks) > 5 * hz) {
			wantreap = true;
			break;
		}
	}

	if (wantreap)
		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}

/*
 * Calling this function guarantees that any thread that exited before
 * the call is reaped when the function returns.  By 'exited' we mean
 * a thread removed from the process linkage with thread_unlink().
 * Practically this means that the caller must lock/unlock the
 * corresponding process lock before the call, to synchronize with
 * thread_exit().
 */
void
thread_reap_barrier(void)
{
	struct task *t;

	/*
	 * First do context switches to each CPU to ensure that all
	 * PCPU pc_deadthreads are moved to the zombie list.
	 */
	quiesce_all_cpus("", PDROP);

	/*
	 * Second, fire the task in the same thread as normal
	 * thread_reap() is done, to serialize reaping.
	 */
	t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
	TASK_INIT(t, 0, thread_reap_task_cb, t);
	taskqueue_enqueue(taskqueue_thread, t);
	taskqueue_drain(taskqueue_thread, t);
	free(t, M_TEMP);
}
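
/*
 * Illustrative synchronization sketch (assumption: p is a valid,
 * referenced process; this is not additional API):
 *
 *	PROC_LOCK(p);
 *	(observe or update the thread list)
 *	PROC_UNLOCK(p);
 *	thread_reap_barrier();
 *	(every thread unlinked before the unlock is now fully reaped)
 */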

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}

	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	bzero(&td->td_sa.args, sizeof(td->td_sa.args));
	kmsan_thread_alloc(td);
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
static void
thread_free_batched(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	/*
	 * Freeing of the tid is handled by the caller.
	 */
	td->td_tid = -1;
	kmsan_thread_free(td);
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid = td->td_tid;
	thread_free_batched(td);
	tid_free(tid);
	thread_count_dec();
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldlimit = NULL;
	PROC_LOCK(p);
	oldcred = crcowsync();
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}
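
/*
 * Usage sketch (added commentary): consumers detect stale COW snapshots
 * by comparing generation counts, typically at the kernel/user boundary,
 * and resynchronize only when the process-wide data actually changed:
 *
 *	if (td->td_cowgen != p->p_cowgen)
 *		thread_cow_update(td);
 *
 * This keeps the common path to a single comparison per return to
 * userspace.
 */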

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * Drop FPU and debug register state storage, and any other
	 * architecture-specific resources that would not be present in
	 * a new, untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	TD_SET_STATE(td, TDS_INACTIVE);
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanup that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}
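
/*
 * Lifecycle note (added commentary): sched_throw() does not return; the
 * unreachable panic above merely documents that.  The exiting thread is
 * parked in PCPU(deadthread), later handed to thread_stash() once another
 * thread runs on this CPU, and finally destroyed by the reaping machinery
 * above (thread_reap() or the reap callout) or via thread_wait() when the
 * process itself is reaped.
 */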

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}
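
/*
 * Worked example (added commentary, numbers are hypothetical): with
 * p_numthreads = 5, p_suspcount = 3 and p_boundary_count = 2,
 * calc_remaining() yields 5 for SINGLE_EXIT, 3 for SINGLE_BOUNDARY and
 * 2 for SINGLE_NO_EXIT/SINGLE_ALLPROC.  Single-threading has converged
 * once the value drops to remain_for_mode(): 1 for all modes except
 * SINGLE_ALLPROC, where the requesting thread belongs to a different
 * process, so 0 threads of p may remain.
 */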

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
	 */
restart:
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2)) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, EINTR);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_BOUNDARY) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
				return (wakeup_swapper);
			}
		}
		break;
	default:
		break;
	}
	thread_unlock(td2);
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary, as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
				thread_unlock(td2);
#endif
			} else
				thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.
		 * SINGLE_EXIT is requested by exit1() or execve(); in
		 * both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
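
/*
 * Illustrative call pattern (a sketch; exit1() and execve() are the real
 * consumers named in the comments above):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	another thread won the race
 *	}
 *	(mutate state that requires a single-threaded process)
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */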

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  The single threader need not
			 * stop.  It is safe to access p->p_singlethread
			 * unlocked because it can only be set to our address
			 * by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should simply exit.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow the Linux emulation layer to do some work
			 * before the thread exits.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND);
		PROC_LOCK(p);
	}
	return (0);
}
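
/*
 * Illustrative call sites (a sketch matching the table above): the
 * kernel/user boundary passes 0 and may therefore suspend or exit in
 * place, while code holding kernel state passes 1 and unwinds first:
 *
 *	error = thread_suspend_check(1);
 *	if (error != 0) {
 *		(release resources)
 *		return (error);		EINTR or ERESTART
 *	}
 */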

/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or it should return ERESTART and the request is
 * processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to casueword(9) failure (rv == 1), we
 * should handle the stop requests there, with the exception of cases
 * when the thread owns a kernel resource, for instance busied the umtx
 * key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, when retrying the whole lock
 * operation, we had better not stop there but delegate the handling to
 * ast.
 *
 * If the request is for thread termination, P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
 */
int
thread_check_susp(struct thread *td, bool sleep)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (p->p_flag & P_SINGLE_EXIT)
		error = EINTR;
	else if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
		error = sleep ? thread_suspend_check(0) : ERESTART;
	PROC_UNLOCK(p);
	return (error);
}
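
/*
 * Illustrative retry loop (a sketch; identifiers other than
 * thread_check_susp() are hypothetical):
 *
 *	for (;;) {
 *		rv = try_the_operation();
 *		if (rv != 1)
 *			break;			success or hard error
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);		stop or exit was requested
 *	}
 */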

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td, 0));
}

void
thread_run_flash(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (TD_ON_SLEEPQ(td))
		sleepq_remove_nested(td);
	else
		thread_lock(td);

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));

	TD_CLR_SUSPENDED(td);
	PROC_SLOCK(p);
	MPASS(p->p_suspcount > 0);
	p->p_suspcount--;
	PROC_SUNLOCK(p);
	if (setrunnable(td, 0))
		kick_proc0();
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			} else
				thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			} else
				thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Locate a thread by number and return with proc lock held.
 *
 * Thread exit establishes proc -> tidhash lock ordering, but lookup
 * takes tidhash first and needs to return a locked proc.
 *
 * The problem is worked around by relying on type-safety of both
 * structures and doing the work in 2 steps:
 * - tidhash-locked lookup which saves both thread and proc pointers
 * - proc-locked verification that the found thread still matches
 */
static bool
tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
{
#define	RUN_THRESH	16
	struct proc *p;
	struct thread *td;
	int run;
	bool locked;

	run = 0;
	rw_rlock(TIDHASHLOCK(tid));
	locked = true;
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid != tid) {
			run++;
			continue;
		}
		p = td->td_proc;
		if (pid != -1 && p->p_pid != pid) {
			td = NULL;
			break;
		}
		if (run > RUN_THRESH) {
			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
				LIST_REMOVE(td, td_hash);
				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
				    td, td_hash);
				rw_wunlock(TIDHASHLOCK(tid));
				locked = false;
				break;
			}
		}
		break;
	}
	if (locked)
		rw_runlock(TIDHASHLOCK(tid));
	if (td == NULL)
		return (false);
	*pp = p;
	*tdp = td;
	return (true);
}

struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	if (td->td_tid == tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	}

	for (;;) {
		if (!tdfind_hash(tid, pid, &p, &td))
			return (NULL);
		PROC_LOCK(p);
		if (td->td_tid != tid) {
			PROC_UNLOCK(p);
			continue;
		}
		if (td->td_proc != p) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			return (NULL);
		}
		return (td);
	}
}
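
/*
 * Illustrative lookup (a sketch): tdfind() returns with the containing
 * process locked, so the caller owns the unlock:
 *
 *	td = tdfind(tid, -1);		-1 matches any pid
 *	if (td == NULL)
 *		return (ESRCH);
 *	(use td under the proc lock)
 *	PROC_UNLOCK(td->td_proc);
 */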

void
tidhash_add(struct thread *td)
{
	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}

void
tidhash_remove(struct thread *td)
{

	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_REMOVE(td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}