/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/dtrace_bsd.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/taskqueue.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtxvar.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_phys.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift; changes to the structures must be accompanied by the
 * corresponding assert updates.
 *
 * On the stable branches after KBI freeze, conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
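 *
 * Because these are _Static_assert()s, any layout drift is caught at
 * compile time rather than when a module is loaded against a changed KBI.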
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0x108,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x114,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4b8,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xc4,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4d0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x9c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x318,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x35c,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x6c,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x78,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x270,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x284,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x318,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
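 *
 * struct thread allocations are backed by the UMA zone created in
 * threadinit().  Exited threads are parked on per-NUMA-domain zombie
 * lists (thread_domain_data) and are reclaimed in batches by the reaper
 * machinery below.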
 */
static uma_zone_t thread_zone;

struct thread_domain_data {
	struct thread	*tdd_zombies;
	int		tdd_reapticks;
} __aligned(CACHE_LINE_SIZE);

static struct thread_domain_data thread_domain_data[MAXMEMDOM];

static struct task	thread_reap_task;
static struct callout	thread_reap_callout;

static void thread_zombie(struct thread *);
static void thread_reap(void);
static void thread_reap_all(void);
static void thread_reap_task_cb(void *, int);
static void thread_reap_callout_cb(void *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);
static void thread_free_batched(struct thread *td);

static __exclusive_cache_line struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static __exclusive_cache_line int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long	tidhash;
static u_long	tidhashlock;
static struct	rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static bool
thread_count_inc_try(void)
{
	int nthreads_new;

	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
	if (nthreads_new >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads_new >= maxthread) {
			atomic_subtract_int(&nthreads, 1);
			return (false);
		}
	}
	return (true);
}

static bool
thread_count_inc(void)
{
	static struct timeval lastfail;
	static int curfail;

	thread_reap();
	if (thread_count_inc_try()) {
		return (true);
	}

	thread_reap_all();
	if (thread_count_inc_try()) {
		return (true);
	}

	if (ppsratecheck(&lastfail, &curfail, 1)) {
		printf("maxthread limit exceeded by uid %u "
		    "(pid %d); consider increasing kern.maxthread\n",
		    curthread->td_ucred->cr_ruid, curproc->p_pid);
	}
	return (false);
}

static void
thread_count_sub(int n)
{

	atomic_subtract_int(&nthreads, n);
}

static void
thread_count_dec(void)
{

	thread_count_sub(1);
}

static lwpid_t
tid_alloc(void)
{
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs. If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}

static void
tid_free_locked(lwpid_t rtid)
{
	lwpid_t tid;

	mtx_assert(&tid_lock, MA_OWNED);
	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
}

static void
tid_free(lwpid_t rtid)
{

	mtx_lock(&tid_lock);
	tid_free_locked(rtid);
	mtx_unlock(&tid_lock);
}

static void
tid_free_batch(lwpid_t *batch, int n)
{
	int i;

	mtx_lock(&tid_lock);
	for (i = 0; i < n; i++) {
		tid_free_locked(batch[i]);
	}
	mtx_unlock(&tid_lock);
}

/*
 * Batching for thread reaping.
 */
struct tidbatch {
	lwpid_t tab[16];
	int n;
};

static void
tidbatch_prep(struct tidbatch *tb)
{

	tb->n = 0;
}

static void
tidbatch_add(struct tidbatch *tb, struct thread *td)
{

	KASSERT(tb->n < nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	tb->tab[tb->n] = td->td_tid;
	tb->n++;
}

static void
tidbatch_process(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n == nitems(tb->tab)) {
		tid_free_batch(tb->tab, tb->n);
		tb->n = 0;
	}
}

static void
tidbatch_final(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n != 0) {
		tid_free_batch(tb->tab, tb->n);
	}
}

/*
 * Batching for thread count subtraction, for consistency.
 */
struct tdcountbatch {
	int n;
};

static void
tdcountbatch_prep(struct tdcountbatch *tb)
{

	tb->n = 0;
}

static void
tdcountbatch_add(struct tdcountbatch *tb, struct thread *td __unused)
{

	tb->n++;
}

static void
tdcountbatch_process(struct tdcountbatch *tb)
{

	if (tb->n == 32) {
		thread_count_sub(tb->n);
		tb->n = 0;
	}
}

static void
tdcountbatch_final(struct tdcountbatch *tb)
{

	if (tb->n != 0) {
		thread_count_sub(tb->n);
	}
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_ctor(td);
#endif
	umtx_thread_alloc(td);
	MPASS(td->td_sel == NULL);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (TD_GET_STATE(td)) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_dtor(td);
#endif
	/* Free all OSD associated to this thread. */
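	/*
	 * osd_thread_exit() runs the destructors for any osd(9) slots still
	 * attached, so it must happen before the memory is recycled by UMA.
	 */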
	osd_thread_exit(td);
	ast_kclear(td);
	seltdfini(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_allocdomain = vm_phys_domain(vtophys(td));
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	MPASS(td->td_sel == NULL);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(M_WAITOK);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

static void
ast_suspend(struct thread *td, int tda __unused)
{
	struct proc *p;

	p = td->td_proc;
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	PROC_LOCK(p);
	thread_suspend_check(0);
	PROC_UNLOCK(p);
}

extern int max_threads_per_proc;

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	/*
	 * Handle thread0.
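	 *
	 * thread0 is not allocated from thread_zone (which does not even
	 * exist yet at this point), but it still consumes a slot in the
	 * thread count and must own the reserved TID, hence the explicit
	 * accounting below.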
	 */
	thread_count_inc();
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	/*
	 * Thread structures are specially aligned so that (at least) the
	 * 5 lower bits of a pointer to 'struct thread' must be 0. These bits
	 * are used by synchronization primitives to store flags in pointers to
	 * such structures.
	 */
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE_AND_MASK(32 - 1), UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");

	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
	callout_init(&thread_reap_callout, 1);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
	ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
}

/*
 * Place an unused thread on the zombie list.
 */
void
thread_zombie(struct thread *td)
{
	struct thread_domain_data *tdd;
	struct thread *ztd;

	tdd = &thread_domain_data[td->td_allocdomain];
	ztd = atomic_load_ptr(&tdd->tdd_zombies);
	for (;;) {
		td->td_zombie = ztd;
		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
		    (uintptr_t *)&ztd, (uintptr_t)td))
			break;
		continue;
	}
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombies from passed domain.
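 *
 * The list is detached with a single atomic swap; the TIDs, credentials,
 * resource limits and the global thread count of the reaped threads are
 * then released in batches to keep lock traffic down.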
 */
static void
thread_reap_domain(struct thread_domain_data *tdd)
{
	struct thread *itd, *ntd;
	struct tidbatch tidbatch;
	struct credbatch credbatch;
	struct limbatch limbatch;
	struct tdcountbatch tdcountbatch;

	/*
	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
	 * but most of the time the list is empty.
	 */
	if (tdd->tdd_zombies == NULL)
		return;

	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
	    (uintptr_t)NULL);
	if (itd == NULL)
		return;

	/*
	 * Multiple CPUs can get here, the race is fine as ticks is only
	 * advisory.
	 */
	tdd->tdd_reapticks = ticks;

	tidbatch_prep(&tidbatch);
	credbatch_prep(&credbatch);
	limbatch_prep(&limbatch);
	tdcountbatch_prep(&tdcountbatch);

	while (itd != NULL) {
		ntd = itd->td_zombie;
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);

		tidbatch_add(&tidbatch, itd);
		credbatch_add(&credbatch, itd);
		limbatch_add(&limbatch, itd);
		tdcountbatch_add(&tdcountbatch, itd);

		thread_free_batched(itd);

		tidbatch_process(&tidbatch);
		credbatch_process(&credbatch);
		limbatch_process(&limbatch);
		tdcountbatch_process(&tdcountbatch);

		itd = ntd;
	}

	tidbatch_final(&tidbatch);
	credbatch_final(&credbatch);
	limbatch_final(&limbatch);
	tdcountbatch_final(&tdcountbatch);
}

/*
 * Reap zombies from all domains.
 */
static void
thread_reap_all(void)
{
	struct thread_domain_data *tdd;
	int i, domain;

	domain = PCPU_GET(domain);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
		thread_reap_domain(tdd);
	}
}

/*
 * Reap zombies from local domain.
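 *
 * This is the cheap, opportunistic variant called from thread_count_inc()
 * and thread_wait(); the periodic callout takes care of domains nobody
 * happens to allocate from.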
 */
static void
thread_reap(void)
{
	struct thread_domain_data *tdd;
	int domain;

	domain = PCPU_GET(domain);
	tdd = &thread_domain_data[domain];

	thread_reap_domain(tdd);
}

static void
thread_reap_task_cb(void *arg __unused, int pending __unused)
{

	thread_reap_all();
}

static void
thread_reap_callout_cb(void *arg __unused)
{
	struct thread_domain_data *tdd;
	int i, cticks, lticks;
	bool wantreap;

	wantreap = false;
	cticks = atomic_load_int(&ticks);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[i];
		lticks = tdd->tdd_reapticks;
		if (tdd->tdd_zombies != NULL &&
		    (u_int)(cticks - lticks) > 5 * hz) {
			wantreap = true;
			break;
		}
	}

	if (wantreap)
		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}

/*
 * Calling this function guarantees that any thread that exited before
 * the call is reaped when the function returns.  By 'exited' we mean
 * a thread removed from the process linkage with thread_unlink().
 * Practically this means that the caller must lock/unlock the corresponding
 * process lock before the call, to synchronize with thread_exit().
 */
void
thread_reap_barrier(void)
{
	struct task *t;

	/*
	 * First do context switches to each CPU to ensure that all
	 * PCPU pc_deadthreads are moved to the zombie lists.
	 */
	quiesce_all_cpus("", PDROP);

	/*
	 * Second, fire the task in the same taskqueue thread that deferred
	 * reaping normally runs in, to serialize with it.
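	 * taskqueue_drain() does not return until the queued task has run to
	 * completion, which is what provides the barrier guarantee.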
	 */
	t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
	TASK_INIT(t, 0, thread_reap_task_cb, t);
	taskqueue_enqueue(taskqueue_thread, t);
	taskqueue_drain(taskqueue_thread, t);
	free(t, M_TEMP);
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}

	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	bzero(&td->td_sa.args, sizeof(td->td_sa.args));
	kasan_thread_alloc(td);
	kmsan_thread_alloc(td);
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}

int
thread_recycle(struct thread *td, int pages)
{
	if (td->td_kstack == 0 || td->td_kstack_pages != pages) {
		if (td->td_kstack != 0)
			vm_thread_dispose(td);
		if (!vm_thread_new(td, pages))
			return (ENOMEM);
		cpu_thread_alloc(td);
	}
	kasan_thread_alloc(td);
	kmsan_thread_alloc(td);
	return (0);
}

/*
 * Deallocate a thread.
 */
static void
thread_free_batched(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	/*
	 * Freeing handled by the caller.
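	 * The TID and the thread count are released by the caller:
	 * thread_free() does so directly, thread_reap_domain() in batches.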
	 */
	td->td_tid = -1;
	kmsan_thread_free(td);
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid = td->td_tid;
	thread_free_batched(td);
	tid_free(tid);
	thread_count_dec();
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcowsync();
	oldlimit = lim_cowsync();
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

void
thread_cow_synced(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS(td->td_cowgen != p->p_cowgen);
	MPASS(td->td_ucred == p->p_ucred);
	MPASS(td->td_limit == p->p_limit);
	td->td_cowgen = p->p_cowgen;
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all signs of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
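			 * Otherwise, if ours was the last thread the
			 * single-threading requestor was waiting for,
			 * wake that requestor up now.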
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	TD_SET_STATE(td, TDS_INACTIVE);
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait(),
 * called with Giant, proc and schedlock not held.
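 * At this point the process is down to its last, already exiting thread;
 * wait for it to get off the CPU, drop its per-thread resources and kick
 * off a reap pass.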
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
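	 * After thread_unsuspend_one() the thread lock is no longer held, so
	 * re-acquire it and re-evaluate the thread's state from the top.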
115561a74c5cSJeff Roberson */ 115661a74c5cSJeff Roberson restart: 115707a9368aSKonstantin Belousov switch (mode) { 115807a9368aSKonstantin Belousov case SINGLE_EXIT: 115961a74c5cSJeff Roberson if (TD_IS_SUSPENDED(td2)) { 116084cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td2, p, true); 116161a74c5cSJeff Roberson thread_lock(td2); 116261a74c5cSJeff Roberson goto restart; 116361a74c5cSJeff Roberson } 116461a74c5cSJeff Roberson if (TD_CAN_ABORT(td2)) { 116507a9368aSKonstantin Belousov wakeup_swapper |= sleepq_abort(td2, EINTR); 116661a74c5cSJeff Roberson return (wakeup_swapper); 116761a74c5cSJeff Roberson } 116807a9368aSKonstantin Belousov break; 116907a9368aSKonstantin Belousov case SINGLE_BOUNDARY: 117007a9368aSKonstantin Belousov case SINGLE_NO_EXIT: 117161a74c5cSJeff Roberson if (TD_IS_SUSPENDED(td2) && 117261a74c5cSJeff Roberson (td2->td_flags & TDF_BOUNDARY) == 0) { 117384cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td2, p, false); 117461a74c5cSJeff Roberson thread_lock(td2); 117561a74c5cSJeff Roberson goto restart; 117661a74c5cSJeff Roberson } 117761a74c5cSJeff Roberson if (TD_CAN_ABORT(td2)) { 117807a9368aSKonstantin Belousov wakeup_swapper |= sleepq_abort(td2, ERESTART); 117961a74c5cSJeff Roberson return (wakeup_swapper); 118061a74c5cSJeff Roberson } 1181917dd390SKonstantin Belousov break; 11826ddcc233SKonstantin Belousov case SINGLE_ALLPROC: 11836ddcc233SKonstantin Belousov /* 11846ddcc233SKonstantin Belousov * ALLPROC suspend tries to avoid spurious EINTR for 11856ddcc233SKonstantin Belousov * threads sleeping interruptibly, by suspending the 11866ddcc233SKonstantin Belousov * thread directly, similarly to sig_suspend_threads(). 1187dd883e9aSKonstantin Belousov * Since such sleep is not necessarily performed at the user 1188dd883e9aSKonstantin Belousov * boundary, TDF_ALLPROCSUSP is used to avoid immediate 1189dd883e9aSKonstantin Belousov * un-suspend. 11906ddcc233SKonstantin Belousov */ 11915c274b36SBrooks Davis if (TD_IS_SUSPENDED(td2) && 11925c274b36SBrooks Davis (td2->td_flags & TDF_ALLPROCSUSP) == 0) { 119384cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td2, p, false); 119461a74c5cSJeff Roberson thread_lock(td2); 119561a74c5cSJeff Roberson goto restart; 119661a74c5cSJeff Roberson } 119761a74c5cSJeff Roberson if (TD_CAN_ABORT(td2)) { 11986ddcc233SKonstantin Belousov td2->td_flags |= TDF_ALLPROCSUSP; 11996ddcc233SKonstantin Belousov wakeup_swapper |= sleepq_abort(td2, ERESTART); 120061a74c5cSJeff Roberson return (wakeup_swapper); 12016ddcc233SKonstantin Belousov } 120207a9368aSKonstantin Belousov break; 120361a74c5cSJeff Roberson default: 120461a74c5cSJeff Roberson break; 120507a9368aSKonstantin Belousov } 120661a74c5cSJeff Roberson thread_unlock(td2); 120707a9368aSKonstantin Belousov return (wakeup_swapper); 120807a9368aSKonstantin Belousov } 120907a9368aSKonstantin Belousov 12105215b187SJeff Roberson /* 121144990b8cSJulian Elischer * Enforce single-threading. 121244990b8cSJulian Elischer * 121344990b8cSJulian Elischer * Returns 1 if the caller must abort (another thread is waiting to 121444990b8cSJulian Elischer * exit the process or similar). Process is locked! 121544990b8cSJulian Elischer * Returns 0 when you are successfully the only thread running. 121644990b8cSJulian Elischer * A process has successfully single threaded in the suspend mode when 121744990b8cSJulian Elischer * there are no threads in user mode.
Threads in the kernel must be 121844990b8cSJulian Elischer * allowed to continue until they get to the user boundary. They may even 121944990b8cSJulian Elischer * copy out their return values and data before suspending. They may however be 1220e2668f55SMaxim Konovalov * accelerated in reaching the user boundary as we will wake up 122144990b8cSJulian Elischer * any sleeping threads that are interruptible. (PCATCH). 122244990b8cSJulian Elischer */ 122344990b8cSJulian Elischer int 12246ddcc233SKonstantin Belousov thread_single(struct proc *p, int mode) 122544990b8cSJulian Elischer { 122644990b8cSJulian Elischer struct thread *td; 122744990b8cSJulian Elischer struct thread *td2; 1228da7bbd2cSJohn Baldwin int remaining, wakeup_swapper; 122944990b8cSJulian Elischer 123044990b8cSJulian Elischer td = curthread; 12316ddcc233SKonstantin Belousov KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || 12326ddcc233SKonstantin Belousov mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, 12336ddcc233SKonstantin Belousov ("invalid mode %d", mode)); 12346ddcc233SKonstantin Belousov /* 12356ddcc233SKonstantin Belousov * If allowing non-ALLPROC singlethreading for non-curproc 12366ddcc233SKonstantin Belousov * callers, calc_remaining() and remain_for_mode() should be 12376ddcc233SKonstantin Belousov * adjusted to also account for td->td_proc != p. For now 12386ddcc233SKonstantin Belousov * this is not implemented because it is not used. 12396ddcc233SKonstantin Belousov */ 12406ddcc233SKonstantin Belousov KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) || 12416ddcc233SKonstantin Belousov (mode != SINGLE_ALLPROC && td->td_proc == p), 12426ddcc233SKonstantin Belousov ("mode %d proc %p curproc %p", mode, p, td->td_proc)); 124337814395SPeter Wemm mtx_assert(&Giant, MA_NOTOWNED); 124444990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 124544990b8cSJulian Elischer 1246d7a9e6e7SKonstantin Belousov /* 1247d7a9e6e7SKonstantin Belousov * Is someone already single threading? 1248d7a9e6e7SKonstantin Belousov * Or maybe singlethreading is not needed at all.
1249d7a9e6e7SKonstantin Belousov */ 1250d7a9e6e7SKonstantin Belousov if (mode == SINGLE_ALLPROC) { 1251d7a9e6e7SKonstantin Belousov while ((p->p_flag & P_STOPPED_SINGLE) != 0) { 1252d7a9e6e7SKonstantin Belousov if ((p->p_flag2 & P2_WEXIT) != 0) 1253d7a9e6e7SKonstantin Belousov return (1); 1254d7a9e6e7SKonstantin Belousov msleep(&p->p_flag, &p->p_mtx, PCATCH, "thrsgl", 0); 1255d7a9e6e7SKonstantin Belousov } 12569241ebc7SKonstantin Belousov if ((p->p_flag & (P_STOPPED_SIG | P_TRACED)) != 0 || 12579241ebc7SKonstantin Belousov (p->p_flag2 & P2_WEXIT) != 0) 12589241ebc7SKonstantin Belousov return (1); 1259d7a9e6e7SKonstantin Belousov } else if ((p->p_flag & P_HADTHREADS) == 0) 126044990b8cSJulian Elischer return (0); 1261906ac69dSDavid Xu if (p->p_singlethread != NULL && p->p_singlethread != td) 126244990b8cSJulian Elischer return (1); 126344990b8cSJulian Elischer 1264906ac69dSDavid Xu if (mode == SINGLE_EXIT) { 1265906ac69dSDavid Xu p->p_flag |= P_SINGLE_EXIT; 1266906ac69dSDavid Xu p->p_flag &= ~P_SINGLE_BOUNDARY; 1267906ac69dSDavid Xu } else { 1268906ac69dSDavid Xu p->p_flag &= ~P_SINGLE_EXIT; 1269906ac69dSDavid Xu if (mode == SINGLE_BOUNDARY) 1270906ac69dSDavid Xu p->p_flag |= P_SINGLE_BOUNDARY; 1271906ac69dSDavid Xu else 1272906ac69dSDavid Xu p->p_flag &= ~P_SINGLE_BOUNDARY; 1273906ac69dSDavid Xu } 1274f829268bSKonstantin Belousov if (mode == SINGLE_ALLPROC) 12756ddcc233SKonstantin Belousov p->p_flag |= P_TOTAL_STOP; 12761279572aSDavid Xu p->p_flag |= P_STOPPED_SINGLE; 12777b4a950aSDavid Xu PROC_SLOCK(p); 1278112afcb2SJohn Baldwin p->p_singlethread = td; 127979799053SKonstantin Belousov remaining = calc_remaining(p, mode); 128007a9368aSKonstantin Belousov while (remaining != remain_for_mode(mode)) { 1281bf1a3220SDavid Xu if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) 1282bf1a3220SDavid Xu goto stopme; 1283da7bbd2cSJohn Baldwin wakeup_swapper = 0; 128444990b8cSJulian Elischer FOREACH_THREAD_IN_PROC(p, td2) { 128544990b8cSJulian Elischer if (td2 == td) 128644990b8cSJulian Elischer continue; 1287a54e85fdSJeff Roberson thread_lock(td2); 1288c6d31b83SKonstantin Belousov ast_sched_locked(td2, TDA_SUSPEND); 12896ddcc233SKonstantin Belousov if (TD_IS_INHIBITED(td2)) { 129007a9368aSKonstantin Belousov wakeup_swapper |= weed_inhib(mode, td2, p); 1291d8267df7SDavid Xu #ifdef SMP 1292b9009b17SKonstantin Belousov } else if (TD_IS_RUNNING(td2)) { 1293d8267df7SDavid Xu forward_signal(td2); 129461a74c5cSJeff Roberson thread_unlock(td2); 1295d8267df7SDavid Xu #endif 129661a74c5cSJeff Roberson } else 1297a54e85fdSJeff Roberson thread_unlock(td2); 12989d102777SJulian Elischer } 1299da7bbd2cSJohn Baldwin if (wakeup_swapper) 1300da7bbd2cSJohn Baldwin kick_proc0(); 130179799053SKonstantin Belousov remaining = calc_remaining(p, mode); 1302ec008e96SDavid Xu 13039d102777SJulian Elischer /* 13049d102777SJulian Elischer * Maybe we suspended some threads.. was it enough? 13059d102777SJulian Elischer */ 130607a9368aSKonstantin Belousov if (remaining == remain_for_mode(mode)) 13079d102777SJulian Elischer break; 13089d102777SJulian Elischer 1309bf1a3220SDavid Xu stopme: 131044990b8cSJulian Elischer /* 131144990b8cSJulian Elischer * Wake us up when everyone else has suspended. 1312e3b9bf71SJulian Elischer * In the mean time we suspend as well. 
131344990b8cSJulian Elischer */ 13146ddcc233SKonstantin Belousov thread_suspend_switch(td, p); 131579799053SKonstantin Belousov remaining = calc_remaining(p, mode); 131644990b8cSJulian Elischer } 1317906ac69dSDavid Xu if (mode == SINGLE_EXIT) { 131891599697SJulian Elischer /* 13198626a0ddSKonstantin Belousov * Convert the process to an unthreaded process. The 13208626a0ddSKonstantin Belousov * SINGLE_EXIT is called by exit1() or execve(), in 13218626a0ddSKonstantin Belousov * both cases other threads must be retired. 132291599697SJulian Elischer */ 13238626a0ddSKonstantin Belousov KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads")); 1324ed062c8dSJulian Elischer p->p_singlethread = NULL; 13258626a0ddSKonstantin Belousov p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS); 1326fd229b5bSKonstantin Belousov 1327fd229b5bSKonstantin Belousov /* 1328fd229b5bSKonstantin Belousov * Wait for any remaining threads to exit cpu_throw(). 1329fd229b5bSKonstantin Belousov */ 1330fd229b5bSKonstantin Belousov while (p->p_exitthreads != 0) { 1331fd229b5bSKonstantin Belousov PROC_SUNLOCK(p); 1332fd229b5bSKonstantin Belousov PROC_UNLOCK(p); 1333fd229b5bSKonstantin Belousov sched_relinquish(td); 1334fd229b5bSKonstantin Belousov PROC_LOCK(p); 1335fd229b5bSKonstantin Belousov PROC_SLOCK(p); 1336fd229b5bSKonstantin Belousov } 1337ac437c07SKonstantin Belousov } else if (mode == SINGLE_BOUNDARY) { 1338ac437c07SKonstantin Belousov /* 1339ac437c07SKonstantin Belousov * Wait until all suspended threads are removed from 1340ac437c07SKonstantin Belousov * the processors. The thread_suspend_check() 1341ac437c07SKonstantin Belousov * increments p_boundary_count while it is still 1342ac437c07SKonstantin Belousov * running, which makes it possible for the execve() 1343ac437c07SKonstantin Belousov * to destroy vmspace while our other threads are 1344ac437c07SKonstantin Belousov * still using the address space. 1345ac437c07SKonstantin Belousov * 1346ac437c07SKonstantin Belousov * We lock the thread, which is only allowed to 1347ac437c07SKonstantin Belousov * succeed after context switch code finished using 1348ac437c07SKonstantin Belousov * the address space. 
1349ac437c07SKonstantin Belousov */ 1350ac437c07SKonstantin Belousov FOREACH_THREAD_IN_PROC(p, td2) { 1351ac437c07SKonstantin Belousov if (td2 == td) 1352ac437c07SKonstantin Belousov continue; 1353ac437c07SKonstantin Belousov thread_lock(td2); 1354ac437c07SKonstantin Belousov KASSERT((td2->td_flags & TDF_BOUNDARY) != 0, 1355ac437c07SKonstantin Belousov ("td %p not on boundary", td2)); 1356ac437c07SKonstantin Belousov KASSERT(TD_IS_SUSPENDED(td2), 1357ac437c07SKonstantin Belousov ("td %p is not suspended", td2)); 1358ac437c07SKonstantin Belousov thread_unlock(td2); 1359ac437c07SKonstantin Belousov } 136091599697SJulian Elischer } 13617b4a950aSDavid Xu PROC_SUNLOCK(p); 136244990b8cSJulian Elischer return (0); 136344990b8cSJulian Elischer } 136444990b8cSJulian Elischer 13658638fe7bSKonstantin Belousov bool 13668638fe7bSKonstantin Belousov thread_suspend_check_needed(void) 13678638fe7bSKonstantin Belousov { 13688638fe7bSKonstantin Belousov struct proc *p; 13698638fe7bSKonstantin Belousov struct thread *td; 13708638fe7bSKonstantin Belousov 13718638fe7bSKonstantin Belousov td = curthread; 13728638fe7bSKonstantin Belousov p = td->td_proc; 13738638fe7bSKonstantin Belousov PROC_LOCK_ASSERT(p, MA_OWNED); 13748638fe7bSKonstantin Belousov return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 && 13758638fe7bSKonstantin Belousov (td->td_dbgflags & TDB_SUSPEND) != 0)); 13768638fe7bSKonstantin Belousov } 13778638fe7bSKonstantin Belousov 137844990b8cSJulian Elischer /* 137944990b8cSJulian Elischer * Called in from locations that can safely check to see 138044990b8cSJulian Elischer * whether we have to suspend or at least throttle for a 138144990b8cSJulian Elischer * single-thread event (e.g. fork). 138244990b8cSJulian Elischer * 138344990b8cSJulian Elischer * Such locations include userret(). 138444990b8cSJulian Elischer * If the "return_instead" argument is non zero, the thread must be able to 138544990b8cSJulian Elischer * accept 0 (caller may continue), or 1 (caller must abort) as a result. 138644990b8cSJulian Elischer * 138744990b8cSJulian Elischer * The 'return_instead' argument tells the function if it may do a 138844990b8cSJulian Elischer * thread_exit() or suspend, or whether the caller must abort and back 138944990b8cSJulian Elischer * out instead. 139044990b8cSJulian Elischer * 139144990b8cSJulian Elischer * If the thread that set the single_threading request has set the 139244990b8cSJulian Elischer * P_SINGLE_EXIT bit in the process flags then this call will never return 139344990b8cSJulian Elischer * if 'return_instead' is false, but will exit. 139444990b8cSJulian Elischer * 139544990b8cSJulian Elischer * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 139644990b8cSJulian Elischer *---------------+--------------------+--------------------- 139744990b8cSJulian Elischer * 0 | returns 0 | returns 0 or 1 1398353374b5SJohn Baldwin * | when ST ends | immediately 139944990b8cSJulian Elischer *---------------+--------------------+--------------------- 140044990b8cSJulian Elischer * 1 | thread exits | returns 1 1401353374b5SJohn Baldwin * | | immediately 140244990b8cSJulian Elischer * 0 = thread_exit() or suspension ok, 140344990b8cSJulian Elischer * other = return error instead of stopping the thread. 140444990b8cSJulian Elischer * 140544990b8cSJulian Elischer * While a full suspension is under effect, even a single threading 140644990b8cSJulian Elischer * thread would be suspended if it made this call (but it shouldn't). 
140744990b8cSJulian Elischer * This call should only be made from places where 140844990b8cSJulian Elischer * thread_exit() would be safe as that may be the outcome unless 140944990b8cSJulian Elischer * return_instead is set. 141044990b8cSJulian Elischer */ 141144990b8cSJulian Elischer int 141244990b8cSJulian Elischer thread_suspend_check(int return_instead) 141344990b8cSJulian Elischer { 1414ecafb24bSJuli Mallett struct thread *td; 1415ecafb24bSJuli Mallett struct proc *p; 141646e47c4fSKonstantin Belousov int wakeup_swapper; 141744990b8cSJulian Elischer 141844990b8cSJulian Elischer td = curthread; 141944990b8cSJulian Elischer p = td->td_proc; 142037814395SPeter Wemm mtx_assert(&Giant, MA_NOTOWNED); 142144990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 14228638fe7bSKonstantin Belousov while (thread_suspend_check_needed()) { 14231279572aSDavid Xu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 142444990b8cSJulian Elischer KASSERT(p->p_singlethread != NULL, 142544990b8cSJulian Elischer ("singlethread not set")); 142644990b8cSJulian Elischer /* 1427e3b9bf71SJulian Elischer * The only suspension in action is a 1428e3b9bf71SJulian Elischer * single-threading. Single threader need not stop. 1429bd07998eSKonstantin Belousov * It is safe to access p->p_singlethread unlocked 1430bd07998eSKonstantin Belousov * because it can only be set to our address by us. 143144990b8cSJulian Elischer */ 1432e3b9bf71SJulian Elischer if (p->p_singlethread == td) 143344990b8cSJulian Elischer return (0); /* Exempt from stopping. */ 143444990b8cSJulian Elischer } 143545a4bfa1SDavid Xu if ((p->p_flag & P_SINGLE_EXIT) && return_instead) 143694f0972bSDavid Xu return (EINTR); 143744990b8cSJulian Elischer 1438906ac69dSDavid Xu /* Should we goto user boundary if we didn't come from there? */ 1439906ac69dSDavid Xu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && 1440906ac69dSDavid Xu (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) 144194f0972bSDavid Xu return (ERESTART); 1442906ac69dSDavid Xu 144344990b8cSJulian Elischer /* 14443077f938SKonstantin Belousov * Ignore suspend requests if they are deferred. 1445d071a6faSJohn Baldwin */ 14463077f938SKonstantin Belousov if ((td->td_flags & TDF_SBDRY) != 0) { 1447d071a6faSJohn Baldwin KASSERT(return_instead, 1448d071a6faSJohn Baldwin ("TDF_SBDRY set for unsafe thread_suspend_check")); 144946e47c4fSKonstantin Belousov KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 145046e47c4fSKonstantin Belousov (TDF_SEINTR | TDF_SERESTART), 145146e47c4fSKonstantin Belousov ("both TDF_SEINTR and TDF_SERESTART")); 145246e47c4fSKonstantin Belousov return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0); 1453d071a6faSJohn Baldwin } 1454d071a6faSJohn Baldwin 1455d071a6faSJohn Baldwin /* 145644990b8cSJulian Elischer * If the process is waiting for us to exit, 145744990b8cSJulian Elischer * this thread should just suicide. 14581279572aSDavid Xu * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. 145944990b8cSJulian Elischer */ 1460cf7d9a8cSDavid Xu if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { 1461cf7d9a8cSDavid Xu PROC_UNLOCK(p); 146291d1786fSDmitry Chagin 146391d1786fSDmitry Chagin /* 146491d1786fSDmitry Chagin * Allow Linux emulation layer to do some work 146591d1786fSDmitry Chagin * before thread suicide. 
146691d1786fSDmitry Chagin */ 146791d1786fSDmitry Chagin if (__predict_false(p->p_sysent->sv_thread_detach != NULL)) 146891d1786fSDmitry Chagin (p->p_sysent->sv_thread_detach)(td); 14692a339d9eSKonstantin Belousov umtx_thread_exit(td); 1470d1e7a4a5SJohn Baldwin kern_thr_exit(td); 1471d1e7a4a5SJohn Baldwin panic("stopped thread did not exit"); 1472cf7d9a8cSDavid Xu } 147321ecd1e9SDavid Xu 147421ecd1e9SDavid Xu PROC_SLOCK(p); 147521ecd1e9SDavid Xu thread_stopped(p); 1476a54e85fdSJeff Roberson if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 1477a54e85fdSJeff Roberson if (p->p_numthreads == p->p_suspcount + 1) { 1478a54e85fdSJeff Roberson thread_lock(p->p_singlethread); 147984cdea97SKonstantin Belousov wakeup_swapper = thread_unsuspend_one( 148084cdea97SKonstantin Belousov p->p_singlethread, p, false); 14817847a9daSJohn Baldwin if (wakeup_swapper) 14827847a9daSJohn Baldwin kick_proc0(); 1483a54e85fdSJeff Roberson } 1484a54e85fdSJeff Roberson } 14853f9be10eSDavid Xu PROC_UNLOCK(p); 14867b4a950aSDavid Xu thread_lock(td); 148744990b8cSJulian Elischer /* 148844990b8cSJulian Elischer * When a thread suspends, it just 1489ad1e7d28SJulian Elischer * gets taken off all queues. 149044990b8cSJulian Elischer */ 149171fad9fdSJulian Elischer thread_suspend_one(td); 1492906ac69dSDavid Xu if (return_instead == 0) { 1493906ac69dSDavid Xu p->p_boundary_count++; 1494906ac69dSDavid Xu td->td_flags |= TDF_BOUNDARY; 1495cf19bf91SJulian Elischer } 14967b4a950aSDavid Xu PROC_SUNLOCK(p); 1497686bcb5cSJeff Roberson mi_switch(SW_INVOL | SWT_SUSPEND); 149844990b8cSJulian Elischer PROC_LOCK(p); 149944990b8cSJulian Elischer } 150044990b8cSJulian Elischer return (0); 150144990b8cSJulian Elischer } 150244990b8cSJulian Elischer 1503478ca4b0SKonstantin Belousov /* 1504478ca4b0SKonstantin Belousov * Check for possible stops and suspensions while executing a 1505478ca4b0SKonstantin Belousov * casueword or similar transiently failing operation. 1506478ca4b0SKonstantin Belousov * 1507478ca4b0SKonstantin Belousov * The sleep argument controls whether the function can handle a stop 1508478ca4b0SKonstantin Belousov * request itself or it should return ERESTART and the request is 1509478ca4b0SKonstantin Belousov * processed at the kernel/user boundary in ast. 1510478ca4b0SKonstantin Belousov * 1511478ca4b0SKonstantin Belousov * Typically, when retrying due to casueword(9) failure (rv == 1), we 1512478ca4b0SKonstantin Belousov * should handle the stop requests there, with the exception of cases when 1513478ca4b0SKonstantin Belousov * the thread owns a kernel resource, for instance busied the umtx 1514300b525dSKonstantin Belousov * key, or when functions return immediately if thread_check_susp() 1515478ca4b0SKonstantin Belousov * returned non-zero. On the other hand, when retrying the whole lock 1516478ca4b0SKonstantin Belousov * operation, we had better not stop there but delegate the handling to 1517478ca4b0SKonstantin Belousov * ast. 1518478ca4b0SKonstantin Belousov * 1519478ca4b0SKonstantin Belousov * If the request is for thread termination P_SINGLE_EXIT, we cannot 1520478ca4b0SKonstantin Belousov * handle it at all, and simply return EINTR.
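 *
 * A minimal illustrative sketch of the intended retry pattern (hedged;
 * not copied from any particular umtx routine, and uaddr/old/new are
 * placeholder names):
 *
 *	for (;;) {
 *		rv = casueword32(uaddr, old, &old, new);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}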
1521478ca4b0SKonstantin Belousov */ 1522478ca4b0SKonstantin Belousov int 1523478ca4b0SKonstantin Belousov thread_check_susp(struct thread *td, bool sleep) 1524478ca4b0SKonstantin Belousov { 1525478ca4b0SKonstantin Belousov struct proc *p; 1526478ca4b0SKonstantin Belousov int error; 1527478ca4b0SKonstantin Belousov 1528478ca4b0SKonstantin Belousov /* 1529c6d31b83SKonstantin Belousov * The check for TDA_SUSPEND is racy, but it is enough to 1530478ca4b0SKonstantin Belousov * eventually break the lockstep loop. 1531478ca4b0SKonstantin Belousov */ 1532c6d31b83SKonstantin Belousov if (!td_ast_pending(td, TDA_SUSPEND)) 1533478ca4b0SKonstantin Belousov return (0); 1534478ca4b0SKonstantin Belousov error = 0; 1535478ca4b0SKonstantin Belousov p = td->td_proc; 1536478ca4b0SKonstantin Belousov PROC_LOCK(p); 1537478ca4b0SKonstantin Belousov if (p->p_flag & P_SINGLE_EXIT) 1538478ca4b0SKonstantin Belousov error = EINTR; 1539478ca4b0SKonstantin Belousov else if (P_SHOULDSTOP(p) || 1540478ca4b0SKonstantin Belousov ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) 1541478ca4b0SKonstantin Belousov error = sleep ? thread_suspend_check(0) : ERESTART; 1542478ca4b0SKonstantin Belousov PROC_UNLOCK(p); 1543478ca4b0SKonstantin Belousov return (error); 1544478ca4b0SKonstantin Belousov } 1545478ca4b0SKonstantin Belousov 154635c32a76SDavid Xu void 15476ddcc233SKonstantin Belousov thread_suspend_switch(struct thread *td, struct proc *p) 1548a54e85fdSJeff Roberson { 1549a54e85fdSJeff Roberson 1550a54e85fdSJeff Roberson KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 1551a54e85fdSJeff Roberson PROC_LOCK_ASSERT(p, MA_OWNED); 15527b4a950aSDavid Xu PROC_SLOCK_ASSERT(p, MA_OWNED); 1553a54e85fdSJeff Roberson /* 1554a54e85fdSJeff Roberson * We implement thread_suspend_one in stages here to avoid 1555a54e85fdSJeff Roberson * dropping the proc lock while the thread lock is owned. 
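 * The stages are, roughly: account for the suspension (thread_stopped()
 * and p_suspcount) while the proc lock is still held, then drop the
 * proc lock, take the thread lock, mark the thread suspended, and only
 * then switch away; the proc lock and proc slock are re-acquired once
 * the thread runs again.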
1556a54e85fdSJeff Roberson */ 15576ddcc233SKonstantin Belousov if (p == td->td_proc) { 1558a54e85fdSJeff Roberson thread_stopped(p); 1559a54e85fdSJeff Roberson p->p_suspcount++; 15606ddcc233SKonstantin Belousov } 15613f9be10eSDavid Xu PROC_UNLOCK(p); 15627b4a950aSDavid Xu thread_lock(td); 1563c6d31b83SKonstantin Belousov ast_unsched_locked(td, TDA_SUSPEND); 1564a54e85fdSJeff Roberson TD_SET_SUSPENDED(td); 1565c5aa6b58SJeff Roberson sched_sleep(td, 0); 15667b4a950aSDavid Xu PROC_SUNLOCK(p); 1567a54e85fdSJeff Roberson DROP_GIANT(); 1568686bcb5cSJeff Roberson mi_switch(SW_VOL | SWT_SUSPEND); 1569a54e85fdSJeff Roberson PICKUP_GIANT(); 1570a54e85fdSJeff Roberson PROC_LOCK(p); 15717b4a950aSDavid Xu PROC_SLOCK(p); 1572a54e85fdSJeff Roberson } 1573a54e85fdSJeff Roberson 1574a54e85fdSJeff Roberson void 157535c32a76SDavid Xu thread_suspend_one(struct thread *td) 157635c32a76SDavid Xu { 15776ddcc233SKonstantin Belousov struct proc *p; 157835c32a76SDavid Xu 15796ddcc233SKonstantin Belousov p = td->td_proc; 15807b4a950aSDavid Xu PROC_SLOCK_ASSERT(p, MA_OWNED); 1581a54e85fdSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1582e574e444SDavid Xu KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 158335c32a76SDavid Xu p->p_suspcount++; 1584c6d31b83SKonstantin Belousov ast_unsched_locked(td, TDA_SUSPEND); 158571fad9fdSJulian Elischer TD_SET_SUSPENDED(td); 1586c5aa6b58SJeff Roberson sched_sleep(td, 0); 158735c32a76SDavid Xu } 158835c32a76SDavid Xu 158984cdea97SKonstantin Belousov static int 159084cdea97SKonstantin Belousov thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary) 159135c32a76SDavid Xu { 159235c32a76SDavid Xu 1593a54e85fdSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1594ad1e7d28SJulian Elischer KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); 159571fad9fdSJulian Elischer TD_CLR_SUSPENDED(td); 15966ddcc233SKonstantin Belousov td->td_flags &= ~TDF_ALLPROCSUSP; 15976ddcc233SKonstantin Belousov if (td->td_proc == p) { 15986ddcc233SKonstantin Belousov PROC_SLOCK_ASSERT(p, MA_OWNED); 159935c32a76SDavid Xu p->p_suspcount--; 160084cdea97SKonstantin Belousov if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) { 160184cdea97SKonstantin Belousov td->td_flags &= ~TDF_BOUNDARY; 160284cdea97SKonstantin Belousov p->p_boundary_count--; 160384cdea97SKonstantin Belousov } 16046ddcc233SKonstantin Belousov } 160561a74c5cSJeff Roberson return (setrunnable(td, 0)); 160635c32a76SDavid Xu } 160735c32a76SDavid Xu 1608af928fdeSKonstantin Belousov void 1609af928fdeSKonstantin Belousov thread_run_flash(struct thread *td) 1610af928fdeSKonstantin Belousov { 1611af928fdeSKonstantin Belousov struct proc *p; 1612af928fdeSKonstantin Belousov 1613af928fdeSKonstantin Belousov p = td->td_proc; 1614af928fdeSKonstantin Belousov PROC_LOCK_ASSERT(p, MA_OWNED); 1615af928fdeSKonstantin Belousov 1616af928fdeSKonstantin Belousov if (TD_ON_SLEEPQ(td)) 1617af928fdeSKonstantin Belousov sleepq_remove_nested(td); 1618af928fdeSKonstantin Belousov else 1619af928fdeSKonstantin Belousov thread_lock(td); 1620af928fdeSKonstantin Belousov 1621af928fdeSKonstantin Belousov THREAD_LOCK_ASSERT(td, MA_OWNED); 1622af928fdeSKonstantin Belousov KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); 1623af928fdeSKonstantin Belousov 1624af928fdeSKonstantin Belousov TD_CLR_SUSPENDED(td); 1625af928fdeSKonstantin Belousov PROC_SLOCK(p); 1626af928fdeSKonstantin Belousov MPASS(p->p_suspcount > 0); 1627af928fdeSKonstantin Belousov p->p_suspcount--; 1628af928fdeSKonstantin Belousov PROC_SUNLOCK(p); 1629af928fdeSKonstantin Belousov if 
(setrunnable(td, 0)) 1630af928fdeSKonstantin Belousov kick_proc0(); 1631af928fdeSKonstantin Belousov } 1632af928fdeSKonstantin Belousov 163344990b8cSJulian Elischer /* 163444990b8cSJulian Elischer * Allow all threads blocked by single threading to continue running. 163544990b8cSJulian Elischer */ 163644990b8cSJulian Elischer void 163744990b8cSJulian Elischer thread_unsuspend(struct proc *p) 163844990b8cSJulian Elischer { 163944990b8cSJulian Elischer struct thread *td; 16407847a9daSJohn Baldwin int wakeup_swapper; 164144990b8cSJulian Elischer 164244990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 16437b4a950aSDavid Xu PROC_SLOCK_ASSERT(p, MA_OWNED); 16447847a9daSJohn Baldwin wakeup_swapper = 0; 164544990b8cSJulian Elischer if (!P_SHOULDSTOP(p)) { 1646ad1e7d28SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1647a54e85fdSJeff Roberson thread_lock(td); 1648f829268bSKonstantin Belousov if (TD_IS_SUSPENDED(td)) 164984cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td, p, 165084cdea97SKonstantin Belousov true); 1651f829268bSKonstantin Belousov else 1652a54e85fdSJeff Roberson thread_unlock(td); 1653ad1e7d28SJulian Elischer } 165484cdea97SKonstantin Belousov } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && 165584cdea97SKonstantin Belousov p->p_numthreads == p->p_suspcount) { 165644990b8cSJulian Elischer /* 165744990b8cSJulian Elischer * Stopping everything also did the job for the single 165844990b8cSJulian Elischer * threading request. Now we've downgraded to single-threaded, 165944990b8cSJulian Elischer * let it continue. 166044990b8cSJulian Elischer */ 16616ddcc233SKonstantin Belousov if (p->p_singlethread->td_proc == p) { 1662a54e85fdSJeff Roberson thread_lock(p->p_singlethread); 16636ddcc233SKonstantin Belousov wakeup_swapper = thread_unsuspend_one( 166484cdea97SKonstantin Belousov p->p_singlethread, p, false); 166544990b8cSJulian Elischer } 16666ddcc233SKonstantin Belousov } 16677847a9daSJohn Baldwin if (wakeup_swapper) 16687847a9daSJohn Baldwin kick_proc0(); 166944990b8cSJulian Elischer } 167044990b8cSJulian Elischer 1671ed062c8dSJulian Elischer /* 1672ed062c8dSJulian Elischer * End the single threading mode.. 
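 *
 * A hedged sketch of the usual pairing with thread_single(); real
 * callers, such as the execve() code, differ in detail:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY) != 0) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);
 *	}
 *	...operate while the other threads are parked at the boundary...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);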
1673ed062c8dSJulian Elischer */ 167444990b8cSJulian Elischer void 16756ddcc233SKonstantin Belousov thread_single_end(struct proc *p, int mode) 167644990b8cSJulian Elischer { 167744990b8cSJulian Elischer struct thread *td; 16787847a9daSJohn Baldwin int wakeup_swapper; 167944990b8cSJulian Elischer 16806ddcc233SKonstantin Belousov KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || 16816ddcc233SKonstantin Belousov mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, 16826ddcc233SKonstantin Belousov ("invalid mode %d", mode)); 168344990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 16846ddcc233SKonstantin Belousov KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) || 16856ddcc233SKonstantin Belousov (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0), 16866ddcc233SKonstantin Belousov ("mode %d does not match P_TOTAL_STOP", mode)); 168784cdea97SKonstantin Belousov KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread, 168884cdea97SKonstantin Belousov ("thread_single_end from other thread %p %p", 168984cdea97SKonstantin Belousov curthread, p->p_singlethread)); 169084cdea97SKonstantin Belousov KASSERT(mode != SINGLE_BOUNDARY || 169184cdea97SKonstantin Belousov (p->p_flag & P_SINGLE_BOUNDARY) != 0, 169284cdea97SKonstantin Belousov ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag)); 16936ddcc233SKonstantin Belousov p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY | 16946ddcc233SKonstantin Belousov P_TOTAL_STOP); 16957b4a950aSDavid Xu PROC_SLOCK(p); 169644990b8cSJulian Elischer p->p_singlethread = NULL; 16977847a9daSJohn Baldwin wakeup_swapper = 0; 169849539972SJulian Elischer /* 16997847a9daSJohn Baldwin * If there are other threads they may now run, 170049539972SJulian Elischer * unless of course there is a blanket 'stop order' 170149539972SJulian Elischer * on the process. The single threader must be allowed 170249539972SJulian Elischer * to continue however as this is a bad place to stop. 170349539972SJulian Elischer */ 17046ddcc233SKonstantin Belousov if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) { 1705ad1e7d28SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1706a54e85fdSJeff Roberson thread_lock(td); 1707ad1e7d28SJulian Elischer if (TD_IS_SUSPENDED(td)) { 170884cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td, p, 17092d5ef216SMark Johnston true); 171061a74c5cSJeff Roberson } else 1711a54e85fdSJeff Roberson thread_unlock(td); 171249539972SJulian Elischer } 1713ad1e7d28SJulian Elischer } 171484cdea97SKonstantin Belousov KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0, 171584cdea97SKonstantin Belousov ("inconsistent boundary count %d", p->p_boundary_count)); 17167b4a950aSDavid Xu PROC_SUNLOCK(p); 17177847a9daSJohn Baldwin if (wakeup_swapper) 17187847a9daSJohn Baldwin kick_proc0(); 1719d7a9e6e7SKonstantin Belousov wakeup(&p->p_flag); 172049539972SJulian Elischer } 17214fc21c09SDaniel Eischen 1722aae3547bSMateusz Guzik /* 1723aae3547bSMateusz Guzik * Locate a thread by number and return with proc lock held. 1724aae3547bSMateusz Guzik * 1725aae3547bSMateusz Guzik * thread exit establishes proc -> tidhash lock ordering, but lookup 1726aae3547bSMateusz Guzik * takes tidhash first and needs to return locked proc. 
1727aae3547bSMateusz Guzik * 1728aae3547bSMateusz Guzik * The problem is worked around by relying on type-safety of both 1729aae3547bSMateusz Guzik * structures and doing the work in 2 steps: 1730aae3547bSMateusz Guzik * - tidhash-locked lookup which saves both thread and proc pointers 1731aae3547bSMateusz Guzik * - proc-locked verification that the found thread still matches 1732aae3547bSMateusz Guzik */ 1733aae3547bSMateusz Guzik static bool 1734aae3547bSMateusz Guzik tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp) 1735cf7d9a8cSDavid Xu { 1736cf7d9a8cSDavid Xu #define RUN_THRESH 16 1737aae3547bSMateusz Guzik struct proc *p; 1738cf7d9a8cSDavid Xu struct thread *td; 1739aae3547bSMateusz Guzik int run; 1740aae3547bSMateusz Guzik bool locked; 1741cf7d9a8cSDavid Xu 1742aae3547bSMateusz Guzik run = 0; 174326007fe3SMateusz Guzik rw_rlock(TIDHASHLOCK(tid)); 1744aae3547bSMateusz Guzik locked = true; 1745cf7d9a8cSDavid Xu LIST_FOREACH(td, TIDHASH(tid), td_hash) { 1746aae3547bSMateusz Guzik if (td->td_tid != tid) { 1747aae3547bSMateusz Guzik run++; 1748aae3547bSMateusz Guzik continue; 1749cf7d9a8cSDavid Xu } 1750aae3547bSMateusz Guzik p = td->td_proc; 1751aae3547bSMateusz Guzik if (pid != -1 && p->p_pid != pid) { 1752cf7d9a8cSDavid Xu td = NULL; 1753cf7d9a8cSDavid Xu break; 1754cf7d9a8cSDavid Xu } 1755cf7d9a8cSDavid Xu if (run > RUN_THRESH) { 175626007fe3SMateusz Guzik if (rw_try_upgrade(TIDHASHLOCK(tid))) { 1757cf7d9a8cSDavid Xu LIST_REMOVE(td, td_hash); 1758cf7d9a8cSDavid Xu LIST_INSERT_HEAD(TIDHASH(td->td_tid), 1759cf7d9a8cSDavid Xu td, td_hash); 176026007fe3SMateusz Guzik rw_wunlock(TIDHASHLOCK(tid)); 1761aae3547bSMateusz Guzik locked = false; 1762aae3547bSMateusz Guzik break; 1763cf7d9a8cSDavid Xu } 1764cf7d9a8cSDavid Xu } 1765cf7d9a8cSDavid Xu break; 1766cf7d9a8cSDavid Xu } 1767aae3547bSMateusz Guzik if (locked) 176826007fe3SMateusz Guzik rw_runlock(TIDHASHLOCK(tid)); 1769aae3547bSMateusz Guzik if (td == NULL) 1770aae3547bSMateusz Guzik return (false); 1771aae3547bSMateusz Guzik *pp = p; 1772aae3547bSMateusz Guzik *tdp = td; 1773aae3547bSMateusz Guzik return (true); 1774aae3547bSMateusz Guzik } 1775aae3547bSMateusz Guzik 1776aae3547bSMateusz Guzik struct thread * 1777aae3547bSMateusz Guzik tdfind(lwpid_t tid, pid_t pid) 1778aae3547bSMateusz Guzik { 1779aae3547bSMateusz Guzik struct proc *p; 1780aae3547bSMateusz Guzik struct thread *td; 1781aae3547bSMateusz Guzik 1782aae3547bSMateusz Guzik td = curthread; 1783aae3547bSMateusz Guzik if (td->td_tid == tid) { 1784aae3547bSMateusz Guzik if (pid != -1 && td->td_proc->p_pid != pid) 1785aae3547bSMateusz Guzik return (NULL); 1786aae3547bSMateusz Guzik PROC_LOCK(td->td_proc); 1787cf7d9a8cSDavid Xu return (td); 1788cf7d9a8cSDavid Xu } 1789cf7d9a8cSDavid Xu 1790aae3547bSMateusz Guzik for (;;) { 1791aae3547bSMateusz Guzik if (!tdfind_hash(tid, pid, &p, &td)) 1792aae3547bSMateusz Guzik return (NULL); 1793aae3547bSMateusz Guzik PROC_LOCK(p); 1794aae3547bSMateusz Guzik if (td->td_tid != tid) { 1795aae3547bSMateusz Guzik PROC_UNLOCK(p); 1796aae3547bSMateusz Guzik continue; 1797aae3547bSMateusz Guzik } 1798aae3547bSMateusz Guzik if (td->td_proc != p) { 1799aae3547bSMateusz Guzik PROC_UNLOCK(p); 1800aae3547bSMateusz Guzik continue; 1801aae3547bSMateusz Guzik } 1802aae3547bSMateusz Guzik if (p->p_state == PRS_NEW) { 1803aae3547bSMateusz Guzik PROC_UNLOCK(p); 1804aae3547bSMateusz Guzik return (NULL); 1805aae3547bSMateusz Guzik } 1806aae3547bSMateusz Guzik return (td); 1807aae3547bSMateusz Guzik } 1808aae3547bSMateusz Guzik } 
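/*
 * Illustrative note (not taken from a specific caller): a thread
 * returned by tdfind() is only stable while the proc lock it returned
 * held is kept; a typical consumer looks roughly like:
 *
 *	td = tdfind(tid, pid);
 *	if (td == NULL)
 *		return (ESRCH);
 *	...inspect td and td->td_proc...
 *	PROC_UNLOCK(td->td_proc);
 */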
1809aae3547bSMateusz Guzik 1810cf7d9a8cSDavid Xu void 1811cf7d9a8cSDavid Xu tidhash_add(struct thread *td) 1812cf7d9a8cSDavid Xu { 181326007fe3SMateusz Guzik rw_wlock(TIDHASHLOCK(td->td_tid)); 1814cf7d9a8cSDavid Xu LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); 181526007fe3SMateusz Guzik rw_wunlock(TIDHASHLOCK(td->td_tid)); 1816cf7d9a8cSDavid Xu } 1817cf7d9a8cSDavid Xu 1818cf7d9a8cSDavid Xu void 1819cf7d9a8cSDavid Xu tidhash_remove(struct thread *td) 1820cf7d9a8cSDavid Xu { 182126007fe3SMateusz Guzik 182226007fe3SMateusz Guzik rw_wlock(TIDHASHLOCK(td->td_tid)); 1823cf7d9a8cSDavid Xu LIST_REMOVE(td, td_hash); 182426007fe3SMateusz Guzik rw_wunlock(TIDHASHLOCK(td->td_tid)); 1825cf7d9a8cSDavid Xu } 1826