xref: /freebsd/sys/kern/kern_thread.c (revision af928fded0705100e4f3926c99ed488f7ab6dcf1)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
444990b8cSJulian Elischer  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
544990b8cSJulian Elischer  *  All rights reserved.
644990b8cSJulian Elischer  *
744990b8cSJulian Elischer  * Redistribution and use in source and binary forms, with or without
844990b8cSJulian Elischer  * modification, are permitted provided that the following conditions
944990b8cSJulian Elischer  * are met:
1044990b8cSJulian Elischer  * 1. Redistributions of source code must retain the above copyright
1144990b8cSJulian Elischer  *    notice(s), this list of conditions and the following disclaimer as
1244990b8cSJulian Elischer  *    the first lines of this file unmodified other than the possible
1344990b8cSJulian Elischer  *    addition of one or more copyright notices.
1444990b8cSJulian Elischer  * 2. Redistributions in binary form must reproduce the above copyright
1544990b8cSJulian Elischer  *    notice(s), this list of conditions and the following disclaimer in the
1644990b8cSJulian Elischer  *    documentation and/or other materials provided with the distribution.
1744990b8cSJulian Elischer  *
1844990b8cSJulian Elischer  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
1944990b8cSJulian Elischer  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2044990b8cSJulian Elischer  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2144990b8cSJulian Elischer  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
2244990b8cSJulian Elischer  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2344990b8cSJulian Elischer  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
2444990b8cSJulian Elischer  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
2544990b8cSJulian Elischer  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2644990b8cSJulian Elischer  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2744990b8cSJulian Elischer  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
2844990b8cSJulian Elischer  * DAMAGE.
2944990b8cSJulian Elischer  */
3044990b8cSJulian Elischer 
313d06b4b3SAttilio Rao #include "opt_witness.h"
3216d95d4fSJoseph Koshy #include "opt_hwpmc_hooks.h"
333d06b4b3SAttilio Rao 
34677b542eSDavid E. O'Brien #include <sys/cdefs.h>
35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
36677b542eSDavid E. O'Brien 
3744990b8cSJulian Elischer #include <sys/param.h>
3844990b8cSJulian Elischer #include <sys/systm.h>
3944990b8cSJulian Elischer #include <sys/kernel.h>
4044990b8cSJulian Elischer #include <sys/lock.h>
4144990b8cSJulian Elischer #include <sys/mutex.h>
4244990b8cSJulian Elischer #include <sys/proc.h>
4335bb59edSMateusz Guzik #include <sys/bitstring.h>
446febf180SGleb Smirnoff #include <sys/epoch.h>
458f0e9130SKonstantin Belousov #include <sys/rangelock.h>
46e170bfdaSDavid Xu #include <sys/resourcevar.h>
47b3e9e682SRyan Stone #include <sys/sdt.h>
4894e0a4cdSJulian Elischer #include <sys/smp.h>
49de028f5aSJeff Roberson #include <sys/sched.h>
5044f3b092SJohn Baldwin #include <sys/sleepqueue.h>
51ace8398dSJeff Roberson #include <sys/selinfo.h>
52d1e7a4a5SJohn Baldwin #include <sys/syscallsubr.h>
53598f2b81SMateusz Guzik #include <sys/dtrace_bsd.h>
5491d1786fSDmitry Chagin #include <sys/sysent.h>
55961a7b24SJohn Baldwin #include <sys/turnstile.h>
56d116b9f1SMateusz Guzik #include <sys/taskqueue.h>
5744990b8cSJulian Elischer #include <sys/ktr.h>
58cf7d9a8cSDavid Xu #include <sys/rwlock.h>
59bc8e6d81SDavid Xu #include <sys/umtx.h>
609ed01c32SGleb Smirnoff #include <sys/vmmeter.h>
61d7f687fcSJeff Roberson #include <sys/cpuset.h>
6216d95d4fSJoseph Koshy #ifdef	HWPMC_HOOKS
6316d95d4fSJoseph Koshy #include <sys/pmckern.h>
6416d95d4fSJoseph Koshy #endif
651bd3cf5dSMateusz Guzik #include <sys/priv.h>
6644990b8cSJulian Elischer 
67911b84b0SRobert Watson #include <security/audit/audit.h>
68911b84b0SRobert Watson 
69d116b9f1SMateusz Guzik #include <vm/pmap.h>
7044990b8cSJulian Elischer #include <vm/vm.h>
7149a2507bSAlan Cox #include <vm/vm_extern.h>
7244990b8cSJulian Elischer #include <vm/uma.h>
73d116b9f1SMateusz Guzik #include <vm/vm_phys.h>
74b209f889SRandall Stewart #include <sys/eventhandler.h>
7502fb42b0SPeter Wemm 
76acd9f517SKonstantin Belousov /*
77acd9f517SKonstantin Belousov  * Asserts below verify the stability of struct thread and struct proc
78acd9f517SKonstantin Belousov  * layout, as exposed by KBI to modules.  On head, the KBI is allowed
79acd9f517SKonstantin Belousov  * to drift; changes to the structures must be accompanied by the
80acd9f517SKonstantin Belousov  * assert update.
81acd9f517SKonstantin Belousov  *
82acd9f517SKonstantin Belousov  * On the stable branches after KBI freeze, conditions must not be
83acd9f517SKonstantin Belousov  * violated.  Typically new fields are moved to the end of the
84acd9f517SKonstantin Belousov  * structures.
85acd9f517SKonstantin Belousov  */
86acd9f517SKonstantin Belousov #ifdef __amd64__
873f289c3fSJeff Roberson _Static_assert(offsetof(struct thread, td_flags) == 0xfc,
88acd9f517SKonstantin Belousov     "struct thread KBI td_flags");
893f289c3fSJeff Roberson _Static_assert(offsetof(struct thread, td_pflags) == 0x104,
90acd9f517SKonstantin Belousov     "struct thread KBI td_pflags");
911e2521ffSEdward Tomasz Napierala _Static_assert(offsetof(struct thread, td_frame) == 0x4a0,
92acd9f517SKonstantin Belousov     "struct thread KBI td_frame");
931724c563SMateusz Guzik _Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
94acd9f517SKonstantin Belousov     "struct thread KBI td_emuldata");
9585078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_flag) == 0xb8,
96acd9f517SKonstantin Belousov     "struct proc KBI p_flag");
9785078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_pid) == 0xc4,
98acd9f517SKonstantin Belousov     "struct proc KBI p_pid");
9985078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_filemon) == 0x3c0,
100acd9f517SKonstantin Belousov     "struct proc KBI p_filemon");
10185078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_comm) == 0x3d8,
102acd9f517SKonstantin Belousov     "struct proc KBI p_comm");
10385078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8,
104acd9f517SKonstantin Belousov     "struct proc KBI p_emuldata");
105acd9f517SKonstantin Belousov #endif
106acd9f517SKonstantin Belousov #ifdef __i386__
1073f289c3fSJeff Roberson _Static_assert(offsetof(struct thread, td_flags) == 0x98,
108acd9f517SKonstantin Belousov     "struct thread KBI td_flags");
1093f289c3fSJeff Roberson _Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
110acd9f517SKonstantin Belousov     "struct thread KBI td_pflags");
1111e2521ffSEdward Tomasz Napierala _Static_assert(offsetof(struct thread, td_frame) == 0x300,
112acd9f517SKonstantin Belousov     "struct thread KBI td_frame");
1131e2521ffSEdward Tomasz Napierala _Static_assert(offsetof(struct thread, td_emuldata) == 0x344,
114acd9f517SKonstantin Belousov     "struct thread KBI td_emuldata");
11585078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_flag) == 0x6c,
116acd9f517SKonstantin Belousov     "struct proc KBI p_flag");
11785078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_pid) == 0x78,
118acd9f517SKonstantin Belousov     "struct proc KBI p_pid");
11985078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_filemon) == 0x26c,
120acd9f517SKonstantin Belousov     "struct proc KBI p_filemon");
12185078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_comm) == 0x280,
122acd9f517SKonstantin Belousov     "struct proc KBI p_comm");
12385078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_emuldata) == 0x30c,
124acd9f517SKonstantin Belousov     "struct proc KBI p_emuldata");
125acd9f517SKonstantin Belousov #endif
126acd9f517SKonstantin Belousov 
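/*
 * A minimal sketch of what the policy above means in practice
 * (illustrative only; td_newfield is a made-up name): on a stable
 * branch, a new field is appended past the asserted offsets so that
 * modules compiled against the frozen layout keep working:
 *
 *	struct thread {
 *		...				(fields at frozen offsets)
 *		int	td_newfield;		(appended at the tail)
 *	};
 */
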
127b3e9e682SRyan Stone SDT_PROVIDER_DECLARE(proc);
128d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(proc, , , lwp__exit);
129b3e9e682SRyan Stone 
1308460a577SJohn Birrell /*
1318460a577SJohn Birrell  * Thread related storage.
1328460a577SJohn Birrell  */
13344990b8cSJulian Elischer static uma_zone_t thread_zone;
13444990b8cSJulian Elischer 
135d116b9f1SMateusz Guzik struct thread_domain_data {
136d116b9f1SMateusz Guzik 	struct thread	*tdd_zombies;
137d116b9f1SMateusz Guzik 	int		tdd_reapticks;
138d116b9f1SMateusz Guzik } __aligned(CACHE_LINE_SIZE);
139d116b9f1SMateusz Guzik 
140d116b9f1SMateusz Guzik static struct thread_domain_data thread_domain_data[MAXMEMDOM];
141d116b9f1SMateusz Guzik 
142d116b9f1SMateusz Guzik static struct task	thread_reap_task;
143d116b9f1SMateusz Guzik static struct callout  	thread_reap_callout;
14444990b8cSJulian Elischer 
145ff8fbcffSJeff Roberson static void thread_zombie(struct thread *);
146b83e94beSMateusz Guzik static void thread_reap(void);
147d116b9f1SMateusz Guzik static void thread_reap_all(void);
148d116b9f1SMateusz Guzik static void thread_reap_task_cb(void *, int);
149d116b9f1SMateusz Guzik static void thread_reap_callout_cb(void *);
15084cdea97SKonstantin Belousov static int thread_unsuspend_one(struct thread *td, struct proc *p,
15184cdea97SKonstantin Belousov     bool boundary);
152755341dfSMateusz Guzik static void thread_free_batched(struct thread *td);
153ff8fbcffSJeff Roberson 
154d1ca25beSMateusz Guzik static __exclusive_cache_line struct mtx tid_lock;
155934e7e5eSMateusz Guzik static bitstr_t *tid_bitmap;
15635bb59edSMateusz Guzik 
157cf7d9a8cSDavid Xu static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
158cf7d9a8cSDavid Xu 
1591bd3cf5dSMateusz Guzik static int maxthread;
1601bd3cf5dSMateusz Guzik SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
1611bd3cf5dSMateusz Guzik     &maxthread, 0, "Maximum number of threads");
1621bd3cf5dSMateusz Guzik 
16362dbc992SMateusz Guzik static __exclusive_cache_line int nthreads;
1641bd3cf5dSMateusz Guzik 
165aae3547bSMateusz Guzik static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
166aae3547bSMateusz Guzik static u_long	tidhash;
16726007fe3SMateusz Guzik static u_long	tidhashlock;
16826007fe3SMateusz Guzik static struct	rwlock *tidhashtbl_lock;
169aae3547bSMateusz Guzik #define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
17026007fe3SMateusz Guzik #define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])
171cf7d9a8cSDavid Xu 
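/*
 * Worked example for the two macros above (illustrative; assumes
 * hashinit() handed back 1024 buckets in threadinit(), so that
 * tidhash = 1023 and, per the sizing code there, tidhashlock =
 * 1024 / 64 - 1 = 15): a thread with tid 12345 lives in bucket
 * 12345 & 1023 = 57 and is protected by lock 12345 & 15 = 9, i.e.
 * 16 rwlocks stripe the entire table.
 */
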
1722ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_ctor);
1732ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_dtor);
1742ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_init);
1752ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_fini);
1762ca45184SMatt Joras 
17762dbc992SMateusz Guzik static bool
178d116b9f1SMateusz Guzik thread_count_inc_try(void)
179ec6ea5e8SDavid Xu {
18062dbc992SMateusz Guzik 	int nthreads_new;
181ec6ea5e8SDavid Xu 
18262dbc992SMateusz Guzik 	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
18362dbc992SMateusz Guzik 	if (nthreads_new >= maxthread - 100) {
1841bd3cf5dSMateusz Guzik 		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
18562dbc992SMateusz Guzik 		    nthreads_new >= maxthread) {
18662dbc992SMateusz Guzik 			atomic_subtract_int(&nthreads, 1);
187d116b9f1SMateusz Guzik 			return (false);
188d116b9f1SMateusz Guzik 		}
189d116b9f1SMateusz Guzik 	}
190d116b9f1SMateusz Guzik 	return (true);
191d116b9f1SMateusz Guzik }
192d116b9f1SMateusz Guzik 
193d116b9f1SMateusz Guzik static bool
194d116b9f1SMateusz Guzik thread_count_inc(void)
195d116b9f1SMateusz Guzik {
196d116b9f1SMateusz Guzik 	static struct timeval lastfail;
197d116b9f1SMateusz Guzik 	static int curfail;
198d116b9f1SMateusz Guzik 
199d116b9f1SMateusz Guzik 	thread_reap();
200d116b9f1SMateusz Guzik 	if (thread_count_inc_try()) {
201d116b9f1SMateusz Guzik 		return (true);
202d116b9f1SMateusz Guzik 	}
203d116b9f1SMateusz Guzik 
204d116b9f1SMateusz Guzik 	thread_reap_all();
205d116b9f1SMateusz Guzik 	if (thread_count_inc_try()) {
206d116b9f1SMateusz Guzik 		return (true);
207d116b9f1SMateusz Guzik 	}
208d116b9f1SMateusz Guzik 
2091bd3cf5dSMateusz Guzik 	if (ppsratecheck(&lastfail, &curfail, 1)) {
2101bd3cf5dSMateusz Guzik 		printf("maxthread limit exceeded by uid %u "
2111bd3cf5dSMateusz Guzik 		    "(pid %d); consider increasing kern.maxthread\n",
2121bd3cf5dSMateusz Guzik 		    curthread->td_ucred->cr_ruid, curproc->p_pid);
2131bd3cf5dSMateusz Guzik 	}
21462dbc992SMateusz Guzik 	return (false);
2151bd3cf5dSMateusz Guzik }
2161bd3cf5dSMateusz Guzik 
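/*
 * Worked example of the headroom logic above (numbers illustrative):
 * with maxthread = 1000, an unprivileged caller starts failing once
 * the incremented count reaches 900 (maxthread - 100), while a caller
 * holding PRIV_MAXPROC may continue until the count would reach
 * maxthread itself.  The gap reserves a margin of threads for
 * privileged recovery from a runaway process.
 */
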
21762dbc992SMateusz Guzik static void
21862dbc992SMateusz Guzik thread_count_sub(int n)
21962dbc992SMateusz Guzik {
22062dbc992SMateusz Guzik 
22162dbc992SMateusz Guzik 	atomic_subtract_int(&nthreads, n);
22262dbc992SMateusz Guzik }
22362dbc992SMateusz Guzik 
22462dbc992SMateusz Guzik static void
22562dbc992SMateusz Guzik thread_count_dec(void)
22662dbc992SMateusz Guzik {
22762dbc992SMateusz Guzik 
22862dbc992SMateusz Guzik 	thread_count_sub(1);
22962dbc992SMateusz Guzik }
23062dbc992SMateusz Guzik 
23162dbc992SMateusz Guzik static lwpid_t
23262dbc992SMateusz Guzik tid_alloc(void)
23362dbc992SMateusz Guzik {
23462dbc992SMateusz Guzik 	static lwpid_t trytid;
23562dbc992SMateusz Guzik 	lwpid_t tid;
23662dbc992SMateusz Guzik 
23762dbc992SMateusz Guzik 	mtx_lock(&tid_lock);
23835bb59edSMateusz Guzik 	/*
23935bb59edSMateusz Guzik 	 * It is an invariant that the bitmap is big enough to hold maxthread
24035bb59edSMateusz Guzik 	 * IDs. If we got to this point there has to be at least one free.
24135bb59edSMateusz Guzik 	 */
24235bb59edSMateusz Guzik 	if (trytid >= maxthread)
24335bb59edSMateusz Guzik 		trytid = 0;
24435bb59edSMateusz Guzik 	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
24535bb59edSMateusz Guzik 	if (tid == -1) {
24635bb59edSMateusz Guzik 		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
24735bb59edSMateusz Guzik 		trytid = 0;
24835bb59edSMateusz Guzik 		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
24935bb59edSMateusz Guzik 		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
250ec6ea5e8SDavid Xu 	}
25135bb59edSMateusz Guzik 	bit_set(tid_bitmap, tid);
252934e7e5eSMateusz Guzik 	trytid = tid + 1;
253ec6ea5e8SDavid Xu 	mtx_unlock(&tid_lock);
25435bb59edSMateusz Guzik 	return (tid + NO_PID);
255ec6ea5e8SDavid Xu }
256ec6ea5e8SDavid Xu 
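/*
 * Worked example of the search above (illustrative): with
 * maxthread = 4 and bits 0 and 1 set, trytid = 0 makes bit_ffc_at()
 * return 2; the bit is set, trytid advances to 3 and the caller gets
 * 2 + NO_PID, the offset keeping TIDs disjoint from PIDs.  Should
 * trytid start past the last clear bit, the first scan yields -1 and
 * the code rescans once from 0, which must succeed because the
 * bitmap is sized to hold maxthread IDs.
 */
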
257ec6ea5e8SDavid Xu static void
258755341dfSMateusz Guzik tid_free_locked(lwpid_t rtid)
259ec6ea5e8SDavid Xu {
26035bb59edSMateusz Guzik 	lwpid_t tid;
261ec6ea5e8SDavid Xu 
262755341dfSMateusz Guzik 	mtx_assert(&tid_lock, MA_OWNED);
26335bb59edSMateusz Guzik 	KASSERT(rtid >= NO_PID,
26435bb59edSMateusz Guzik 	    ("%s: invalid tid %d\n", __func__, rtid));
26535bb59edSMateusz Guzik 	tid = rtid - NO_PID;
26635bb59edSMateusz Guzik 	KASSERT(bit_test(tid_bitmap, tid) != 0,
26735bb59edSMateusz Guzik 	    ("thread ID %d not allocated\n", rtid));
26835bb59edSMateusz Guzik 	bit_clear(tid_bitmap, tid);
269755341dfSMateusz Guzik }
270755341dfSMateusz Guzik 
271755341dfSMateusz Guzik static void
272755341dfSMateusz Guzik tid_free(lwpid_t rtid)
273755341dfSMateusz Guzik {
274755341dfSMateusz Guzik 
275755341dfSMateusz Guzik 	mtx_lock(&tid_lock);
276755341dfSMateusz Guzik 	tid_free_locked(rtid);
277755341dfSMateusz Guzik 	mtx_unlock(&tid_lock);
278755341dfSMateusz Guzik }
279755341dfSMateusz Guzik 
280755341dfSMateusz Guzik static void
281755341dfSMateusz Guzik tid_free_batch(lwpid_t *batch, int n)
282755341dfSMateusz Guzik {
283755341dfSMateusz Guzik 	int i;
284755341dfSMateusz Guzik 
285755341dfSMateusz Guzik 	mtx_lock(&tid_lock);
286755341dfSMateusz Guzik 	for (i = 0; i < n; i++) {
287755341dfSMateusz Guzik 		tid_free_locked(batch[i]);
288755341dfSMateusz Guzik 	}
289ec6ea5e8SDavid Xu 	mtx_unlock(&tid_lock);
290ec6ea5e8SDavid Xu }
291ec6ea5e8SDavid Xu 
292fdcac928SMarcel Moolenaar /*
2935ef7b7a0SMateusz Guzik  * Batching for thread reaping.
2945ef7b7a0SMateusz Guzik  */
2955ef7b7a0SMateusz Guzik struct tidbatch {
2965ef7b7a0SMateusz Guzik 	lwpid_t tab[16];
2975ef7b7a0SMateusz Guzik 	int n;
2985ef7b7a0SMateusz Guzik };
2995ef7b7a0SMateusz Guzik 
3005ef7b7a0SMateusz Guzik static void
3015ef7b7a0SMateusz Guzik tidbatch_prep(struct tidbatch *tb)
3025ef7b7a0SMateusz Guzik {
3035ef7b7a0SMateusz Guzik 
3045ef7b7a0SMateusz Guzik 	tb->n = 0;
3055ef7b7a0SMateusz Guzik }
3065ef7b7a0SMateusz Guzik 
3075ef7b7a0SMateusz Guzik static void
3085ef7b7a0SMateusz Guzik tidbatch_add(struct tidbatch *tb, struct thread *td)
3095ef7b7a0SMateusz Guzik {
3105ef7b7a0SMateusz Guzik 
3115ef7b7a0SMateusz Guzik 	KASSERT(tb->n < nitems(tb->tab),
3125ef7b7a0SMateusz Guzik 	    ("%s: count too high %d", __func__, tb->n));
3135ef7b7a0SMateusz Guzik 	tb->tab[tb->n] = td->td_tid;
3145ef7b7a0SMateusz Guzik 	tb->n++;
3155ef7b7a0SMateusz Guzik }
3165ef7b7a0SMateusz Guzik 
3175ef7b7a0SMateusz Guzik static void
3185ef7b7a0SMateusz Guzik tidbatch_process(struct tidbatch *tb)
3195ef7b7a0SMateusz Guzik {
3205ef7b7a0SMateusz Guzik 
3215ef7b7a0SMateusz Guzik 	KASSERT(tb->n <= nitems(tb->tab),
3225ef7b7a0SMateusz Guzik 	    ("%s: count too high %d", __func__, tb->n));
3235ef7b7a0SMateusz Guzik 	if (tb->n == nitems(tb->tab)) {
3245ef7b7a0SMateusz Guzik 		tid_free_batch(tb->tab, tb->n);
3255ef7b7a0SMateusz Guzik 		tb->n = 0;
3265ef7b7a0SMateusz Guzik 	}
3275ef7b7a0SMateusz Guzik }
3285ef7b7a0SMateusz Guzik 
3295ef7b7a0SMateusz Guzik static void
3305ef7b7a0SMateusz Guzik tidbatch_final(struct tidbatch *tb)
3315ef7b7a0SMateusz Guzik {
3325ef7b7a0SMateusz Guzik 
3335ef7b7a0SMateusz Guzik 	KASSERT(tb->n <= nitems(tb->tab),
3345ef7b7a0SMateusz Guzik 	    ("%s: count too high %d", __func__, tb->n));
3355ef7b7a0SMateusz Guzik 	if (tb->n != 0) {
3365ef7b7a0SMateusz Guzik 		tid_free_batch(tb->tab, tb->n);
3375ef7b7a0SMateusz Guzik 	}
3385ef7b7a0SMateusz Guzik }
3395ef7b7a0SMateusz Guzik 
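/*
 * Usage sketch for the helpers above (this mirrors what
 * thread_reap_domain() does below):
 *
 *	struct tidbatch tb;
 *
 *	tidbatch_prep(&tb);
 *	while ((td = <next zombie>) != NULL) {
 *		tidbatch_add(&tb, td);
 *		tidbatch_process(&tb);
 *	}
 *	tidbatch_final(&tb);
 *
 * tidbatch_process() flushes only when the 16-entry buffer is full,
 * so tid_lock is taken once per 16 threads rather than per thread;
 * tidbatch_final() flushes whatever remains.
 */
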
3405ef7b7a0SMateusz Guzik /*
341696058c3SJulian Elischer  * Prepare a thread for use.
34244990b8cSJulian Elischer  */
343b23f72e9SBrian Feldman static int
344b23f72e9SBrian Feldman thread_ctor(void *mem, int size, void *arg, int flags)
34544990b8cSJulian Elischer {
34644990b8cSJulian Elischer 	struct thread	*td;
34744990b8cSJulian Elischer 
34844990b8cSJulian Elischer 	td = (struct thread *)mem;
349fa2528acSAlex Richardson 	TD_SET_STATE(td, TDS_INACTIVE);
35094dd54b9SKonstantin Belousov 	td->td_lastcpu = td->td_oncpu = NOCPU;
3516c27c603SJuli Mallett 
3526c27c603SJuli Mallett 	/*
3536c27c603SJuli Mallett 	 * Note that td_critnest begins life as 1 because the thread is not
3546c27c603SJuli Mallett 	 * running and is thereby implicitly waiting to be on the receiving
355a54e85fdSJeff Roberson 	 * end of a context switch.
3566c27c603SJuli Mallett 	 */
357139b7550SJohn Baldwin 	td->td_critnest = 1;
358acbe332aSDavid Xu 	td->td_lend_user_pri = PRI_MAX;
359911b84b0SRobert Watson #ifdef AUDIT
360911b84b0SRobert Watson 	audit_thread_alloc(td);
361911b84b0SRobert Watson #endif
362598f2b81SMateusz Guzik #ifdef KDTRACE_HOOKS
363598f2b81SMateusz Guzik 	kdtrace_thread_ctor(td);
364598f2b81SMateusz Guzik #endif
365d10183d9SDavid Xu 	umtx_thread_alloc(td);
36619d3e47dSMateusz Guzik 	MPASS(td->td_sel == NULL);
367b23f72e9SBrian Feldman 	return (0);
36844990b8cSJulian Elischer }
36944990b8cSJulian Elischer 
37044990b8cSJulian Elischer /*
37144990b8cSJulian Elischer  * Reclaim a thread after use.
37244990b8cSJulian Elischer  */
37344990b8cSJulian Elischer static void
37444990b8cSJulian Elischer thread_dtor(void *mem, int size, void *arg)
37544990b8cSJulian Elischer {
37644990b8cSJulian Elischer 	struct thread *td;
37744990b8cSJulian Elischer 
37844990b8cSJulian Elischer 	td = (struct thread *)mem;
37944990b8cSJulian Elischer 
38044990b8cSJulian Elischer #ifdef INVARIANTS
38144990b8cSJulian Elischer 	/* Verify that this thread is in a safe state to free. */
382fa2528acSAlex Richardson 	switch (TD_GET_STATE(td)) {
38371fad9fdSJulian Elischer 	case TDS_INHIBITED:
38471fad9fdSJulian Elischer 	case TDS_RUNNING:
38571fad9fdSJulian Elischer 	case TDS_CAN_RUN:
38644990b8cSJulian Elischer 	case TDS_RUNQ:
38744990b8cSJulian Elischer 		/*
38844990b8cSJulian Elischer 		 * We must never unlink a thread that is in one of
38944990b8cSJulian Elischer 		 * these states, because it is currently active.
39044990b8cSJulian Elischer 		 */
39144990b8cSJulian Elischer 		panic("bad state for thread unlinking");
39244990b8cSJulian Elischer 		/* NOTREACHED */
39371fad9fdSJulian Elischer 	case TDS_INACTIVE:
39444990b8cSJulian Elischer 		break;
39544990b8cSJulian Elischer 	default:
39644990b8cSJulian Elischer 		panic("bad thread state");
39744990b8cSJulian Elischer 		/* NOTREACHED */
39844990b8cSJulian Elischer 	}
39944990b8cSJulian Elischer #endif
4006e8525ceSRobert Watson #ifdef AUDIT
4016e8525ceSRobert Watson 	audit_thread_free(td);
4026e8525ceSRobert Watson #endif
403598f2b81SMateusz Guzik #ifdef KDTRACE_HOOKS
404598f2b81SMateusz Guzik 	kdtrace_thread_dtor(td);
405598f2b81SMateusz Guzik #endif
4061ba4a712SPawel Jakub Dawidek 	/* Free all OSD associated to this thread. */
4071ba4a712SPawel Jakub Dawidek 	osd_thread_exit(td);
408aca4bb91SKonstantin Belousov 	td_softdep_cleanup(td);
409aca4bb91SKonstantin Belousov 	MPASS(td->td_su == NULL);
41019d3e47dSMateusz Guzik 	seltdfini(td);
41144990b8cSJulian Elischer }
41244990b8cSJulian Elischer 
41344990b8cSJulian Elischer /*
41444990b8cSJulian Elischer  * Initialize type-stable parts of a thread (when newly created).
41544990b8cSJulian Elischer  */
416b23f72e9SBrian Feldman static int
417b23f72e9SBrian Feldman thread_init(void *mem, int size, int flags)
41844990b8cSJulian Elischer {
41944990b8cSJulian Elischer 	struct thread *td;
42044990b8cSJulian Elischer 
42144990b8cSJulian Elischer 	td = (struct thread *)mem;
422247aba24SMarcel Moolenaar 
423b83e94beSMateusz Guzik 	td->td_allocdomain = vm_phys_domain(vtophys(td));
42444f3b092SJohn Baldwin 	td->td_sleepqueue = sleepq_alloc();
425961a7b24SJohn Baldwin 	td->td_turnstile = turnstile_alloc();
4268f0e9130SKonstantin Belousov 	td->td_rlqe = NULL;
4272ca45184SMatt Joras 	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
428d10183d9SDavid Xu 	umtx_thread_init(td);
42989b57fcfSKonstantin Belousov 	td->td_kstack = 0;
430ad8b1d85SKonstantin Belousov 	td->td_sel = NULL;
431b23f72e9SBrian Feldman 	return (0);
43244990b8cSJulian Elischer }
43344990b8cSJulian Elischer 
43444990b8cSJulian Elischer /*
43544990b8cSJulian Elischer  * Tear down type-stable parts of a thread (just before being discarded).
43644990b8cSJulian Elischer  */
43744990b8cSJulian Elischer static void
43844990b8cSJulian Elischer thread_fini(void *mem, int size)
43944990b8cSJulian Elischer {
44044990b8cSJulian Elischer 	struct thread *td;
44144990b8cSJulian Elischer 
44244990b8cSJulian Elischer 	td = (struct thread *)mem;
4432ca45184SMatt Joras 	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
4448f0e9130SKonstantin Belousov 	rlqentry_free(td->td_rlqe);
445961a7b24SJohn Baldwin 	turnstile_free(td->td_turnstile);
44644f3b092SJohn Baldwin 	sleepq_free(td->td_sleepqueue);
447d10183d9SDavid Xu 	umtx_thread_fini(td);
44819d3e47dSMateusz Guzik 	MPASS(td->td_sel == NULL);
44944990b8cSJulian Elischer }
4505215b187SJeff Roberson 
4515c8329edSJulian Elischer /*
4525215b187SJeff Roberson  * For a newly created process,
4535215b187SJeff Roberson  * link up all the structures and its initial threads etc.
454ed062c8dSJulian Elischer  * Called from:
455e7d939bdSMarcel Moolenaar  * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
456ed062c8dSJulian Elischer  * proc_dtor() (should go away)
457ed062c8dSJulian Elischer  * proc_init()
4585c8329edSJulian Elischer  */
4595c8329edSJulian Elischer void
46089b57fcfSKonstantin Belousov proc_linkup0(struct proc *p, struct thread *td)
46189b57fcfSKonstantin Belousov {
46289b57fcfSKonstantin Belousov 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
46389b57fcfSKonstantin Belousov 	proc_linkup(p, td);
46489b57fcfSKonstantin Belousov }
46589b57fcfSKonstantin Belousov 
46689b57fcfSKonstantin Belousov void
4678460a577SJohn Birrell proc_linkup(struct proc *p, struct thread *td)
4685c8329edSJulian Elischer {
469a54e85fdSJeff Roberson 
4709104847fSDavid Xu 	sigqueue_init(&p->p_sigqueue, p);
471ebceaf6dSDavid Xu 	p->p_ksi = ksiginfo_alloc(1);
472ebceaf6dSDavid Xu 	if (p->p_ksi != NULL) {
4735c474517SDavid Xu 		/* XXX p_ksi may be null if ksiginfo zone is not ready */
474ebceaf6dSDavid Xu 		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
475ebceaf6dSDavid Xu 	}
476b2f92ef9SDavid Xu 	LIST_INIT(&p->p_mqnotifier);
4775c8329edSJulian Elischer 	p->p_numthreads = 0;
4788460a577SJohn Birrell 	thread_link(td, p);
4795c8329edSJulian Elischer }
4805c8329edSJulian Elischer 
4811bd3cf5dSMateusz Guzik extern int max_threads_per_proc;
4821bd3cf5dSMateusz Guzik 
4835c8329edSJulian Elischer /*
48444990b8cSJulian Elischer  * Initialize global thread allocation resources.
48544990b8cSJulian Elischer  */
48644990b8cSJulian Elischer void
48744990b8cSJulian Elischer threadinit(void)
48844990b8cSJulian Elischer {
48926007fe3SMateusz Guzik 	u_long i;
490cf31cadeSMateusz Guzik 	lwpid_t tid0;
4915aa5420fSMark Johnston 	uint32_t flags;
49244990b8cSJulian Elischer 
4931bd3cf5dSMateusz Guzik 	/*
4941bd3cf5dSMateusz Guzik 	 * Place an upper limit on threads which can be allocated.
4951bd3cf5dSMateusz Guzik 	 *
4961bd3cf5dSMateusz Guzik 	 * Note that other factors may make the de facto limit much lower.
4971bd3cf5dSMateusz Guzik 	 *
4981bd3cf5dSMateusz Guzik 	 * Platform limits are somewhat arbitrary but deemed "more than good
4991bd3cf5dSMateusz Guzik 	 * enough" for the foreseeable future.
5001bd3cf5dSMateusz Guzik 	 */
5011bd3cf5dSMateusz Guzik 	if (maxthread == 0) {
5021bd3cf5dSMateusz Guzik #ifdef _LP64
5031bd3cf5dSMateusz Guzik 		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
5041bd3cf5dSMateusz Guzik #else
5051bd3cf5dSMateusz Guzik 		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
5061bd3cf5dSMateusz Guzik #endif
5071bd3cf5dSMateusz Guzik 	}
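	/*
	 * Worked example of the clamping above (illustrative; assumes
	 * the historical default max_threads_per_proc = 1500 and a
	 * machine where maxproc came out to 10000): the product is
	 * 15000000, so the _LP64 branch clamps maxthread to 1000000.
	 */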
5081bd3cf5dSMateusz Guzik 
5091ea7a6f8SPoul-Henning Kamp 	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
51035bb59edSMateusz Guzik 	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
51162dbc992SMateusz Guzik 	/*
51262dbc992SMateusz Guzik 	 * Handle thread0.
51362dbc992SMateusz Guzik 	 */
51462dbc992SMateusz Guzik 	thread_count_inc();
515cf31cadeSMateusz Guzik 	tid0 = tid_alloc();
516cf31cadeSMateusz Guzik 	if (tid0 != THREAD0_TID)
517cf31cadeSMateusz Guzik 		panic("tid0 %d != %d\n", tid0, THREAD0_TID);
5181ea7a6f8SPoul-Henning Kamp 
5195aa5420fSMark Johnston 	flags = UMA_ZONE_NOFREE;
5205aa5420fSMark Johnston #ifdef __aarch64__
5215aa5420fSMark Johnston 	/*
5225aa5420fSMark Johnston 	 * Force thread structures to be allocated from the direct map.
5235aa5420fSMark Johnston 	 * Otherwise, superpage promotions and demotions may temporarily
5245aa5420fSMark Johnston 	 * invalidate thread structure mappings.  For most dynamically allocated
5255aa5420fSMark Johnston 	 * structures this is not a problem, but translation faults cannot be
5265aa5420fSMark Johnston 	 * handled without accessing curthread.
5275aa5420fSMark Johnston 	 */
5285aa5420fSMark Johnston 	flags |= UMA_ZONE_CONTIG;
5295aa5420fSMark Johnston #endif
530de028f5aSJeff Roberson 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
53144990b8cSJulian Elischer 	    thread_ctor, thread_dtor, thread_init, thread_fini,
5325aa5420fSMark Johnston 	    32 - 1, flags);
533cf7d9a8cSDavid Xu 	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
53426007fe3SMateusz Guzik 	tidhashlock = (tidhash + 1) / 64;
53526007fe3SMateusz Guzik 	if (tidhashlock > 0)
53626007fe3SMateusz Guzik 		tidhashlock--;
53726007fe3SMateusz Guzik 	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
53826007fe3SMateusz Guzik 	    M_TIDHASH, M_WAITOK | M_ZERO);
53926007fe3SMateusz Guzik 	for (i = 0; i < tidhashlock + 1; i++)
54026007fe3SMateusz Guzik 		rw_init(&tidhashtbl_lock[i], "tidhash");
541d116b9f1SMateusz Guzik 
542d116b9f1SMateusz Guzik 	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
543d116b9f1SMateusz Guzik 	callout_init(&thread_reap_callout, 1);
544d116b9f1SMateusz Guzik 	callout_reset(&thread_reap_callout, 5 * hz, thread_reap_callout_cb, NULL);
54544990b8cSJulian Elischer }
54644990b8cSJulian Elischer 
54744990b8cSJulian Elischer /*
548ff8fbcffSJeff Roberson  * Place an unused thread on the zombie list.
54944990b8cSJulian Elischer  */
55044990b8cSJulian Elischer void
551ff8fbcffSJeff Roberson thread_zombie(struct thread *td)
55244990b8cSJulian Elischer {
553d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
554c5315f51SMateusz Guzik 	struct thread *ztd;
555c5315f51SMateusz Guzik 
556a9568cd2SMateusz Guzik 	tdd = &thread_domain_data[td->td_allocdomain];
557d116b9f1SMateusz Guzik 	ztd = atomic_load_ptr(&tdd->tdd_zombies);
558c5315f51SMateusz Guzik 	for (;;) {
559c5315f51SMateusz Guzik 		td->td_zombie = ztd;
560d116b9f1SMateusz Guzik 		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
561c5315f51SMateusz Guzik 		    (uintptr_t *)&ztd, (uintptr_t)td))
562c5315f51SMateusz Guzik 			break;
563c5315f51SMateusz Guzik 		continue;
564c5315f51SMateusz Guzik 	}
56544990b8cSJulian Elischer }
56644990b8cSJulian Elischer 
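/*
 * The loop above is a lock-free LIFO push; in pseudo-C:
 *
 *	do {
 *		td->td_zombie = ztd;
 *	} while (!CAS(&tdd->tdd_zombies, &ztd, td));
 *
 * where a failed compare-and-set reloads ztd with the current head
 * (which is why the explicit continue never does extra work).  The
 * consumer, thread_reap_domain(), detaches the whole list with one
 * atomic swap, so the ABA concerns of a lock-free pop do not apply.
 */
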
5675c8329edSJulian Elischer /*
568ff8fbcffSJeff Roberson  * Release a thread that has exited after cpu_throw().
569ff8fbcffSJeff Roberson  */
570ff8fbcffSJeff Roberson void
571ff8fbcffSJeff Roberson thread_stash(struct thread *td)
572ff8fbcffSJeff Roberson {
573ff8fbcffSJeff Roberson 	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
574ff8fbcffSJeff Roberson 	thread_zombie(td);
575ff8fbcffSJeff Roberson }
576ff8fbcffSJeff Roberson 
577ff8fbcffSJeff Roberson /*
578d116b9f1SMateusz Guzik  * Reap zombies from passed domain.
57944990b8cSJulian Elischer  */
580d116b9f1SMateusz Guzik static void
581d116b9f1SMateusz Guzik thread_reap_domain(struct thread_domain_data *tdd)
58244990b8cSJulian Elischer {
583c5315f51SMateusz Guzik 	struct thread *itd, *ntd;
5845ef7b7a0SMateusz Guzik 	struct tidbatch tidbatch;
585f34a2f56SMateusz Guzik 	struct credbatch credbatch;
5865ef7b7a0SMateusz Guzik 	int tdcount;
587fb8ab680SMateusz Guzik 	struct plimit *lim;
588fb8ab680SMateusz Guzik 	int limcount;
58944990b8cSJulian Elischer 
59044990b8cSJulian Elischer 	/*
591c5315f51SMateusz Guzik 	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
592c5315f51SMateusz Guzik 	 * but most of the time the list is empty.
59344990b8cSJulian Elischer 	 */
594d116b9f1SMateusz Guzik 	if (tdd->tdd_zombies == NULL)
595c5315f51SMateusz Guzik 		return;
596c5315f51SMateusz Guzik 
597d116b9f1SMateusz Guzik 	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
598c5315f51SMateusz Guzik 	    (uintptr_t)NULL);
5995ef7b7a0SMateusz Guzik 	if (itd == NULL)
6005ef7b7a0SMateusz Guzik 		return;
6015ef7b7a0SMateusz Guzik 
602d116b9f1SMateusz Guzik 	/*
603d116b9f1SMateusz Guzik 	 * Multiple CPUs can get here; the race is fine as ticks is only
604d116b9f1SMateusz Guzik 	 * advisory.
605d116b9f1SMateusz Guzik 	 */
606d116b9f1SMateusz Guzik 	tdd->tdd_reapticks = ticks;
607d116b9f1SMateusz Guzik 
6085ef7b7a0SMateusz Guzik 	tidbatch_prep(&tidbatch);
609f34a2f56SMateusz Guzik 	credbatch_prep(&credbatch);
6105ef7b7a0SMateusz Guzik 	tdcount = 0;
611fb8ab680SMateusz Guzik 	lim = NULL;
612fb8ab680SMateusz Guzik 	limcount = 0;
613d116b9f1SMateusz Guzik 
614c5315f51SMateusz Guzik 	while (itd != NULL) {
615c5315f51SMateusz Guzik 		ntd = itd->td_zombie;
6165ef7b7a0SMateusz Guzik 		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
6175ef7b7a0SMateusz Guzik 		tidbatch_add(&tidbatch, itd);
618f34a2f56SMateusz Guzik 		credbatch_add(&credbatch, itd);
619fb8ab680SMateusz Guzik 		MPASS(itd->td_limit != NULL);
620fb8ab680SMateusz Guzik 		if (lim != itd->td_limit) {
621fb8ab680SMateusz Guzik 			if (limcount != 0) {
622fb8ab680SMateusz Guzik 				lim_freen(lim, limcount);
623fb8ab680SMateusz Guzik 				limcount = 0;
624fb8ab680SMateusz Guzik 			}
625fb8ab680SMateusz Guzik 		}
626fb8ab680SMateusz Guzik 		lim = itd->td_limit;
627fb8ab680SMateusz Guzik 		limcount++;
628755341dfSMateusz Guzik 		thread_free_batched(itd);
6295ef7b7a0SMateusz Guzik 		tidbatch_process(&tidbatch);
630f34a2f56SMateusz Guzik 		credbatch_process(&credbatch);
6315ef7b7a0SMateusz Guzik 		tdcount++;
6325ef7b7a0SMateusz Guzik 		if (tdcount == 32) {
6335ef7b7a0SMateusz Guzik 			thread_count_sub(tdcount);
6345ef7b7a0SMateusz Guzik 			tdcount = 0;
635755341dfSMateusz Guzik 		}
636c5315f51SMateusz Guzik 		itd = ntd;
63744990b8cSJulian Elischer 	}
638755341dfSMateusz Guzik 
6395ef7b7a0SMateusz Guzik 	tidbatch_final(&tidbatch);
640f34a2f56SMateusz Guzik 	credbatch_final(&credbatch);
6415ef7b7a0SMateusz Guzik 	if (tdcount != 0) {
6425ef7b7a0SMateusz Guzik 		thread_count_sub(tdcount);
643755341dfSMateusz Guzik 	}
644fb8ab680SMateusz Guzik 	MPASS(limcount != 0);
645fb8ab680SMateusz Guzik 	lim_freen(lim, limcount);
646ed062c8dSJulian Elischer }
64744990b8cSJulian Elischer 
6484f0db5e0SJulian Elischer /*
649d116b9f1SMateusz Guzik  * Reap zombies from all domains.
650d116b9f1SMateusz Guzik  */
651d116b9f1SMateusz Guzik static void
652d116b9f1SMateusz Guzik thread_reap_all(void)
653d116b9f1SMateusz Guzik {
654d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
655d116b9f1SMateusz Guzik 	int i, domain;
656d116b9f1SMateusz Guzik 
657d116b9f1SMateusz Guzik 	domain = PCPU_GET(domain);
658d116b9f1SMateusz Guzik 	for (i = 0; i < vm_ndomains; i++) {
659d116b9f1SMateusz Guzik 		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
660d116b9f1SMateusz Guzik 		thread_reap_domain(tdd);
661d116b9f1SMateusz Guzik 	}
662d116b9f1SMateusz Guzik }
663d116b9f1SMateusz Guzik 
664d116b9f1SMateusz Guzik /*
665d116b9f1SMateusz Guzik  * Reap zombies from local domain.
666d116b9f1SMateusz Guzik  */
667b83e94beSMateusz Guzik static void
668d116b9f1SMateusz Guzik thread_reap(void)
669d116b9f1SMateusz Guzik {
670d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
671d116b9f1SMateusz Guzik 	int domain;
672d116b9f1SMateusz Guzik 
673d116b9f1SMateusz Guzik 	domain = PCPU_GET(domain);
674d116b9f1SMateusz Guzik 	tdd = &thread_domain_data[domain];
675d116b9f1SMateusz Guzik 
676d116b9f1SMateusz Guzik 	thread_reap_domain(tdd);
677d116b9f1SMateusz Guzik }
678d116b9f1SMateusz Guzik 
679d116b9f1SMateusz Guzik static void
680d116b9f1SMateusz Guzik thread_reap_task_cb(void *arg __unused, int pending __unused)
681d116b9f1SMateusz Guzik {
682d116b9f1SMateusz Guzik 
683d116b9f1SMateusz Guzik 	thread_reap_all();
684d116b9f1SMateusz Guzik }
685d116b9f1SMateusz Guzik 
686d116b9f1SMateusz Guzik static void
687d116b9f1SMateusz Guzik thread_reap_callout_cb(void *arg __unused)
688d116b9f1SMateusz Guzik {
689d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
690d116b9f1SMateusz Guzik 	int i, cticks, lticks;
691d116b9f1SMateusz Guzik 	bool wantreap;
692d116b9f1SMateusz Guzik 
693d116b9f1SMateusz Guzik 	wantreap = false;
694d116b9f1SMateusz Guzik 	cticks = atomic_load_int(&ticks);
695d116b9f1SMateusz Guzik 	for (i = 0; i < vm_ndomains; i++) {
696d116b9f1SMateusz Guzik 		tdd = &thread_domain_data[i];
697d116b9f1SMateusz Guzik 		lticks = tdd->tdd_reapticks;
698d116b9f1SMateusz Guzik 		if (tdd->tdd_zombies != NULL &&
699d116b9f1SMateusz Guzik 		    (u_int)(cticks - lticks) > 5 * hz) {
700d116b9f1SMateusz Guzik 			wantreap = true;
701d116b9f1SMateusz Guzik 			break;
702d116b9f1SMateusz Guzik 		}
703d116b9f1SMateusz Guzik 	}
704d116b9f1SMateusz Guzik 
705d116b9f1SMateusz Guzik 	if (wantreap)
706d116b9f1SMateusz Guzik 		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
707d116b9f1SMateusz Guzik 	callout_reset(&thread_reap_callout, 5 * hz, thread_reap_callout_cb, NULL);
708d116b9f1SMateusz Guzik }
709d116b9f1SMateusz Guzik 
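/*
 * Putting the reaping pieces together: zombies are normally collected
 * opportunistically via thread_reap(), called from thread_count_inc()
 * and thread_wait().  The callout above runs every 5 seconds as a
 * backstop and enqueues the taskqueue-backed thread_reap_all() only
 * when some domain has been holding zombies for more than 5 seconds,
 * keeping the common case free of taskqueue traffic.
 */
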
710d116b9f1SMateusz Guzik /*
71144990b8cSJulian Elischer  * Allocate a thread.
71244990b8cSJulian Elischer  */
71344990b8cSJulian Elischer struct thread *
7148a945d10SKonstantin Belousov thread_alloc(int pages)
71544990b8cSJulian Elischer {
71689b57fcfSKonstantin Belousov 	struct thread *td;
7171bd3cf5dSMateusz Guzik 	lwpid_t tid;
7188460a577SJohn Birrell 
71962dbc992SMateusz Guzik 	if (!thread_count_inc()) {
7201bd3cf5dSMateusz Guzik 		return (NULL);
7211bd3cf5dSMateusz Guzik 	}
7221bd3cf5dSMateusz Guzik 
72362dbc992SMateusz Guzik 	tid = tid_alloc();
7241bd3cf5dSMateusz Guzik 	td = uma_zalloc(thread_zone, M_WAITOK);
72589b57fcfSKonstantin Belousov 	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
7268a945d10SKonstantin Belousov 	if (!vm_thread_new(td, pages)) {
72789b57fcfSKonstantin Belousov 		uma_zfree(thread_zone, td);
7281bd3cf5dSMateusz Guzik 		tid_free(tid);
72962dbc992SMateusz Guzik 		thread_count_dec();
73089b57fcfSKonstantin Belousov 		return (NULL);
73189b57fcfSKonstantin Belousov 	}
7321bd3cf5dSMateusz Guzik 	td->td_tid = tid;
7330c3967e7SMarcel Moolenaar 	cpu_thread_alloc(td);
7341bd3cf5dSMateusz Guzik 	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
73589b57fcfSKonstantin Belousov 	return (td);
73644990b8cSJulian Elischer }
73744990b8cSJulian Elischer 
7388a945d10SKonstantin Belousov int
7398a945d10SKonstantin Belousov thread_alloc_stack(struct thread *td, int pages)
7408a945d10SKonstantin Belousov {
7418a945d10SKonstantin Belousov 
7428a945d10SKonstantin Belousov 	KASSERT(td->td_kstack == 0,
7438a945d10SKonstantin Belousov 	    ("thread_alloc_stack called on a thread with kstack"));
7448a945d10SKonstantin Belousov 	if (!vm_thread_new(td, pages))
7458a945d10SKonstantin Belousov 		return (0);
7468a945d10SKonstantin Belousov 	cpu_thread_alloc(td);
7478a945d10SKonstantin Belousov 	return (1);
7488a945d10SKonstantin Belousov }
7494f0db5e0SJulian Elischer 
7504f0db5e0SJulian Elischer /*
75144990b8cSJulian Elischer  * Deallocate a thread.
75244990b8cSJulian Elischer  */
753755341dfSMateusz Guzik static void
754755341dfSMateusz Guzik thread_free_batched(struct thread *td)
75544990b8cSJulian Elischer {
7562e6b8de4SJeff Roberson 
7572e6b8de4SJeff Roberson 	lock_profile_thread_exit(td);
75845aea8deSJeff Roberson 	if (td->td_cpuset)
759d7f687fcSJeff Roberson 		cpuset_rel(td->td_cpuset);
760d7f687fcSJeff Roberson 	td->td_cpuset = NULL;
7610c3967e7SMarcel Moolenaar 	cpu_thread_free(td);
76289b57fcfSKonstantin Belousov 	if (td->td_kstack != 0)
76389b57fcfSKonstantin Belousov 		vm_thread_dispose(td);
7642d19b736SKonstantin Belousov 	callout_drain(&td->td_slpcallout);
765755341dfSMateusz Guzik 	/*
766755341dfSMateusz Guzik 	 * Freeing of the TID is handled by the caller.
767755341dfSMateusz Guzik 	 */
7681bd3cf5dSMateusz Guzik 	td->td_tid = -1;
76944990b8cSJulian Elischer 	uma_zfree(thread_zone, td);
77044990b8cSJulian Elischer }
77144990b8cSJulian Elischer 
7724ea6a9a2SMateusz Guzik void
773755341dfSMateusz Guzik thread_free(struct thread *td)
774755341dfSMateusz Guzik {
775755341dfSMateusz Guzik 	lwpid_t tid;
776755341dfSMateusz Guzik 
7775ef7b7a0SMateusz Guzik 	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
778755341dfSMateusz Guzik 	tid = td->td_tid;
779755341dfSMateusz Guzik 	thread_free_batched(td);
780755341dfSMateusz Guzik 	tid_free(tid);
78162dbc992SMateusz Guzik 	thread_count_dec();
782755341dfSMateusz Guzik }
783755341dfSMateusz Guzik 
784755341dfSMateusz Guzik void
7854ea6a9a2SMateusz Guzik thread_cow_get_proc(struct thread *newtd, struct proc *p)
7864ea6a9a2SMateusz Guzik {
7874ea6a9a2SMateusz Guzik 
7884ea6a9a2SMateusz Guzik 	PROC_LOCK_ASSERT(p, MA_OWNED);
7891724c563SMateusz Guzik 	newtd->td_realucred = crcowget(p->p_ucred);
7901724c563SMateusz Guzik 	newtd->td_ucred = newtd->td_realucred;
791f6f6d240SMateusz Guzik 	newtd->td_limit = lim_hold(p->p_limit);
7924ea6a9a2SMateusz Guzik 	newtd->td_cowgen = p->p_cowgen;
7934ea6a9a2SMateusz Guzik }
7944ea6a9a2SMateusz Guzik 
7954ea6a9a2SMateusz Guzik void
7964ea6a9a2SMateusz Guzik thread_cow_get(struct thread *newtd, struct thread *td)
7974ea6a9a2SMateusz Guzik {
7984ea6a9a2SMateusz Guzik 
7991724c563SMateusz Guzik 	MPASS(td->td_realucred == td->td_ucred);
8001724c563SMateusz Guzik 	newtd->td_realucred = crcowget(td->td_realucred);
8011724c563SMateusz Guzik 	newtd->td_ucred = newtd->td_realucred;
802f6f6d240SMateusz Guzik 	newtd->td_limit = lim_hold(td->td_limit);
8034ea6a9a2SMateusz Guzik 	newtd->td_cowgen = td->td_cowgen;
8044ea6a9a2SMateusz Guzik }
8054ea6a9a2SMateusz Guzik 
8064ea6a9a2SMateusz Guzik void
8074ea6a9a2SMateusz Guzik thread_cow_free(struct thread *td)
8084ea6a9a2SMateusz Guzik {
8094ea6a9a2SMateusz Guzik 
8101724c563SMateusz Guzik 	if (td->td_realucred != NULL)
8111724c563SMateusz Guzik 		crcowfree(td);
812cd672ca6SMateusz Guzik 	if (td->td_limit != NULL)
813f6f6d240SMateusz Guzik 		lim_free(td->td_limit);
8144ea6a9a2SMateusz Guzik }
8154ea6a9a2SMateusz Guzik 
8164ea6a9a2SMateusz Guzik void
8174ea6a9a2SMateusz Guzik thread_cow_update(struct thread *td)
8184ea6a9a2SMateusz Guzik {
8194ea6a9a2SMateusz Guzik 	struct proc *p;
820cd672ca6SMateusz Guzik 	struct ucred *oldcred;
821cd672ca6SMateusz Guzik 	struct plimit *oldlimit;
8224ea6a9a2SMateusz Guzik 
8234ea6a9a2SMateusz Guzik 	p = td->td_proc;
824cd672ca6SMateusz Guzik 	oldlimit = NULL;
8254ea6a9a2SMateusz Guzik 	PROC_LOCK(p);
8261724c563SMateusz Guzik 	oldcred = crcowsync();
827cd672ca6SMateusz Guzik 	if (td->td_limit != p->p_limit) {
828cd672ca6SMateusz Guzik 		oldlimit = td->td_limit;
829cd672ca6SMateusz Guzik 		td->td_limit = lim_hold(p->p_limit);
830cd672ca6SMateusz Guzik 	}
8314ea6a9a2SMateusz Guzik 	td->td_cowgen = p->p_cowgen;
8324ea6a9a2SMateusz Guzik 	PROC_UNLOCK(p);
833cd672ca6SMateusz Guzik 	if (oldcred != NULL)
834cd672ca6SMateusz Guzik 		crfree(oldcred);
835cd672ca6SMateusz Guzik 	if (oldlimit != NULL)
836cd672ca6SMateusz Guzik 		lim_free(oldlimit);
8374ea6a9a2SMateusz Guzik }
8384ea6a9a2SMateusz Guzik 
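/*
 * Usage sketch for the generation-count scheme above (a typical
 * consumer; exact call sites vary): code running on a thread
 * resynchronizes lazily,
 *
 *	if (td->td_cowgen != p->p_cowgen)
 *		thread_cow_update(td);
 *
 * so a credential or limit change is paid for only by threads that
 * actually run afterwards, and the stale objects are freed outside
 * the process lock.
 */
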
83944990b8cSJulian Elischer /*
84044990b8cSJulian Elischer  * Discard the current thread and exit from its context.
84194e0a4cdSJulian Elischer  * Always called with scheduler locked.
84244990b8cSJulian Elischer  *
84344990b8cSJulian Elischer  * Because we can't free a thread while we're operating under its context,
844696058c3SJulian Elischer  * push the current thread into our CPU's deadthread holder. This means
845696058c3SJulian Elischer  * we needn't worry about someone else grabbing our context before we
8466617724cSJeff Roberson  * do a cpu_throw().
84744990b8cSJulian Elischer  */
84844990b8cSJulian Elischer void
84944990b8cSJulian Elischer thread_exit(void)
85044990b8cSJulian Elischer {
8517e3a96eaSJohn Baldwin 	uint64_t runtime, new_switchtime;
85244990b8cSJulian Elischer 	struct thread *td;
8531c4bcd05SJeff Roberson 	struct thread *td2;
85444990b8cSJulian Elischer 	struct proc *p;
8557847a9daSJohn Baldwin 	int wakeup_swapper;
85644990b8cSJulian Elischer 
85744990b8cSJulian Elischer 	td = curthread;
85844990b8cSJulian Elischer 	p = td->td_proc;
85944990b8cSJulian Elischer 
860a54e85fdSJeff Roberson 	PROC_SLOCK_ASSERT(p, MA_OWNED);
861ed062c8dSJulian Elischer 	mtx_assert(&Giant, MA_NOTOWNED);
862a54e85fdSJeff Roberson 
86344990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
864ed062c8dSJulian Elischer 	KASSERT(p != NULL, ("thread exiting without a process"));
865cc701b73SRobert Watson 	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
866e01eafefSJulian Elischer 	    (long)p->p_pid, td->td_name);
8676c9271a9SAndriy Gapon 	SDT_PROBE0(proc, , , lwp__exit);
8689104847fSDavid Xu 	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
869936c24faSMateusz Guzik 	MPASS(td->td_realucred == td->td_ucred);
87044990b8cSJulian Elischer 
871ed062c8dSJulian Elischer 	/*
872ed062c8dSJulian Elischer 	 * Drop FPU & debug register state storage, or any other
873ed062c8dSJulian Elischer 	 * architecture-specific resources that would not be present
874ed062c8dSJulian Elischer 	 * in a newly created process.
875ed062c8dSJulian Elischer 	 */
876bd07998eSKonstantin Belousov 	cpu_thread_exit(td);
87744990b8cSJulian Elischer 
878ed062c8dSJulian Elischer 	/*
8791faf202eSJulian Elischer 	 * The last thread is left attached to the process
8801faf202eSJulian Elischer 	 * so that the whole bundle gets recycled. Skip
881ed062c8dSJulian Elischer 	 * all this stuff if we never had threads.
882ed062c8dSJulian Elischer 	 * EXIT clears all sign of other threads when
883ed062c8dSJulian Elischer 	 * it goes to single threading, so the last thread always
884ed062c8dSJulian Elischer 	 * takes the short path.
8851faf202eSJulian Elischer 	 */
886ed062c8dSJulian Elischer 	if (p->p_flag & P_HADTHREADS) {
8871faf202eSJulian Elischer 		if (p->p_numthreads > 1) {
888fd229b5bSKonstantin Belousov 			atomic_add_int(&td->td_proc->p_exitthreads, 1);
889d3a0bd78SJulian Elischer 			thread_unlink(td);
8901c4bcd05SJeff Roberson 			td2 = FIRST_THREAD_IN_PROC(p);
8911c4bcd05SJeff Roberson 			sched_exit_thread(td2, td);
892ed062c8dSJulian Elischer 
893ed062c8dSJulian Elischer 			/*
89444990b8cSJulian Elischer 			 * The test below is NOT true if we are the
8959182554aSKonstantin Belousov 			 * sole exiting thread. P_STOPPED_SINGLE is unset
89644990b8cSJulian Elischer 			 * in exit1() after it is the only survivor.
89744990b8cSJulian Elischer 			 */
8981279572aSDavid Xu 			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
89944990b8cSJulian Elischer 				if (p->p_numthreads == p->p_suspcount) {
900a54e85fdSJeff Roberson 					thread_lock(p->p_singlethread);
9017847a9daSJohn Baldwin 					wakeup_swapper = thread_unsuspend_one(
90284cdea97SKonstantin Belousov 						p->p_singlethread, p, false);
9037847a9daSJohn Baldwin 					if (wakeup_swapper)
9047847a9daSJohn Baldwin 						kick_proc0();
90544990b8cSJulian Elischer 				}
90644990b8cSJulian Elischer 			}
90748bfcdddSJulian Elischer 
908696058c3SJulian Elischer 			PCPU_SET(deadthread, td);
9091faf202eSJulian Elischer 		} else {
910ed062c8dSJulian Elischer 			/*
911ed062c8dSJulian Elischer 			 * The last thread is exiting.. but not through exit()
912ed062c8dSJulian Elischer 			 */
913ed062c8dSJulian Elischer 			panic("thread_exit: Last thread exiting on its own");
914ed062c8dSJulian Elischer 		}
9151faf202eSJulian Elischer 	}
91616d95d4fSJoseph Koshy #ifdef	HWPMC_HOOKS
91716d95d4fSJoseph Koshy 	/*
91816d95d4fSJoseph Koshy 	 * If this thread is part of a process that is being tracked by hwpmc(4),
91916d95d4fSJoseph Koshy 	 * inform the module of the thread's impending exit.
92016d95d4fSJoseph Koshy 	 */
9216161b98cSMatt Macy 	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
92216d95d4fSJoseph Koshy 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
9236161b98cSMatt Macy 		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
924ebfaf69cSMatt Macy 	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
925ebfaf69cSMatt Macy 		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
92616d95d4fSJoseph Koshy #endif
927a54e85fdSJeff Roberson 	PROC_UNLOCK(p);
9285c7bebf9SKonstantin Belousov 	PROC_STATLOCK(p);
9295c7bebf9SKonstantin Belousov 	thread_lock(td);
9305c7bebf9SKonstantin Belousov 	PROC_SUNLOCK(p);
9317e3a96eaSJohn Baldwin 
9327e3a96eaSJohn Baldwin 	/* Do the same timestamp bookkeeping that mi_switch() would do. */
9337e3a96eaSJohn Baldwin 	new_switchtime = cpu_ticks();
9347e3a96eaSJohn Baldwin 	runtime = new_switchtime - PCPU_GET(switchtime);
9357e3a96eaSJohn Baldwin 	td->td_runtime += runtime;
9367e3a96eaSJohn Baldwin 	td->td_incruntime += runtime;
9377e3a96eaSJohn Baldwin 	PCPU_SET(switchtime, new_switchtime);
9387e3a96eaSJohn Baldwin 	PCPU_SET(switchticks, ticks);
93983c9dea1SGleb Smirnoff 	VM_CNT_INC(v_swtch);
9407e3a96eaSJohn Baldwin 
9417e3a96eaSJohn Baldwin 	/* Save our resource usage in our process. */
9427e3a96eaSJohn Baldwin 	td->td_ru.ru_nvcsw++;
94361a74c5cSJeff Roberson 	ruxagg_locked(p, td);
9447e3a96eaSJohn Baldwin 	rucollect(&p->p_ru, &td->td_ru);
9455c7bebf9SKonstantin Belousov 	PROC_STATUNLOCK(p);
9467e3a96eaSJohn Baldwin 
947fa2528acSAlex Richardson 	TD_SET_STATE(td, TDS_INACTIVE);
9483d06b4b3SAttilio Rao #ifdef WITNESS
9493d06b4b3SAttilio Rao 	witness_thread_exit(td);
9503d06b4b3SAttilio Rao #endif
951732d9528SJulian Elischer 	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
952a54e85fdSJeff Roberson 	sched_throw(td);
953cc66ebe2SPeter Wemm 	panic("I'm a teapot!");
95444990b8cSJulian Elischer 	/* NOTREACHED */
95544990b8cSJulian Elischer }
95644990b8cSJulian Elischer 
95744990b8cSJulian Elischer /*
958696058c3SJulian Elischer  * Do any thread-specific cleanups that may be needed in wait().
95937814395SPeter Wemm  * Called with Giant, proc and schedlock not held.
960696058c3SJulian Elischer  */
961696058c3SJulian Elischer void
962696058c3SJulian Elischer thread_wait(struct proc *p)
963696058c3SJulian Elischer {
964696058c3SJulian Elischer 	struct thread *td;
965696058c3SJulian Elischer 
96637814395SPeter Wemm 	mtx_assert(&Giant, MA_NOTOWNED);
967624bf9e1SKonstantin Belousov 	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
968624bf9e1SKonstantin Belousov 	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
969ff8fbcffSJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
970ff8fbcffSJeff Roberson 	/* Lock the last thread so we spin until it exits cpu_throw(). */
971ff8fbcffSJeff Roberson 	thread_lock(td);
972ff8fbcffSJeff Roberson 	thread_unlock(td);
9732e6b8de4SJeff Roberson 	lock_profile_thread_exit(td);
974d7f687fcSJeff Roberson 	cpuset_rel(td->td_cpuset);
975d7f687fcSJeff Roberson 	td->td_cpuset = NULL;
976696058c3SJulian Elischer 	cpu_thread_clean(td);
9774ea6a9a2SMateusz Guzik 	thread_cow_free(td);
9782d19b736SKonstantin Belousov 	callout_drain(&td->td_slpcallout);
979696058c3SJulian Elischer 	thread_reap();	/* check for zombie threads etc. */
980696058c3SJulian Elischer }
981696058c3SJulian Elischer 
982696058c3SJulian Elischer /*
98344990b8cSJulian Elischer  * Link a thread to a process.
9841faf202eSJulian Elischer  * Set up anything that needs to be initialized for it to
9851faf202eSJulian Elischer  * be used by the process.
98644990b8cSJulian Elischer  */
98744990b8cSJulian Elischer void
9888460a577SJohn Birrell thread_link(struct thread *td, struct proc *p)
98944990b8cSJulian Elischer {
99044990b8cSJulian Elischer 
991a54e85fdSJeff Roberson 	/*
992a54e85fdSJeff Roberson 	 * XXX This can't be enabled because it's called for proc0 before
993374ae2a3SJeff Roberson 	 * its lock has been created.
994374ae2a3SJeff Roberson 	 * PROC_LOCK_ASSERT(p, MA_OWNED);
995a54e85fdSJeff Roberson 	 */
996fa2528acSAlex Richardson 	TD_SET_STATE(td, TDS_INACTIVE);
99744990b8cSJulian Elischer 	td->td_proc     = p;
998b61ce5b0SJeff Roberson 	td->td_flags    = TDF_INMEM;
99944990b8cSJulian Elischer 
10001faf202eSJulian Elischer 	LIST_INIT(&td->td_contested);
1001eea4f254SJeff Roberson 	LIST_INIT(&td->td_lprof[0]);
1002eea4f254SJeff Roberson 	LIST_INIT(&td->td_lprof[1]);
1003f6eccf96SGleb Smirnoff #ifdef EPOCH_TRACE
1004dd902d01SGleb Smirnoff 	SLIST_INIT(&td->td_epochs);
1005f6eccf96SGleb Smirnoff #endif
10069104847fSDavid Xu 	sigqueue_init(&td->td_sigqueue, p);
1007fd90e2edSJung-uk Kim 	callout_init(&td->td_slpcallout, 1);
100866d8df9dSDaniel Eischen 	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
100944990b8cSJulian Elischer 	p->p_numthreads++;
101044990b8cSJulian Elischer }
101144990b8cSJulian Elischer 
1012ed062c8dSJulian Elischer /*
1013ed062c8dSJulian Elischer  * Called from:
1014ed062c8dSJulian Elischer  *  thread_exit()
1015ed062c8dSJulian Elischer  */
1016d3a0bd78SJulian Elischer void
1017d3a0bd78SJulian Elischer thread_unlink(struct thread *td)
1018d3a0bd78SJulian Elischer {
1019d3a0bd78SJulian Elischer 	struct proc *p = td->td_proc;
1020d3a0bd78SJulian Elischer 
1021374ae2a3SJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
1022f6eccf96SGleb Smirnoff #ifdef EPOCH_TRACE
1023dd902d01SGleb Smirnoff 	MPASS(SLIST_EMPTY(&td->td_epochs));
1024f6eccf96SGleb Smirnoff #endif
1025dd902d01SGleb Smirnoff 
1026d3a0bd78SJulian Elischer 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
1027d3a0bd78SJulian Elischer 	p->p_numthreads--;
1028d3a0bd78SJulian Elischer 	/* could clear a few other things here */
10298460a577SJohn Birrell 	/* Must  NOT clear links to proc! */
10305c8329edSJulian Elischer }
10315c8329edSJulian Elischer 
103279799053SKonstantin Belousov static int
103379799053SKonstantin Belousov calc_remaining(struct proc *p, int mode)
103479799053SKonstantin Belousov {
103579799053SKonstantin Belousov 	int remaining;
103679799053SKonstantin Belousov 
10377b519077SKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
10387b519077SKonstantin Belousov 	PROC_SLOCK_ASSERT(p, MA_OWNED);
103979799053SKonstantin Belousov 	if (mode == SINGLE_EXIT)
104079799053SKonstantin Belousov 		remaining = p->p_numthreads;
104179799053SKonstantin Belousov 	else if (mode == SINGLE_BOUNDARY)
104279799053SKonstantin Belousov 		remaining = p->p_numthreads - p->p_boundary_count;
10436ddcc233SKonstantin Belousov 	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
104479799053SKonstantin Belousov 		remaining = p->p_numthreads - p->p_suspcount;
104579799053SKonstantin Belousov 	else
104679799053SKonstantin Belousov 		panic("calc_remaining: wrong mode %d", mode);
104779799053SKonstantin Belousov 	return (remaining);
104879799053SKonstantin Belousov }
104979799053SKonstantin Belousov 
105007a9368aSKonstantin Belousov static int
105107a9368aSKonstantin Belousov remain_for_mode(int mode)
105207a9368aSKonstantin Belousov {
105307a9368aSKonstantin Belousov 
10546ddcc233SKonstantin Belousov 	return (mode == SINGLE_ALLPROC ? 0 : 1);
105507a9368aSKonstantin Belousov }
105607a9368aSKonstantin Belousov 
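/*
 * Example of how the two helpers above combine (illustrative): for a
 * process with 5 threads, SINGLE_EXIT loops until calc_remaining()
 * falls to remain_for_mode() == 1, i.e. only the caller is left.
 * SINGLE_ALLPROC is driven from another process, so the target must
 * reach 0 remaining; the non-exit modes merely park peers at the
 * suspension or boundary points, and the caller itself accounts for
 * the 1.
 */
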
105707a9368aSKonstantin Belousov static int
105807a9368aSKonstantin Belousov weed_inhib(int mode, struct thread *td2, struct proc *p)
105907a9368aSKonstantin Belousov {
106007a9368aSKonstantin Belousov 	int wakeup_swapper;
106107a9368aSKonstantin Belousov 
106207a9368aSKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
106307a9368aSKonstantin Belousov 	PROC_SLOCK_ASSERT(p, MA_OWNED);
106407a9368aSKonstantin Belousov 	THREAD_LOCK_ASSERT(td2, MA_OWNED);
106507a9368aSKonstantin Belousov 
106607a9368aSKonstantin Belousov 	wakeup_swapper = 0;
106761a74c5cSJeff Roberson 
106861a74c5cSJeff Roberson 	/*
106961a74c5cSJeff Roberson 	 * Since the thread lock is dropped by the scheduler we have
107061a74c5cSJeff Roberson 	 * to retry to check for races.
107161a74c5cSJeff Roberson 	 */
107261a74c5cSJeff Roberson restart:
107307a9368aSKonstantin Belousov 	switch (mode) {
107407a9368aSKonstantin Belousov 	case SINGLE_EXIT:
107561a74c5cSJeff Roberson 		if (TD_IS_SUSPENDED(td2)) {
107684cdea97SKonstantin Belousov 			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
107761a74c5cSJeff Roberson 			thread_lock(td2);
107861a74c5cSJeff Roberson 			goto restart;
107961a74c5cSJeff Roberson 		}
108061a74c5cSJeff Roberson 		if (TD_CAN_ABORT(td2)) {
108107a9368aSKonstantin Belousov 			wakeup_swapper |= sleepq_abort(td2, EINTR);
108261a74c5cSJeff Roberson 			return (wakeup_swapper);
108361a74c5cSJeff Roberson 		}
108407a9368aSKonstantin Belousov 		break;
108507a9368aSKonstantin Belousov 	case SINGLE_BOUNDARY:
108607a9368aSKonstantin Belousov 	case SINGLE_NO_EXIT:
108761a74c5cSJeff Roberson 		if (TD_IS_SUSPENDED(td2) &&
108861a74c5cSJeff Roberson 		    (td2->td_flags & TDF_BOUNDARY) == 0) {
108984cdea97SKonstantin Belousov 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
109061a74c5cSJeff Roberson 			thread_lock(td2);
109161a74c5cSJeff Roberson 			goto restart;
109261a74c5cSJeff Roberson 		}
109361a74c5cSJeff Roberson 		if (TD_CAN_ABORT(td2)) {
109407a9368aSKonstantin Belousov 			wakeup_swapper |= sleepq_abort(td2, ERESTART);
109561a74c5cSJeff Roberson 			return (wakeup_swapper);
109661a74c5cSJeff Roberson 		}
1097917dd390SKonstantin Belousov 		break;
10986ddcc233SKonstantin Belousov 	case SINGLE_ALLPROC:
10996ddcc233SKonstantin Belousov 		/*
11006ddcc233SKonstantin Belousov 		 * ALLPROC suspend tries to avoid spurious EINTR for
11016ddcc233SKonstantin Belousov 		 * threads sleeping interruptibly, by suspending the
11026ddcc233SKonstantin Belousov 		 * thread directly, similarly to sig_suspend_threads().
11036ddcc233SKonstantin Belousov 		 * Since such sleep is not performed at the user
11046ddcc233SKonstantin Belousov 		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
11056ddcc233SKonstantin Belousov 		 * is used to avoid immediate un-suspend.
11066ddcc233SKonstantin Belousov 		 */
11076ddcc233SKonstantin Belousov 		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
110861a74c5cSJeff Roberson 		    TDF_ALLPROCSUSP)) == 0) {
110984cdea97SKonstantin Belousov 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
111061a74c5cSJeff Roberson 			thread_lock(td2);
111161a74c5cSJeff Roberson 			goto restart;
111261a74c5cSJeff Roberson 		}
111361a74c5cSJeff Roberson 		if (TD_CAN_ABORT(td2)) {
11146ddcc233SKonstantin Belousov 			if ((td2->td_flags & TDF_SBDRY) == 0) {
11156ddcc233SKonstantin Belousov 				thread_suspend_one(td2);
11166ddcc233SKonstantin Belousov 				td2->td_flags |= TDF_ALLPROCSUSP;
11176ddcc233SKonstantin Belousov 			} else {
11186ddcc233SKonstantin Belousov 				wakeup_swapper |= sleepq_abort(td2, ERESTART);
111961a74c5cSJeff Roberson 				return (wakeup_swapper);
11206ddcc233SKonstantin Belousov 			}
11216ddcc233SKonstantin Belousov 		}
112207a9368aSKonstantin Belousov 		break;
112361a74c5cSJeff Roberson 	default:
112461a74c5cSJeff Roberson 		break;
112507a9368aSKonstantin Belousov 	}
112661a74c5cSJeff Roberson 	thread_unlock(td2);
112707a9368aSKonstantin Belousov 	return (wakeup_swapper);
112807a9368aSKonstantin Belousov }
112907a9368aSKonstantin Belousov 
11305215b187SJeff Roberson /*
113144990b8cSJulian Elischer  * Enforce single-threading.
113244990b8cSJulian Elischer  *
113344990b8cSJulian Elischer  * Returns 1 if the caller must abort (another thread is waiting to
113444990b8cSJulian Elischer  * exit the process or similar). Process is locked!
113544990b8cSJulian Elischer  * Returns 0 when you are successfully the only thread running.
113644990b8cSJulian Elischer  * A process has successfully single-threaded in the suspend mode when
113744990b8cSJulian Elischer  * there are no threads in user mode.  Threads in the kernel must be
113844990b8cSJulian Elischer  * allowed to continue until they get to the user boundary.  They may even
113944990b8cSJulian Elischer  * copy out their return values and data before suspending.  They may,
1140e2668f55SMaxim Konovalov  * however, be accelerated in reaching the user boundary, as we will wake
114144990b8cSJulian Elischer  * up any sleeping threads that are interruptible (PCATCH).
114244990b8cSJulian Elischer  */
114344990b8cSJulian Elischer int
11446ddcc233SKonstantin Belousov thread_single(struct proc *p, int mode)
114544990b8cSJulian Elischer {
114644990b8cSJulian Elischer 	struct thread *td;
114744990b8cSJulian Elischer 	struct thread *td2;
1148da7bbd2cSJohn Baldwin 	int remaining, wakeup_swapper;
114944990b8cSJulian Elischer 
115044990b8cSJulian Elischer 	td = curthread;
11516ddcc233SKonstantin Belousov 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
11526ddcc233SKonstantin Belousov 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
11536ddcc233SKonstantin Belousov 	    ("invalid mode %d", mode));
11546ddcc233SKonstantin Belousov 	/*
11556ddcc233SKonstantin Belousov 	 * If allowing non-ALLPROC singlethreading for non-curproc
11566ddcc233SKonstantin Belousov 	 * callers, calc_remaining() and remain_for_mode() should be
11576ddcc233SKonstantin Belousov 	 * adjusted to also account for td->td_proc != p.  For now
11586ddcc233SKonstantin Belousov 	 * this is not implemented because it is not used.
11596ddcc233SKonstantin Belousov 	 */
11606ddcc233SKonstantin Belousov 	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
11616ddcc233SKonstantin Belousov 	    (mode != SINGLE_ALLPROC && td->td_proc == p),
11626ddcc233SKonstantin Belousov 	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
116337814395SPeter Wemm 	mtx_assert(&Giant, MA_NOTOWNED);
116444990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
116544990b8cSJulian Elischer 
11666ddcc233SKonstantin Belousov 	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
116744990b8cSJulian Elischer 		return (0);
116844990b8cSJulian Elischer 
1169e3b9bf71SJulian Elischer 	/* Is someone already single threading? */
1170906ac69dSDavid Xu 	if (p->p_singlethread != NULL && p->p_singlethread != td)
117144990b8cSJulian Elischer 		return (1);
117244990b8cSJulian Elischer 
1173906ac69dSDavid Xu 	if (mode == SINGLE_EXIT) {
1174906ac69dSDavid Xu 		p->p_flag |= P_SINGLE_EXIT;
1175906ac69dSDavid Xu 		p->p_flag &= ~P_SINGLE_BOUNDARY;
1176906ac69dSDavid Xu 	} else {
1177906ac69dSDavid Xu 		p->p_flag &= ~P_SINGLE_EXIT;
1178906ac69dSDavid Xu 		if (mode == SINGLE_BOUNDARY)
1179906ac69dSDavid Xu 			p->p_flag |= P_SINGLE_BOUNDARY;
1180906ac69dSDavid Xu 		else
1181906ac69dSDavid Xu 			p->p_flag &= ~P_SINGLE_BOUNDARY;
1182906ac69dSDavid Xu 	}
11836ddcc233SKonstantin Belousov 	if (mode == SINGLE_ALLPROC)
11846ddcc233SKonstantin Belousov 		p->p_flag |= P_TOTAL_STOP;
11851279572aSDavid Xu 	p->p_flag |= P_STOPPED_SINGLE;
11867b4a950aSDavid Xu 	PROC_SLOCK(p);
1187112afcb2SJohn Baldwin 	p->p_singlethread = td;
118879799053SKonstantin Belousov 	remaining = calc_remaining(p, mode);
118907a9368aSKonstantin Belousov 	while (remaining != remain_for_mode(mode)) {
1190bf1a3220SDavid Xu 		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
1191bf1a3220SDavid Xu 			goto stopme;
1192da7bbd2cSJohn Baldwin 		wakeup_swapper = 0;
119344990b8cSJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td2) {
119444990b8cSJulian Elischer 			if (td2 == td)
119544990b8cSJulian Elischer 				continue;
1196a54e85fdSJeff Roberson 			thread_lock(td2);
1197b7edba77SJeff Roberson 			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
11986ddcc233SKonstantin Belousov 			if (TD_IS_INHIBITED(td2)) {
119907a9368aSKonstantin Belousov 				wakeup_swapper |= weed_inhib(mode, td2, p);
1200d8267df7SDavid Xu #ifdef SMP
12016ddcc233SKonstantin Belousov 			} else if (TD_IS_RUNNING(td2) && td != td2) {
1202d8267df7SDavid Xu 				forward_signal(td2);
120361a74c5cSJeff Roberson 				thread_unlock(td2);
1204d8267df7SDavid Xu #endif
120561a74c5cSJeff Roberson 			} else
1206a54e85fdSJeff Roberson 				thread_unlock(td2);
12079d102777SJulian Elischer 		}
1208da7bbd2cSJohn Baldwin 		if (wakeup_swapper)
1209da7bbd2cSJohn Baldwin 			kick_proc0();
121079799053SKonstantin Belousov 		remaining = calc_remaining(p, mode);
1211ec008e96SDavid Xu 
12129d102777SJulian Elischer 		/*
12139d102777SJulian Elischer 		 * Maybe we suspended some threads; was it enough?
12149d102777SJulian Elischer 		 */
121507a9368aSKonstantin Belousov 		if (remaining == remain_for_mode(mode))
12169d102777SJulian Elischer 			break;
12179d102777SJulian Elischer 
1218bf1a3220SDavid Xu stopme:
121944990b8cSJulian Elischer 		/*
122044990b8cSJulian Elischer 		 * Wake us up when everyone else has suspended.
1221e3b9bf71SJulian Elischer 		 * In the meantime we suspend as well.
122244990b8cSJulian Elischer 		 */
12236ddcc233SKonstantin Belousov 		thread_suspend_switch(td, p);
122479799053SKonstantin Belousov 		remaining = calc_remaining(p, mode);
122544990b8cSJulian Elischer 	}
1226906ac69dSDavid Xu 	if (mode == SINGLE_EXIT) {
122791599697SJulian Elischer 		/*
12288626a0ddSKonstantin Belousov 		 * Convert the process to an unthreaded process.
12298626a0ddSKonstantin Belousov 		 * SINGLE_EXIT is requested by exit1() or execve();
12308626a0ddSKonstantin Belousov 		 * in both cases the other threads must be retired.
123191599697SJulian Elischer 		 */
12328626a0ddSKonstantin Belousov 		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
1233ed062c8dSJulian Elischer 		p->p_singlethread = NULL;
12348626a0ddSKonstantin Belousov 		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);
1235fd229b5bSKonstantin Belousov 
1236fd229b5bSKonstantin Belousov 		/*
1237fd229b5bSKonstantin Belousov 		 * Wait for any remaining threads to exit cpu_throw().
1238fd229b5bSKonstantin Belousov 		 */
1239fd229b5bSKonstantin Belousov 		while (p->p_exitthreads != 0) {
1240fd229b5bSKonstantin Belousov 			PROC_SUNLOCK(p);
1241fd229b5bSKonstantin Belousov 			PROC_UNLOCK(p);
1242fd229b5bSKonstantin Belousov 			sched_relinquish(td);
1243fd229b5bSKonstantin Belousov 			PROC_LOCK(p);
1244fd229b5bSKonstantin Belousov 			PROC_SLOCK(p);
1245fd229b5bSKonstantin Belousov 		}
1246ac437c07SKonstantin Belousov 	} else if (mode == SINGLE_BOUNDARY) {
1247ac437c07SKonstantin Belousov 		/*
1248ac437c07SKonstantin Belousov 		 * Wait until all suspended threads are removed from
1249ac437c07SKonstantin Belousov 		 * the processors.  The thread_suspend_check()
1250ac437c07SKonstantin Belousov 		 * increments p_boundary_count while it is still
1251ac437c07SKonstantin Belousov 		 * running, which would otherwise make it possible for
1252ac437c07SKonstantin Belousov 		 * execve() to destroy the vmspace while our other
1253ac437c07SKonstantin Belousov 		 * threads are still using the address space.
1254ac437c07SKonstantin Belousov 		 *
1255ac437c07SKonstantin Belousov 		 * We lock the thread, which is only allowed to
1256ac437c07SKonstantin Belousov 		 * succeed after the context switch code has finished
1257ac437c07SKonstantin Belousov 		 * using the address space.
1258ac437c07SKonstantin Belousov 		 */
1259ac437c07SKonstantin Belousov 		FOREACH_THREAD_IN_PROC(p, td2) {
1260ac437c07SKonstantin Belousov 			if (td2 == td)
1261ac437c07SKonstantin Belousov 				continue;
1262ac437c07SKonstantin Belousov 			thread_lock(td2);
1263ac437c07SKonstantin Belousov 			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
1264ac437c07SKonstantin Belousov 			    ("td %p not on boundary", td2));
1265ac437c07SKonstantin Belousov 			KASSERT(TD_IS_SUSPENDED(td2),
1266ac437c07SKonstantin Belousov 			    ("td %p is not suspended", td2));
1267ac437c07SKonstantin Belousov 			thread_unlock(td2);
1268ac437c07SKonstantin Belousov 		}
126991599697SJulian Elischer 	}
12707b4a950aSDavid Xu 	PROC_SUNLOCK(p);
127144990b8cSJulian Elischer 	return (0);
127244990b8cSJulian Elischer }
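
/*
 * Illustrative sketch (not part of the original file): the typical
 * pairing of thread_single() with thread_single_end().  exit1() and
 * execve() use the SINGLE_EXIT/SINGLE_BOUNDARY modes in roughly this
 * shape; the error handling below is a simplifying assumption.
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY) != 0) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	(another thread won the race)
 *	}
 *	... act while the other threads are held at the boundary ...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */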
127344990b8cSJulian Elischer 
12748638fe7bSKonstantin Belousov bool
12758638fe7bSKonstantin Belousov thread_suspend_check_needed(void)
12768638fe7bSKonstantin Belousov {
12778638fe7bSKonstantin Belousov 	struct proc *p;
12788638fe7bSKonstantin Belousov 	struct thread *td;
12798638fe7bSKonstantin Belousov 
12808638fe7bSKonstantin Belousov 	td = curthread;
12818638fe7bSKonstantin Belousov 	p = td->td_proc;
12828638fe7bSKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
12838638fe7bSKonstantin Belousov 	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
12848638fe7bSKonstantin Belousov 	    (td->td_dbgflags & TDB_SUSPEND) != 0));
12858638fe7bSKonstantin Belousov }
12868638fe7bSKonstantin Belousov 
128744990b8cSJulian Elischer /*
128844990b8cSJulian Elischer  * Called from locations that can safely check to see
128944990b8cSJulian Elischer  * whether we have to suspend, or at least throttle, for a
129044990b8cSJulian Elischer  * single-thread event (e.g. fork).
129144990b8cSJulian Elischer  *
129244990b8cSJulian Elischer  * Such locations include userret().
129344990b8cSJulian Elischer  * If the "return_instead" argument is nonzero, the thread must be able to
129444990b8cSJulian Elischer  * accept 0 (caller may continue) or 1 (caller must abort) as a result.
129544990b8cSJulian Elischer  *
129644990b8cSJulian Elischer  * The 'return_instead' argument tells the function if it may do a
129744990b8cSJulian Elischer  * thread_exit() or suspend, or whether the caller must abort and back
129844990b8cSJulian Elischer  * out instead.
129944990b8cSJulian Elischer  *
130044990b8cSJulian Elischer  * If the thread that set the single_threading request has set the
130144990b8cSJulian Elischer  * P_SINGLE_EXIT bit in the process flags then this call will never return
130244990b8cSJulian Elischer  * if 'return_instead' is false, but will exit.
130344990b8cSJulian Elischer  *
130444990b8cSJulian Elischer  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
130544990b8cSJulian Elischer  *---------------+--------------------+---------------------
130644990b8cSJulian Elischer  *       0       | returns 0          |   returns 0 or 1
1307353374b5SJohn Baldwin  *               | when ST ends       |   immediately
130844990b8cSJulian Elischer  *---------------+--------------------+---------------------
130944990b8cSJulian Elischer  *       1       | thread exits       |   returns 1
1310353374b5SJohn Baldwin  *               |                    |  immediately
131144990b8cSJulian Elischer  * 0 = thread_exit() or suspension ok,
131244990b8cSJulian Elischer  * other = return error instead of stopping the thread.
131344990b8cSJulian Elischer  *
131444990b8cSJulian Elischer  * While a full suspension is in effect, even a single-threading
131544990b8cSJulian Elischer  * thread would be suspended if it made this call (but it shouldn't).
131644990b8cSJulian Elischer  * This call should only be made from places where
131744990b8cSJulian Elischer  * thread_exit() would be safe as that may be the outcome unless
131844990b8cSJulian Elischer  * return_instead is set.
131944990b8cSJulian Elischer  */
132044990b8cSJulian Elischer int
132144990b8cSJulian Elischer thread_suspend_check(int return_instead)
132244990b8cSJulian Elischer {
1323ecafb24bSJuli Mallett 	struct thread *td;
1324ecafb24bSJuli Mallett 	struct proc *p;
132546e47c4fSKonstantin Belousov 	int wakeup_swapper;
132644990b8cSJulian Elischer 
132744990b8cSJulian Elischer 	td = curthread;
132844990b8cSJulian Elischer 	p = td->td_proc;
132937814395SPeter Wemm 	mtx_assert(&Giant, MA_NOTOWNED);
133044990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
13318638fe7bSKonstantin Belousov 	while (thread_suspend_check_needed()) {
13321279572aSDavid Xu 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
133344990b8cSJulian Elischer 			KASSERT(p->p_singlethread != NULL,
133444990b8cSJulian Elischer 			    ("singlethread not set"));
133544990b8cSJulian Elischer 			/*
1336e3b9bf71SJulian Elischer 			 * The only suspension in effect is
1337e3b9bf71SJulian Elischer 			 * single-threading; the single threader need not stop.
1338bd07998eSKonstantin Belousov 			 * It is safe to access p->p_singlethread unlocked
1339bd07998eSKonstantin Belousov 			 * because it can only be set to our address by us.
134044990b8cSJulian Elischer 			 */
1341e3b9bf71SJulian Elischer 			if (p->p_singlethread == td)
134244990b8cSJulian Elischer 				return (0);	/* Exempt from stopping. */
134344990b8cSJulian Elischer 		}
134445a4bfa1SDavid Xu 		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
134594f0972bSDavid Xu 			return (EINTR);
134644990b8cSJulian Elischer 
1347906ac69dSDavid Xu 		/* Should we go to the user boundary if we didn't come from there? */
1348906ac69dSDavid Xu 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
1349906ac69dSDavid Xu 		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
135094f0972bSDavid Xu 			return (ERESTART);
1351906ac69dSDavid Xu 
135244990b8cSJulian Elischer 		/*
13533077f938SKonstantin Belousov 		 * Ignore suspend requests if they are deferred.
1354d071a6faSJohn Baldwin 		 */
13553077f938SKonstantin Belousov 		if ((td->td_flags & TDF_SBDRY) != 0) {
1356d071a6faSJohn Baldwin 			KASSERT(return_instead,
1357d071a6faSJohn Baldwin 			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
135846e47c4fSKonstantin Belousov 			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
135946e47c4fSKonstantin Belousov 			    (TDF_SEINTR | TDF_SERESTART),
136046e47c4fSKonstantin Belousov 			    ("both TDF_SEINTR and TDF_SERESTART"));
136146e47c4fSKonstantin Belousov 			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
1362d071a6faSJohn Baldwin 		}
1363d071a6faSJohn Baldwin 
1364d071a6faSJohn Baldwin 		/*
136544990b8cSJulian Elischer 		 * If the process is waiting for us to exit,
136644990b8cSJulian Elischer 		 * this thread should just suicide.
13671279572aSDavid Xu 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
136844990b8cSJulian Elischer 		 */
1369cf7d9a8cSDavid Xu 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1370cf7d9a8cSDavid Xu 			PROC_UNLOCK(p);
137191d1786fSDmitry Chagin 
137291d1786fSDmitry Chagin 			/*
137391d1786fSDmitry Chagin 			 * Allow Linux emulation layer to do some work
137491d1786fSDmitry Chagin 			 * before thread suicide.
137591d1786fSDmitry Chagin 			 */
137691d1786fSDmitry Chagin 			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
137791d1786fSDmitry Chagin 				(p->p_sysent->sv_thread_detach)(td);
13782a339d9eSKonstantin Belousov 			umtx_thread_exit(td);
1379d1e7a4a5SJohn Baldwin 			kern_thr_exit(td);
1380d1e7a4a5SJohn Baldwin 			panic("stopped thread did not exit");
1381cf7d9a8cSDavid Xu 		}
138221ecd1e9SDavid Xu 
138321ecd1e9SDavid Xu 		PROC_SLOCK(p);
138421ecd1e9SDavid Xu 		thread_stopped(p);
1385a54e85fdSJeff Roberson 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1386a54e85fdSJeff Roberson 			if (p->p_numthreads == p->p_suspcount + 1) {
1387a54e85fdSJeff Roberson 				thread_lock(p->p_singlethread);
138884cdea97SKonstantin Belousov 				wakeup_swapper = thread_unsuspend_one(
138984cdea97SKonstantin Belousov 				    p->p_singlethread, p, false);
13907847a9daSJohn Baldwin 				if (wakeup_swapper)
13917847a9daSJohn Baldwin 					kick_proc0();
1392a54e85fdSJeff Roberson 			}
1393a54e85fdSJeff Roberson 		}
13943f9be10eSDavid Xu 		PROC_UNLOCK(p);
13957b4a950aSDavid Xu 		thread_lock(td);
139644990b8cSJulian Elischer 		/*
139744990b8cSJulian Elischer 		 * When a thread suspends, it just
1398ad1e7d28SJulian Elischer 		 * gets taken off all queues.
139944990b8cSJulian Elischer 		 */
140071fad9fdSJulian Elischer 		thread_suspend_one(td);
1401906ac69dSDavid Xu 		if (return_instead == 0) {
1402906ac69dSDavid Xu 			p->p_boundary_count++;
1403906ac69dSDavid Xu 			td->td_flags |= TDF_BOUNDARY;
1404cf19bf91SJulian Elischer 		}
14057b4a950aSDavid Xu 		PROC_SUNLOCK(p);
1406686bcb5cSJeff Roberson 		mi_switch(SW_INVOL | SWT_SUSPEND);
140744990b8cSJulian Elischer 		PROC_LOCK(p);
140844990b8cSJulian Elischer 	}
140944990b8cSJulian Elischer 	return (0);
141044990b8cSJulian Elischer }
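
/*
 * Illustrative sketch (not part of the original file): how a kernel
 * path that cannot stop in place consults thread_suspend_check() with
 * return_instead set.  The surrounding retry logic is an assumption.
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);	(0, EINTR, or ERESTART per the table above)
 */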
141144990b8cSJulian Elischer 
1412478ca4b0SKonstantin Belousov /*
1413478ca4b0SKonstantin Belousov  * Check for possible stops and suspensions while executing a
1414478ca4b0SKonstantin Belousov  * casueword or similar transiently failing operation.
1415478ca4b0SKonstantin Belousov  *
1416478ca4b0SKonstantin Belousov  * The sleep argument controls whether the function can handle a stop
1417478ca4b0SKonstantin Belousov  * request itself or whether it should return ERESTART, with the
1418478ca4b0SKonstantin Belousov  * request processed at the kernel/user boundary in ast.
1419478ca4b0SKonstantin Belousov  *
1420478ca4b0SKonstantin Belousov  * Typically, when retrying due to casueword(9) failure (rv == 1), we
1421478ca4b0SKonstantin Belousov  * should handle the stop requests there, with the exception of cases
1422478ca4b0SKonstantin Belousov  * when the thread owns a kernel resource, for instance has busied the
1423300b525dSKonstantin Belousov  * umtx key, or when functions return immediately if thread_check_susp()
1424478ca4b0SKonstantin Belousov  * returned non-zero.  On the other hand, when retrying the whole lock
1425478ca4b0SKonstantin Belousov  * operation, we had better not stop there but delegate the handling
1426478ca4b0SKonstantin Belousov  * to ast.
1427478ca4b0SKonstantin Belousov  *
1428478ca4b0SKonstantin Belousov  * If the request is for thread termination P_SINGLE_EXIT, we cannot
1429478ca4b0SKonstantin Belousov  * handle it at all, and simply return EINTR.
1430478ca4b0SKonstantin Belousov  */
1431478ca4b0SKonstantin Belousov int
1432478ca4b0SKonstantin Belousov thread_check_susp(struct thread *td, bool sleep)
1433478ca4b0SKonstantin Belousov {
1434478ca4b0SKonstantin Belousov 	struct proc *p;
1435478ca4b0SKonstantin Belousov 	int error;
1436478ca4b0SKonstantin Belousov 
1437478ca4b0SKonstantin Belousov 	/*
1438478ca4b0SKonstantin Belousov 	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
1439478ca4b0SKonstantin Belousov 	 * eventually break the lockstep loop.
1440478ca4b0SKonstantin Belousov 	 */
1441478ca4b0SKonstantin Belousov 	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
1442478ca4b0SKonstantin Belousov 		return (0);
1443478ca4b0SKonstantin Belousov 	error = 0;
1444478ca4b0SKonstantin Belousov 	p = td->td_proc;
1445478ca4b0SKonstantin Belousov 	PROC_LOCK(p);
1446478ca4b0SKonstantin Belousov 	if (p->p_flag & P_SINGLE_EXIT)
1447478ca4b0SKonstantin Belousov 		error = EINTR;
1448478ca4b0SKonstantin Belousov 	else if (P_SHOULDSTOP(p) ||
1449478ca4b0SKonstantin Belousov 	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
1450478ca4b0SKonstantin Belousov 		error = sleep ? thread_suspend_check(0) : ERESTART;
1451478ca4b0SKonstantin Belousov 	PROC_UNLOCK(p);
1452478ca4b0SKonstantin Belousov 	return (error);
1453478ca4b0SKonstantin Belousov }
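
/*
 * Illustrative sketch (not part of the original file): the
 * casueword(9)-style retry loop described above.  The variable names
 * and the surrounding lock-word logic are assumptions.
 *
 *	for (;;) {
 *		rv = casueword32(&m->m_owner, owner, &owner, id);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;			(the CAS succeeded)
 *		(rv == 1: transient failure, handle stop requests here)
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}
 */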
1454478ca4b0SKonstantin Belousov 
145535c32a76SDavid Xu void
14566ddcc233SKonstantin Belousov thread_suspend_switch(struct thread *td, struct proc *p)
1457a54e85fdSJeff Roberson {
1458a54e85fdSJeff Roberson 
1459a54e85fdSJeff Roberson 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1460a54e85fdSJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
14617b4a950aSDavid Xu 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1462a54e85fdSJeff Roberson 	/*
1463a54e85fdSJeff Roberson 	 * We implement thread_suspend_one in stages here to avoid
1464a54e85fdSJeff Roberson 	 * dropping the proc lock while the thread lock is owned.
1465a54e85fdSJeff Roberson 	 */
14666ddcc233SKonstantin Belousov 	if (p == td->td_proc) {
1467a54e85fdSJeff Roberson 		thread_stopped(p);
1468a54e85fdSJeff Roberson 		p->p_suspcount++;
14696ddcc233SKonstantin Belousov 	}
14703f9be10eSDavid Xu 	PROC_UNLOCK(p);
14717b4a950aSDavid Xu 	thread_lock(td);
1472b7edba77SJeff Roberson 	td->td_flags &= ~TDF_NEEDSUSPCHK;
1473a54e85fdSJeff Roberson 	TD_SET_SUSPENDED(td);
1474c5aa6b58SJeff Roberson 	sched_sleep(td, 0);
14757b4a950aSDavid Xu 	PROC_SUNLOCK(p);
1476a54e85fdSJeff Roberson 	DROP_GIANT();
1477686bcb5cSJeff Roberson 	mi_switch(SW_VOL | SWT_SUSPEND);
1478a54e85fdSJeff Roberson 	PICKUP_GIANT();
1479a54e85fdSJeff Roberson 	PROC_LOCK(p);
14807b4a950aSDavid Xu 	PROC_SLOCK(p);
1481a54e85fdSJeff Roberson }
1482a54e85fdSJeff Roberson 
1483a54e85fdSJeff Roberson void
148435c32a76SDavid Xu thread_suspend_one(struct thread *td)
148535c32a76SDavid Xu {
14866ddcc233SKonstantin Belousov 	struct proc *p;
148735c32a76SDavid Xu 
14886ddcc233SKonstantin Belousov 	p = td->td_proc;
14897b4a950aSDavid Xu 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1490a54e85fdSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1491e574e444SDavid Xu 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
149235c32a76SDavid Xu 	p->p_suspcount++;
1493b7edba77SJeff Roberson 	td->td_flags &= ~TDF_NEEDSUSPCHK;
149471fad9fdSJulian Elischer 	TD_SET_SUSPENDED(td);
1495c5aa6b58SJeff Roberson 	sched_sleep(td, 0);
149635c32a76SDavid Xu }
149735c32a76SDavid Xu 
149884cdea97SKonstantin Belousov static int
149984cdea97SKonstantin Belousov thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
150035c32a76SDavid Xu {
150135c32a76SDavid Xu 
1502a54e85fdSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1503ad1e7d28SJulian Elischer 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
150471fad9fdSJulian Elischer 	TD_CLR_SUSPENDED(td);
15056ddcc233SKonstantin Belousov 	td->td_flags &= ~TDF_ALLPROCSUSP;
15066ddcc233SKonstantin Belousov 	if (td->td_proc == p) {
15076ddcc233SKonstantin Belousov 		PROC_SLOCK_ASSERT(p, MA_OWNED);
150835c32a76SDavid Xu 		p->p_suspcount--;
150984cdea97SKonstantin Belousov 		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
151084cdea97SKonstantin Belousov 			td->td_flags &= ~TDF_BOUNDARY;
151184cdea97SKonstantin Belousov 			p->p_boundary_count--;
151284cdea97SKonstantin Belousov 		}
15136ddcc233SKonstantin Belousov 	}
151461a74c5cSJeff Roberson 	return (setrunnable(td, 0));
151535c32a76SDavid Xu }
151635c32a76SDavid Xu 
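/*
 * Let a suspended thread run temporarily: remove it from the sleep
 * queue if it is parked there, clear its suspension and account for
 * it in p_suspcount, then make it runnable.  The caller must hold the
 * process lock; the thread lock is acquired here and released by
 * setrunnable().
 */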
1517*af928fdeSKonstantin Belousov void
1518*af928fdeSKonstantin Belousov thread_run_flash(struct thread *td)
1519*af928fdeSKonstantin Belousov {
1520*af928fdeSKonstantin Belousov 	struct proc *p;
1521*af928fdeSKonstantin Belousov 
1522*af928fdeSKonstantin Belousov 	p = td->td_proc;
1523*af928fdeSKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
1524*af928fdeSKonstantin Belousov 
1525*af928fdeSKonstantin Belousov 	if (TD_ON_SLEEPQ(td))
1526*af928fdeSKonstantin Belousov 		sleepq_remove_nested(td);
1527*af928fdeSKonstantin Belousov 	else
1528*af928fdeSKonstantin Belousov 		thread_lock(td);
1529*af928fdeSKonstantin Belousov 
1530*af928fdeSKonstantin Belousov 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1531*af928fdeSKonstantin Belousov 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1532*af928fdeSKonstantin Belousov 
1533*af928fdeSKonstantin Belousov 	TD_CLR_SUSPENDED(td);
1534*af928fdeSKonstantin Belousov 	PROC_SLOCK(p);
1535*af928fdeSKonstantin Belousov 	MPASS(p->p_suspcount > 0);
1536*af928fdeSKonstantin Belousov 	p->p_suspcount--;
1537*af928fdeSKonstantin Belousov 	PROC_SUNLOCK(p);
1538*af928fdeSKonstantin Belousov 	if (setrunnable(td, 0))
1539*af928fdeSKonstantin Belousov 		kick_proc0();
1540*af928fdeSKonstantin Belousov }
1541*af928fdeSKonstantin Belousov 
154244990b8cSJulian Elischer /*
154344990b8cSJulian Elischer  * Allow all threads blocked by single threading to continue running.
154444990b8cSJulian Elischer  */
154544990b8cSJulian Elischer void
154644990b8cSJulian Elischer thread_unsuspend(struct proc *p)
154744990b8cSJulian Elischer {
154844990b8cSJulian Elischer 	struct thread *td;
15497847a9daSJohn Baldwin 	int wakeup_swapper;
155044990b8cSJulian Elischer 
155144990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
15527b4a950aSDavid Xu 	PROC_SLOCK_ASSERT(p, MA_OWNED);
15537847a9daSJohn Baldwin 	wakeup_swapper = 0;
155444990b8cSJulian Elischer 	if (!P_SHOULDSTOP(p)) {
1555ad1e7d28SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
1556a54e85fdSJeff Roberson 			thread_lock(td);
1557ad1e7d28SJulian Elischer 			if (TD_IS_SUSPENDED(td)) {
155884cdea97SKonstantin Belousov 				wakeup_swapper |= thread_unsuspend_one(td, p,
155984cdea97SKonstantin Belousov 				    true);
156061a74c5cSJeff Roberson 			} else
1561a54e85fdSJeff Roberson 				thread_unlock(td);
1562ad1e7d28SJulian Elischer 		}
156384cdea97SKonstantin Belousov 	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
156484cdea97SKonstantin Belousov 	    p->p_numthreads == p->p_suspcount) {
156544990b8cSJulian Elischer 		/*
156644990b8cSJulian Elischer 		 * Stopping everything also did the job for the single
156744990b8cSJulian Elischer 		 * threading request.  Now that we have downgraded to
156844990b8cSJulian Elischer 		 * single-threaded, let it continue.
156944990b8cSJulian Elischer 		 */
15706ddcc233SKonstantin Belousov 		if (p->p_singlethread->td_proc == p) {
1571a54e85fdSJeff Roberson 			thread_lock(p->p_singlethread);
15726ddcc233SKonstantin Belousov 			wakeup_swapper = thread_unsuspend_one(
157384cdea97SKonstantin Belousov 			    p->p_singlethread, p, false);
157444990b8cSJulian Elischer 		}
15756ddcc233SKonstantin Belousov 	}
15767847a9daSJohn Baldwin 	if (wakeup_swapper)
15777847a9daSJohn Baldwin 		kick_proc0();
157844990b8cSJulian Elischer }
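
/*
 * Illustrative sketch (not part of the original file): resuming a
 * stopped process, e.g. on continue-signal delivery.  The flag
 * handling shown is a simplifying assumption.
 *
 *	PROC_LOCK(p);
 *	PROC_SLOCK(p);
 *	p->p_flag &= ~(P_STOPPED_SIG | P_STOPPED_TRACE);
 *	thread_unsuspend(p);
 *	PROC_SUNLOCK(p);
 *	PROC_UNLOCK(p);
 */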
157944990b8cSJulian Elischer 
1580ed062c8dSJulian Elischer /*
1581ed062c8dSJulian Elischer  * End the single-threading mode.
1582ed062c8dSJulian Elischer  */
158344990b8cSJulian Elischer void
15846ddcc233SKonstantin Belousov thread_single_end(struct proc *p, int mode)
158544990b8cSJulian Elischer {
158644990b8cSJulian Elischer 	struct thread *td;
15877847a9daSJohn Baldwin 	int wakeup_swapper;
158844990b8cSJulian Elischer 
15896ddcc233SKonstantin Belousov 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
15906ddcc233SKonstantin Belousov 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
15916ddcc233SKonstantin Belousov 	    ("invalid mode %d", mode));
159244990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
15936ddcc233SKonstantin Belousov 	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
15946ddcc233SKonstantin Belousov 	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
15956ddcc233SKonstantin Belousov 	    ("mode %d does not match P_TOTAL_STOP", mode));
159684cdea97SKonstantin Belousov 	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
159784cdea97SKonstantin Belousov 	    ("thread_single_end from other thread %p %p",
159884cdea97SKonstantin Belousov 	    curthread, p->p_singlethread));
159984cdea97SKonstantin Belousov 	KASSERT(mode != SINGLE_BOUNDARY ||
160084cdea97SKonstantin Belousov 	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
160184cdea97SKonstantin Belousov 	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
16026ddcc233SKonstantin Belousov 	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
16036ddcc233SKonstantin Belousov 	    P_TOTAL_STOP);
16047b4a950aSDavid Xu 	PROC_SLOCK(p);
160544990b8cSJulian Elischer 	p->p_singlethread = NULL;
16067847a9daSJohn Baldwin 	wakeup_swapper = 0;
160749539972SJulian Elischer 	/*
16087847a9daSJohn Baldwin 	 * If there are other threads, they may now run,
160949539972SJulian Elischer 	 * unless of course there is a blanket 'stop order'
161049539972SJulian Elischer 	 * on the process.  The single threader must be allowed
161149539972SJulian Elischer 	 * to continue, however, as this is a bad place to stop.
161249539972SJulian Elischer 	 */
16136ddcc233SKonstantin Belousov 	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
1614ad1e7d28SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
1615a54e85fdSJeff Roberson 			thread_lock(td);
1616ad1e7d28SJulian Elischer 			if (TD_IS_SUSPENDED(td)) {
161784cdea97SKonstantin Belousov 				wakeup_swapper |= thread_unsuspend_one(td, p,
161884cdea97SKonstantin Belousov 				    mode == SINGLE_BOUNDARY);
161961a74c5cSJeff Roberson 			} else
1620a54e85fdSJeff Roberson 				thread_unlock(td);
162149539972SJulian Elischer 		}
1622ad1e7d28SJulian Elischer 	}
162384cdea97SKonstantin Belousov 	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
162484cdea97SKonstantin Belousov 	    ("inconsistent boundary count %d", p->p_boundary_count));
16257b4a950aSDavid Xu 	PROC_SUNLOCK(p);
16267847a9daSJohn Baldwin 	if (wakeup_swapper)
16277847a9daSJohn Baldwin 		kick_proc0();
162849539972SJulian Elischer }
16294fc21c09SDaniel Eischen 
1630aae3547bSMateusz Guzik /*
1631aae3547bSMateusz Guzik  * Locate a thread by number and return with proc lock held.
1632aae3547bSMateusz Guzik  *
1633aae3547bSMateusz Guzik  * Thread exit establishes a proc -> tidhash lock ordering, but the
1634aae3547bSMateusz Guzik  * lookup takes tidhash first and needs to return a locked proc.
1635aae3547bSMateusz Guzik  *
1636aae3547bSMateusz Guzik  * The problem is worked around by relying on type-safety of both
1637aae3547bSMateusz Guzik  * structures and doing the work in two steps:
1638aae3547bSMateusz Guzik  * - tidhash-locked lookup which saves both thread and proc pointers
1639aae3547bSMateusz Guzik  * - proc-locked verification that the found thread still matches
1640aae3547bSMateusz Guzik  */
1641aae3547bSMateusz Guzik static bool
1642aae3547bSMateusz Guzik tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
1643cf7d9a8cSDavid Xu {
1644cf7d9a8cSDavid Xu #define RUN_THRESH	16
1645aae3547bSMateusz Guzik 	struct proc *p;
1646cf7d9a8cSDavid Xu 	struct thread *td;
1647aae3547bSMateusz Guzik 	int run;
1648aae3547bSMateusz Guzik 	bool locked;
1649cf7d9a8cSDavid Xu 
1650aae3547bSMateusz Guzik 	run = 0;
165126007fe3SMateusz Guzik 	rw_rlock(TIDHASHLOCK(tid));
1652aae3547bSMateusz Guzik 	locked = true;
1653cf7d9a8cSDavid Xu 	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1654aae3547bSMateusz Guzik 		if (td->td_tid != tid) {
1655aae3547bSMateusz Guzik 			run++;
1656aae3547bSMateusz Guzik 			continue;
1657cf7d9a8cSDavid Xu 		}
1658aae3547bSMateusz Guzik 		p = td->td_proc;
1659aae3547bSMateusz Guzik 		if (pid != -1 && p->p_pid != pid) {
1660cf7d9a8cSDavid Xu 			td = NULL;
1661cf7d9a8cSDavid Xu 			break;
1662cf7d9a8cSDavid Xu 		}
1663cf7d9a8cSDavid Xu 		if (run > RUN_THRESH) {
166426007fe3SMateusz Guzik 			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
1665cf7d9a8cSDavid Xu 				LIST_REMOVE(td, td_hash);
1666cf7d9a8cSDavid Xu 				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1667cf7d9a8cSDavid Xu 					td, td_hash);
166826007fe3SMateusz Guzik 				rw_wunlock(TIDHASHLOCK(tid));
1669aae3547bSMateusz Guzik 				locked = false;
1670aae3547bSMateusz Guzik 				break;
1671cf7d9a8cSDavid Xu 			}
1672cf7d9a8cSDavid Xu 		}
1673cf7d9a8cSDavid Xu 		break;
1674cf7d9a8cSDavid Xu 	}
1675aae3547bSMateusz Guzik 	if (locked)
167626007fe3SMateusz Guzik 		rw_runlock(TIDHASHLOCK(tid));
1677aae3547bSMateusz Guzik 	if (td == NULL)
1678aae3547bSMateusz Guzik 		return (false);
1679aae3547bSMateusz Guzik 	*pp = p;
1680aae3547bSMateusz Guzik 	*tdp = td;
1681aae3547bSMateusz Guzik 	return (true);
1682aae3547bSMateusz Guzik }
1683aae3547bSMateusz Guzik 
1684aae3547bSMateusz Guzik struct thread *
1685aae3547bSMateusz Guzik tdfind(lwpid_t tid, pid_t pid)
1686aae3547bSMateusz Guzik {
1687aae3547bSMateusz Guzik 	struct proc *p;
1688aae3547bSMateusz Guzik 	struct thread *td;
1689aae3547bSMateusz Guzik 
1690aae3547bSMateusz Guzik 	td = curthread;
1691aae3547bSMateusz Guzik 	if (td->td_tid == tid) {
1692aae3547bSMateusz Guzik 		if (pid != -1 && td->td_proc->p_pid != pid)
1693aae3547bSMateusz Guzik 			return (NULL);
1694aae3547bSMateusz Guzik 		PROC_LOCK(td->td_proc);
1695cf7d9a8cSDavid Xu 		return (td);
1696cf7d9a8cSDavid Xu 	}
1697cf7d9a8cSDavid Xu 
1698aae3547bSMateusz Guzik 	for (;;) {
1699aae3547bSMateusz Guzik 		if (!tdfind_hash(tid, pid, &p, &td))
1700aae3547bSMateusz Guzik 			return (NULL);
1701aae3547bSMateusz Guzik 		PROC_LOCK(p);
1702aae3547bSMateusz Guzik 		if (td->td_tid != tid) {
1703aae3547bSMateusz Guzik 			PROC_UNLOCK(p);
1704aae3547bSMateusz Guzik 			continue;
1705aae3547bSMateusz Guzik 		}
1706aae3547bSMateusz Guzik 		if (td->td_proc != p) {
1707aae3547bSMateusz Guzik 			PROC_UNLOCK(p);
1708aae3547bSMateusz Guzik 			continue;
1709aae3547bSMateusz Guzik 		}
1710aae3547bSMateusz Guzik 		if (p->p_state == PRS_NEW) {
1711aae3547bSMateusz Guzik 			PROC_UNLOCK(p);
1712aae3547bSMateusz Guzik 			return (NULL);
1713aae3547bSMateusz Guzik 		}
1714aae3547bSMateusz Guzik 		return (td);
1715aae3547bSMateusz Guzik 	}
1716aae3547bSMateusz Guzik }
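
/*
 * Illustrative sketch (not part of the original file): a typical
 * tdfind() consumer; the permission check is an assumed placeholder.
 *
 *	td = tdfind(tid, -1);		(match any process)
 *	if (td == NULL)
 *		return (ESRCH);
 *	p = td->td_proc;		(returned with p locked)
 *	error = p_cansee(curthread, p);
 *	...
 *	PROC_UNLOCK(p);
 */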
1717aae3547bSMateusz Guzik 
1718cf7d9a8cSDavid Xu void
1719cf7d9a8cSDavid Xu tidhash_add(struct thread *td)
1720cf7d9a8cSDavid Xu {
172126007fe3SMateusz Guzik 	rw_wlock(TIDHASHLOCK(td->td_tid));
1722cf7d9a8cSDavid Xu 	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
172326007fe3SMateusz Guzik 	rw_wunlock(TIDHASHLOCK(td->td_tid));
1724cf7d9a8cSDavid Xu }
1725cf7d9a8cSDavid Xu 
1726cf7d9a8cSDavid Xu void
1727cf7d9a8cSDavid Xu tidhash_remove(struct thread *td)
1728cf7d9a8cSDavid Xu {
172926007fe3SMateusz Guzik 
173026007fe3SMateusz Guzik 	rw_wlock(TIDHASHLOCK(td->td_tid));
1731cf7d9a8cSDavid Xu 	LIST_REMOVE(td, td_hash);
173226007fe3SMateusz Guzik 	rw_wunlock(TIDHASHLOCK(td->td_tid));
1733cf7d9a8cSDavid Xu }
1734