xref: /freebsd/sys/kern/kern_thread.c (revision a422084abbda10edc0b591021536a7c9c6d0c8b4)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
444990b8cSJulian Elischer  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
544990b8cSJulian Elischer  *  All rights reserved.
644990b8cSJulian Elischer  *
744990b8cSJulian Elischer  * Redistribution and use in source and binary forms, with or without
844990b8cSJulian Elischer  * modification, are permitted provided that the following conditions
944990b8cSJulian Elischer  * are met:
1044990b8cSJulian Elischer  * 1. Redistributions of source code must retain the above copyright
1144990b8cSJulian Elischer  *    notice(s), this list of conditions and the following disclaimer as
1244990b8cSJulian Elischer  *    the first lines of this file unmodified other than the possible
1344990b8cSJulian Elischer  *    addition of one or more copyright notices.
1444990b8cSJulian Elischer  * 2. Redistributions in binary form must reproduce the above copyright
1544990b8cSJulian Elischer  *    notice(s), this list of conditions and the following disclaimer in the
1644990b8cSJulian Elischer  *    documentation and/or other materials provided with the distribution.
1744990b8cSJulian Elischer  *
1844990b8cSJulian Elischer  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
1944990b8cSJulian Elischer  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2044990b8cSJulian Elischer  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2144990b8cSJulian Elischer  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
2244990b8cSJulian Elischer  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2344990b8cSJulian Elischer  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
2444990b8cSJulian Elischer  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
2544990b8cSJulian Elischer  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2644990b8cSJulian Elischer  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2744990b8cSJulian Elischer  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
2844990b8cSJulian Elischer  * DAMAGE.
2944990b8cSJulian Elischer  */
3044990b8cSJulian Elischer 
313d06b4b3SAttilio Rao #include "opt_witness.h"
3216d95d4fSJoseph Koshy #include "opt_hwpmc_hooks.h"
333d06b4b3SAttilio Rao 
34677b542eSDavid E. O'Brien #include <sys/cdefs.h>
35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
36677b542eSDavid E. O'Brien 
3744990b8cSJulian Elischer #include <sys/param.h>
3844990b8cSJulian Elischer #include <sys/systm.h>
3944990b8cSJulian Elischer #include <sys/kernel.h>
4044990b8cSJulian Elischer #include <sys/lock.h>
4144990b8cSJulian Elischer #include <sys/mutex.h>
4244990b8cSJulian Elischer #include <sys/proc.h>
4335bb59edSMateusz Guzik #include <sys/bitstring.h>
446febf180SGleb Smirnoff #include <sys/epoch.h>
458f0e9130SKonstantin Belousov #include <sys/rangelock.h>
46e170bfdaSDavid Xu #include <sys/resourcevar.h>
47b3e9e682SRyan Stone #include <sys/sdt.h>
4894e0a4cdSJulian Elischer #include <sys/smp.h>
49de028f5aSJeff Roberson #include <sys/sched.h>
5044f3b092SJohn Baldwin #include <sys/sleepqueue.h>
51ace8398dSJeff Roberson #include <sys/selinfo.h>
52d1e7a4a5SJohn Baldwin #include <sys/syscallsubr.h>
53598f2b81SMateusz Guzik #include <sys/dtrace_bsd.h>
5491d1786fSDmitry Chagin #include <sys/sysent.h>
55961a7b24SJohn Baldwin #include <sys/turnstile.h>
56d116b9f1SMateusz Guzik #include <sys/taskqueue.h>
5744990b8cSJulian Elischer #include <sys/ktr.h>
58cf7d9a8cSDavid Xu #include <sys/rwlock.h>
59af29f399SDmitry Chagin #include <sys/umtxvar.h>
609ed01c32SGleb Smirnoff #include <sys/vmmeter.h>
61d7f687fcSJeff Roberson #include <sys/cpuset.h>
6216d95d4fSJoseph Koshy #ifdef	HWPMC_HOOKS
6316d95d4fSJoseph Koshy #include <sys/pmckern.h>
6416d95d4fSJoseph Koshy #endif
651bd3cf5dSMateusz Guzik #include <sys/priv.h>
6644990b8cSJulian Elischer 
67911b84b0SRobert Watson #include <security/audit/audit.h>
68911b84b0SRobert Watson 
69d116b9f1SMateusz Guzik #include <vm/pmap.h>
7044990b8cSJulian Elischer #include <vm/vm.h>
7149a2507bSAlan Cox #include <vm/vm_extern.h>
7244990b8cSJulian Elischer #include <vm/uma.h>
73d116b9f1SMateusz Guzik #include <vm/vm_phys.h>
74b209f889SRandall Stewart #include <sys/eventhandler.h>
7502fb42b0SPeter Wemm 
76acd9f517SKonstantin Belousov /*
77acd9f517SKonstantin Belousov  * Asserts below verify the stability of struct thread and struct proc
78acd9f517SKonstantin Belousov  * layout, as exposed by KBI to modules.  On head, the KBI is allowed
79acd9f517SKonstantin Belousov  * to drift; changes to the structures must be accompanied by
80acd9f517SKonstantin Belousov  * corresponding updates to the asserts.
81acd9f517SKonstantin Belousov  *
82acd9f517SKonstantin Belousov  * On the stable branches after KBI freeze, conditions must not be
83acd9f517SKonstantin Belousov  * violated.  Typically new fields are moved to the end of the
84acd9f517SKonstantin Belousov  * structures.
85acd9f517SKonstantin Belousov  */
86acd9f517SKonstantin Belousov #ifdef __amd64__
87*a422084aSMark Johnston _Static_assert(offsetof(struct thread, td_flags) == 0x108,
88acd9f517SKonstantin Belousov     "struct thread KBI td_flags");
89*a422084aSMark Johnston _Static_assert(offsetof(struct thread, td_pflags) == 0x110,
90acd9f517SKonstantin Belousov     "struct thread KBI td_pflags");
91*a422084aSMark Johnston _Static_assert(offsetof(struct thread, td_frame) == 0x4a8,
92acd9f517SKonstantin Belousov     "struct thread KBI td_frame");
931724c563SMateusz Guzik _Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
94acd9f517SKonstantin Belousov     "struct thread KBI td_emuldata");
9585078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_flag) == 0xb8,
96acd9f517SKonstantin Belousov     "struct proc KBI p_flag");
9785078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_pid) == 0xc4,
98acd9f517SKonstantin Belousov     "struct proc KBI p_pid");
991762f674SKonstantin Belousov _Static_assert(offsetof(struct proc, p_filemon) == 0x3b8,
100acd9f517SKonstantin Belousov     "struct proc KBI p_filemon");
1011762f674SKonstantin Belousov _Static_assert(offsetof(struct proc, p_comm) == 0x3d0,
102acd9f517SKonstantin Belousov     "struct proc KBI p_comm");
103615f22b2SDmitry Chagin _Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8,
104acd9f517SKonstantin Belousov     "struct proc KBI p_emuldata");
105acd9f517SKonstantin Belousov #endif
106acd9f517SKonstantin Belousov #ifdef __i386__
107*a422084aSMark Johnston _Static_assert(offsetof(struct thread, td_flags) == 0x9c,
108acd9f517SKonstantin Belousov     "struct thread KBI td_flags");
109*a422084aSMark Johnston _Static_assert(offsetof(struct thread, td_pflags) == 0xa4,
110acd9f517SKonstantin Belousov     "struct thread KBI td_pflags");
111*a422084aSMark Johnston _Static_assert(offsetof(struct thread, td_frame) == 0x308,
112acd9f517SKonstantin Belousov     "struct thread KBI td_frame");
113*a422084aSMark Johnston _Static_assert(offsetof(struct thread, td_emuldata) == 0x34c,
114acd9f517SKonstantin Belousov     "struct thread KBI td_emuldata");
11585078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_flag) == 0x6c,
116acd9f517SKonstantin Belousov     "struct proc KBI p_flag");
11785078b85SConrad Meyer _Static_assert(offsetof(struct proc, p_pid) == 0x78,
118acd9f517SKonstantin Belousov     "struct proc KBI p_pid");
1191762f674SKonstantin Belousov _Static_assert(offsetof(struct proc, p_filemon) == 0x268,
120acd9f517SKonstantin Belousov     "struct proc KBI p_filemon");
1211762f674SKonstantin Belousov _Static_assert(offsetof(struct proc, p_comm) == 0x27c,
122acd9f517SKonstantin Belousov     "struct proc KBI p_comm");
1235d9f7901SDmitry Chagin _Static_assert(offsetof(struct proc, p_emuldata) == 0x308,
124acd9f517SKonstantin Belousov     "struct proc KBI p_emuldata");
125acd9f517SKonstantin Belousov #endif
126acd9f517SKonstantin Belousov 
127b3e9e682SRyan Stone SDT_PROVIDER_DECLARE(proc);
128d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(proc, , , lwp__exit);
129b3e9e682SRyan Stone 
1308460a577SJohn Birrell /*
1318460a577SJohn Birrell  * Thread-related storage.
1328460a577SJohn Birrell  */
13344990b8cSJulian Elischer static uma_zone_t thread_zone;
13444990b8cSJulian Elischer 
135d116b9f1SMateusz Guzik struct thread_domain_data {
136d116b9f1SMateusz Guzik 	struct thread	*tdd_zombies;
137d116b9f1SMateusz Guzik 	int		tdd_reapticks;
138d116b9f1SMateusz Guzik } __aligned(CACHE_LINE_SIZE);
139d116b9f1SMateusz Guzik 
140d116b9f1SMateusz Guzik static struct thread_domain_data thread_domain_data[MAXMEMDOM];
141d116b9f1SMateusz Guzik 
142d116b9f1SMateusz Guzik static struct task	thread_reap_task;
143d116b9f1SMateusz Guzik static struct callout  	thread_reap_callout;
14444990b8cSJulian Elischer 
145ff8fbcffSJeff Roberson static void thread_zombie(struct thread *);
146b83e94beSMateusz Guzik static void thread_reap(void);
147d116b9f1SMateusz Guzik static void thread_reap_all(void);
148d116b9f1SMateusz Guzik static void thread_reap_task_cb(void *, int);
149d116b9f1SMateusz Guzik static void thread_reap_callout_cb(void *);
15084cdea97SKonstantin Belousov static int thread_unsuspend_one(struct thread *td, struct proc *p,
15184cdea97SKonstantin Belousov     bool boundary);
152755341dfSMateusz Guzik static void thread_free_batched(struct thread *td);
153ff8fbcffSJeff Roberson 
154d1ca25beSMateusz Guzik static __exclusive_cache_line struct mtx tid_lock;
155934e7e5eSMateusz Guzik static bitstr_t *tid_bitmap;
15635bb59edSMateusz Guzik 
157cf7d9a8cSDavid Xu static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
158cf7d9a8cSDavid Xu 
1591bd3cf5dSMateusz Guzik static int maxthread;
1601bd3cf5dSMateusz Guzik SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
1611bd3cf5dSMateusz Guzik     &maxthread, 0, "Maximum number of threads");
1621bd3cf5dSMateusz Guzik 
16362dbc992SMateusz Guzik static __exclusive_cache_line int nthreads;
1641bd3cf5dSMateusz Guzik 
165aae3547bSMateusz Guzik static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
166aae3547bSMateusz Guzik static u_long	tidhash;
16726007fe3SMateusz Guzik static u_long	tidhashlock;
16826007fe3SMateusz Guzik static struct	rwlock *tidhashtbl_lock;
169aae3547bSMateusz Guzik #define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
17026007fe3SMateusz Guzik #define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])
171cf7d9a8cSDavid Xu 
1722ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_ctor);
1732ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_dtor);
1742ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_init);
1752ca45184SMatt Joras EVENTHANDLER_LIST_DEFINE(thread_fini);
1762ca45184SMatt Joras 
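/*
 * Optimistically bump the global thread count.  The last 100 slots are
 * reserved for callers holding PRIV_MAXPROC; everyone else, and even
 * privileged callers once maxthread itself is reached, has the increment
 * rolled back and gets a failure.
 */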
17762dbc992SMateusz Guzik static bool
178d116b9f1SMateusz Guzik thread_count_inc_try(void)
179ec6ea5e8SDavid Xu {
18062dbc992SMateusz Guzik 	int nthreads_new;
181ec6ea5e8SDavid Xu 
18262dbc992SMateusz Guzik 	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
18362dbc992SMateusz Guzik 	if (nthreads_new >= maxthread - 100) {
1841bd3cf5dSMateusz Guzik 		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
18562dbc992SMateusz Guzik 		    nthreads_new >= maxthread) {
18662dbc992SMateusz Guzik 			atomic_subtract_int(&nthreads, 1);
187d116b9f1SMateusz Guzik 			return (false);
188d116b9f1SMateusz Guzik 		}
189d116b9f1SMateusz Guzik 	}
190d116b9f1SMateusz Guzik 	return (true);
191d116b9f1SMateusz Guzik }
192d116b9f1SMateusz Guzik 
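/*
 * Claim a slot for a new thread, trying progressively harder to make
 * room: reap the local domain's zombies first, then fall back to reaping
 * all domains before giving up with a rate-limited warning.
 */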
193d116b9f1SMateusz Guzik static bool
194d116b9f1SMateusz Guzik thread_count_inc(void)
195d116b9f1SMateusz Guzik {
196d116b9f1SMateusz Guzik 	static struct timeval lastfail;
197d116b9f1SMateusz Guzik 	static int curfail;
198d116b9f1SMateusz Guzik 
199d116b9f1SMateusz Guzik 	thread_reap();
200d116b9f1SMateusz Guzik 	if (thread_count_inc_try()) {
201d116b9f1SMateusz Guzik 		return (true);
202d116b9f1SMateusz Guzik 	}
203d116b9f1SMateusz Guzik 
204d116b9f1SMateusz Guzik 	thread_reap_all();
205d116b9f1SMateusz Guzik 	if (thread_count_inc_try()) {
206d116b9f1SMateusz Guzik 		return (true);
207d116b9f1SMateusz Guzik 	}
208d116b9f1SMateusz Guzik 
2091bd3cf5dSMateusz Guzik 	if (ppsratecheck(&lastfail, &curfail, 1)) {
2101bd3cf5dSMateusz Guzik 		printf("maxthread limit exceeded by uid %u "
2111bd3cf5dSMateusz Guzik 		    "(pid %d); consider increasing kern.maxthread\n",
2121bd3cf5dSMateusz Guzik 		    curthread->td_ucred->cr_ruid, curproc->p_pid);
2131bd3cf5dSMateusz Guzik 	}
21462dbc992SMateusz Guzik 	return (false);
2151bd3cf5dSMateusz Guzik }
2161bd3cf5dSMateusz Guzik 
21762dbc992SMateusz Guzik static void
21862dbc992SMateusz Guzik thread_count_sub(int n)
21962dbc992SMateusz Guzik {
22062dbc992SMateusz Guzik 
22162dbc992SMateusz Guzik 	atomic_subtract_int(&nthreads, n);
22262dbc992SMateusz Guzik }
22362dbc992SMateusz Guzik 
22462dbc992SMateusz Guzik static void
22562dbc992SMateusz Guzik thread_count_dec(void)
22662dbc992SMateusz Guzik {
22762dbc992SMateusz Guzik 
22862dbc992SMateusz Guzik 	thread_count_sub(1);
22962dbc992SMateusz Guzik }
23062dbc992SMateusz Guzik 
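/*
 * Allocate a thread ID.  IDs come from a bitmap sized for maxthread
 * entries; the search resumes where the previous allocation left off and
 * wraps around at most once.  The bitmap index is offset by NO_PID so
 * that thread IDs do not collide with process IDs.
 */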
23162dbc992SMateusz Guzik static lwpid_t
23262dbc992SMateusz Guzik tid_alloc(void)
23362dbc992SMateusz Guzik {
23462dbc992SMateusz Guzik 	static lwpid_t trytid;
23562dbc992SMateusz Guzik 	lwpid_t tid;
23662dbc992SMateusz Guzik 
23762dbc992SMateusz Guzik 	mtx_lock(&tid_lock);
23835bb59edSMateusz Guzik 	/*
23935bb59edSMateusz Guzik 	 * It is an invariant that the bitmap is big enough to hold maxthread
24035bb59edSMateusz Guzik 	 * IDs. If we got to this point there has to be at least one free.
24135bb59edSMateusz Guzik 	 */
24235bb59edSMateusz Guzik 	if (trytid >= maxthread)
24335bb59edSMateusz Guzik 		trytid = 0;
24435bb59edSMateusz Guzik 	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
24535bb59edSMateusz Guzik 	if (tid == -1) {
24635bb59edSMateusz Guzik 		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
24735bb59edSMateusz Guzik 		trytid = 0;
24835bb59edSMateusz Guzik 		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
24935bb59edSMateusz Guzik 		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
250ec6ea5e8SDavid Xu 	}
25135bb59edSMateusz Guzik 	bit_set(tid_bitmap, tid);
252934e7e5eSMateusz Guzik 	trytid = tid + 1;
253ec6ea5e8SDavid Xu 	mtx_unlock(&tid_lock);
25435bb59edSMateusz Guzik 	return (tid + NO_PID);
255ec6ea5e8SDavid Xu }
256ec6ea5e8SDavid Xu 
257ec6ea5e8SDavid Xu static void
258755341dfSMateusz Guzik tid_free_locked(lwpid_t rtid)
259ec6ea5e8SDavid Xu {
26035bb59edSMateusz Guzik 	lwpid_t tid;
261ec6ea5e8SDavid Xu 
262755341dfSMateusz Guzik 	mtx_assert(&tid_lock, MA_OWNED);
26335bb59edSMateusz Guzik 	KASSERT(rtid >= NO_PID,
26435bb59edSMateusz Guzik 	    ("%s: invalid tid %d\n", __func__, rtid));
26535bb59edSMateusz Guzik 	tid = rtid - NO_PID;
26635bb59edSMateusz Guzik 	KASSERT(bit_test(tid_bitmap, tid) != 0,
26735bb59edSMateusz Guzik 	    ("thread ID %d not allocated\n", rtid));
26835bb59edSMateusz Guzik 	bit_clear(tid_bitmap, tid);
269755341dfSMateusz Guzik }
270755341dfSMateusz Guzik 
271755341dfSMateusz Guzik static void
272755341dfSMateusz Guzik tid_free(lwpid_t rtid)
273755341dfSMateusz Guzik {
274755341dfSMateusz Guzik 
275755341dfSMateusz Guzik 	mtx_lock(&tid_lock);
276755341dfSMateusz Guzik 	tid_free_locked(rtid);
277755341dfSMateusz Guzik 	mtx_unlock(&tid_lock);
278755341dfSMateusz Guzik }
279755341dfSMateusz Guzik 
280755341dfSMateusz Guzik static void
281755341dfSMateusz Guzik tid_free_batch(lwpid_t *batch, int n)
282755341dfSMateusz Guzik {
283755341dfSMateusz Guzik 	int i;
284755341dfSMateusz Guzik 
285755341dfSMateusz Guzik 	mtx_lock(&tid_lock);
286755341dfSMateusz Guzik 	for (i = 0; i < n; i++) {
287755341dfSMateusz Guzik 		tid_free_locked(batch[i]);
288755341dfSMateusz Guzik 	}
289ec6ea5e8SDavid Xu 	mtx_unlock(&tid_lock);
290ec6ea5e8SDavid Xu }
291ec6ea5e8SDavid Xu 
292fdcac928SMarcel Moolenaar /*
2935ef7b7a0SMateusz Guzik  * Batching for thread reaping.
2945ef7b7a0SMateusz Guzik  */
2955ef7b7a0SMateusz Guzik struct tidbatch {
2965ef7b7a0SMateusz Guzik 	lwpid_t tab[16];
2975ef7b7a0SMateusz Guzik 	int n;
2985ef7b7a0SMateusz Guzik };
2995ef7b7a0SMateusz Guzik 
3005ef7b7a0SMateusz Guzik static void
3015ef7b7a0SMateusz Guzik tidbatch_prep(struct tidbatch *tb)
3025ef7b7a0SMateusz Guzik {
3035ef7b7a0SMateusz Guzik 
3045ef7b7a0SMateusz Guzik 	tb->n = 0;
3055ef7b7a0SMateusz Guzik }
3065ef7b7a0SMateusz Guzik 
3075ef7b7a0SMateusz Guzik static void
3085ef7b7a0SMateusz Guzik tidbatch_add(struct tidbatch *tb, struct thread *td)
3095ef7b7a0SMateusz Guzik {
3105ef7b7a0SMateusz Guzik 
3115ef7b7a0SMateusz Guzik 	KASSERT(tb->n < nitems(tb->tab),
3125ef7b7a0SMateusz Guzik 	    ("%s: count too high %d", __func__, tb->n));
3135ef7b7a0SMateusz Guzik 	tb->tab[tb->n] = td->td_tid;
3145ef7b7a0SMateusz Guzik 	tb->n++;
3155ef7b7a0SMateusz Guzik }
3165ef7b7a0SMateusz Guzik 
3175ef7b7a0SMateusz Guzik static void
3185ef7b7a0SMateusz Guzik tidbatch_process(struct tidbatch *tb)
3195ef7b7a0SMateusz Guzik {
3205ef7b7a0SMateusz Guzik 
3215ef7b7a0SMateusz Guzik 	KASSERT(tb->n <= nitems(tb->tab),
3225ef7b7a0SMateusz Guzik 	    ("%s: count too high %d", __func__, tb->n));
3235ef7b7a0SMateusz Guzik 	if (tb->n == nitems(tb->tab)) {
3245ef7b7a0SMateusz Guzik 		tid_free_batch(tb->tab, tb->n);
3255ef7b7a0SMateusz Guzik 		tb->n = 0;
3265ef7b7a0SMateusz Guzik 	}
3275ef7b7a0SMateusz Guzik }
3285ef7b7a0SMateusz Guzik 
3295ef7b7a0SMateusz Guzik static void
3305ef7b7a0SMateusz Guzik tidbatch_final(struct tidbatch *tb)
3315ef7b7a0SMateusz Guzik {
3325ef7b7a0SMateusz Guzik 
3335ef7b7a0SMateusz Guzik 	KASSERT(tb->n <= nitems(tb->tab),
3345ef7b7a0SMateusz Guzik 	    ("%s: count too high %d", __func__, tb->n));
3355ef7b7a0SMateusz Guzik 	if (tb->n != 0) {
3365ef7b7a0SMateusz Guzik 		tid_free_batch(tb->tab, tb->n);
3375ef7b7a0SMateusz Guzik 	}
3385ef7b7a0SMateusz Guzik }
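/*
 * The helpers above amortize tid_lock traffic when many threads are
 * reaped at once: thread IDs are buffered in a small on-stack array and
 * returned to the bitmap in batches.  A rough usage sketch (this mirrors
 * thread_reap_domain() below; next_zombie() is a hypothetical iterator):
 *
 *	struct tidbatch tb;
 *	struct thread *td;
 *
 *	tidbatch_prep(&tb);
 *	while ((td = next_zombie()) != NULL) {
 *		tidbatch_add(&tb, td);
 *		tidbatch_process(&tb);	(flushes only when the buffer fills)
 *	}
 *	tidbatch_final(&tb);		(flushes whatever remains)
 */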
3395ef7b7a0SMateusz Guzik 
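/*
 * The four callbacks below follow the usual UMA split: thread_ctor() and
 * thread_dtor() run each time a thread is taken from or returned to
 * thread_zone, while thread_init() and thread_fini() run only when an
 * item enters or leaves the zone itself.  Since the zone is created with
 * UMA_ZONE_NOFREE (see threadinit()), the init-time state (sleep queue,
 * turnstile, etc.) is effectively type-stable.
 */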
3405ef7b7a0SMateusz Guzik /*
341696058c3SJulian Elischer  * Prepare a thread for use.
34244990b8cSJulian Elischer  */
343b23f72e9SBrian Feldman static int
344b23f72e9SBrian Feldman thread_ctor(void *mem, int size, void *arg, int flags)
34544990b8cSJulian Elischer {
34644990b8cSJulian Elischer 	struct thread	*td;
34744990b8cSJulian Elischer 
34844990b8cSJulian Elischer 	td = (struct thread *)mem;
349fa2528acSAlex Richardson 	TD_SET_STATE(td, TDS_INACTIVE);
35094dd54b9SKonstantin Belousov 	td->td_lastcpu = td->td_oncpu = NOCPU;
3516c27c603SJuli Mallett 
3526c27c603SJuli Mallett 	/*
3536c27c603SJuli Mallett 	 * Note that td_critnest begins life as 1 because the thread is not
3546c27c603SJuli Mallett 	 * running and is thereby implicitly waiting to be on the receiving
355a54e85fdSJeff Roberson 	 * end of a context switch.
3566c27c603SJuli Mallett 	 */
357139b7550SJohn Baldwin 	td->td_critnest = 1;
358acbe332aSDavid Xu 	td->td_lend_user_pri = PRI_MAX;
359911b84b0SRobert Watson #ifdef AUDIT
360911b84b0SRobert Watson 	audit_thread_alloc(td);
361911b84b0SRobert Watson #endif
362598f2b81SMateusz Guzik #ifdef KDTRACE_HOOKS
363598f2b81SMateusz Guzik 	kdtrace_thread_ctor(td);
364598f2b81SMateusz Guzik #endif
365d10183d9SDavid Xu 	umtx_thread_alloc(td);
36619d3e47dSMateusz Guzik 	MPASS(td->td_sel == NULL);
367b23f72e9SBrian Feldman 	return (0);
36844990b8cSJulian Elischer }
36944990b8cSJulian Elischer 
37044990b8cSJulian Elischer /*
37144990b8cSJulian Elischer  * Reclaim a thread after use.
37244990b8cSJulian Elischer  */
37344990b8cSJulian Elischer static void
37444990b8cSJulian Elischer thread_dtor(void *mem, int size, void *arg)
37544990b8cSJulian Elischer {
37644990b8cSJulian Elischer 	struct thread *td;
37744990b8cSJulian Elischer 
37844990b8cSJulian Elischer 	td = (struct thread *)mem;
37944990b8cSJulian Elischer 
38044990b8cSJulian Elischer #ifdef INVARIANTS
38144990b8cSJulian Elischer 	/* Verify that this thread is in a safe state to free. */
382fa2528acSAlex Richardson 	switch (TD_GET_STATE(td)) {
38371fad9fdSJulian Elischer 	case TDS_INHIBITED:
38471fad9fdSJulian Elischer 	case TDS_RUNNING:
38571fad9fdSJulian Elischer 	case TDS_CAN_RUN:
38644990b8cSJulian Elischer 	case TDS_RUNQ:
38744990b8cSJulian Elischer 		/*
38844990b8cSJulian Elischer 		 * We must never unlink a thread that is in one of
38944990b8cSJulian Elischer 		 * these states, because it is currently active.
39044990b8cSJulian Elischer 		 */
39144990b8cSJulian Elischer 		panic("bad state for thread unlinking");
39244990b8cSJulian Elischer 		/* NOTREACHED */
39371fad9fdSJulian Elischer 	case TDS_INACTIVE:
39444990b8cSJulian Elischer 		break;
39544990b8cSJulian Elischer 	default:
39644990b8cSJulian Elischer 		panic("bad thread state");
39744990b8cSJulian Elischer 		/* NOTREACHED */
39844990b8cSJulian Elischer 	}
39944990b8cSJulian Elischer #endif
4006e8525ceSRobert Watson #ifdef AUDIT
4016e8525ceSRobert Watson 	audit_thread_free(td);
4026e8525ceSRobert Watson #endif
403598f2b81SMateusz Guzik #ifdef KDTRACE_HOOKS
404598f2b81SMateusz Guzik 	kdtrace_thread_dtor(td);
405598f2b81SMateusz Guzik #endif
4061ba4a712SPawel Jakub Dawidek 	/* Free all OSD associated to this thread. */
4071ba4a712SPawel Jakub Dawidek 	osd_thread_exit(td);
408aca4bb91SKonstantin Belousov 	td_softdep_cleanup(td);
409aca4bb91SKonstantin Belousov 	MPASS(td->td_su == NULL);
41019d3e47dSMateusz Guzik 	seltdfini(td);
41144990b8cSJulian Elischer }
41244990b8cSJulian Elischer 
41344990b8cSJulian Elischer /*
41444990b8cSJulian Elischer  * Initialize type-stable parts of a thread (when newly created).
41544990b8cSJulian Elischer  */
416b23f72e9SBrian Feldman static int
417b23f72e9SBrian Feldman thread_init(void *mem, int size, int flags)
41844990b8cSJulian Elischer {
41944990b8cSJulian Elischer 	struct thread *td;
42044990b8cSJulian Elischer 
42144990b8cSJulian Elischer 	td = (struct thread *)mem;
422247aba24SMarcel Moolenaar 
423b83e94beSMateusz Guzik 	td->td_allocdomain = vm_phys_domain(vtophys(td));
42444f3b092SJohn Baldwin 	td->td_sleepqueue = sleepq_alloc();
425961a7b24SJohn Baldwin 	td->td_turnstile = turnstile_alloc();
4268f0e9130SKonstantin Belousov 	td->td_rlqe = NULL;
4272ca45184SMatt Joras 	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
428d10183d9SDavid Xu 	umtx_thread_init(td);
42989b57fcfSKonstantin Belousov 	td->td_kstack = 0;
430ad8b1d85SKonstantin Belousov 	td->td_sel = NULL;
431b23f72e9SBrian Feldman 	return (0);
43244990b8cSJulian Elischer }
43344990b8cSJulian Elischer 
43444990b8cSJulian Elischer /*
43544990b8cSJulian Elischer  * Tear down type-stable parts of a thread (just before being discarded).
43644990b8cSJulian Elischer  */
43744990b8cSJulian Elischer static void
43844990b8cSJulian Elischer thread_fini(void *mem, int size)
43944990b8cSJulian Elischer {
44044990b8cSJulian Elischer 	struct thread *td;
44144990b8cSJulian Elischer 
44244990b8cSJulian Elischer 	td = (struct thread *)mem;
4432ca45184SMatt Joras 	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
4448f0e9130SKonstantin Belousov 	rlqentry_free(td->td_rlqe);
445961a7b24SJohn Baldwin 	turnstile_free(td->td_turnstile);
44644f3b092SJohn Baldwin 	sleepq_free(td->td_sleepqueue);
447d10183d9SDavid Xu 	umtx_thread_fini(td);
44819d3e47dSMateusz Guzik 	MPASS(td->td_sel == NULL);
44944990b8cSJulian Elischer }
4505215b187SJeff Roberson 
4515c8329edSJulian Elischer /*
4525215b187SJeff Roberson  * For a newly created process,
4535215b187SJeff Roberson  * link up all the structures and its initial threads etc.
454ed062c8dSJulian Elischer  * Called from:
455e7d939bdSMarcel Moolenaar  * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
456ed062c8dSJulian Elischer  * proc_dtor() (should go away)
457ed062c8dSJulian Elischer  * proc_init()
4585c8329edSJulian Elischer  */
4595c8329edSJulian Elischer void
46089b57fcfSKonstantin Belousov proc_linkup0(struct proc *p, struct thread *td)
46189b57fcfSKonstantin Belousov {
46289b57fcfSKonstantin Belousov 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
46389b57fcfSKonstantin Belousov 	proc_linkup(p, td);
46489b57fcfSKonstantin Belousov }
46589b57fcfSKonstantin Belousov 
46689b57fcfSKonstantin Belousov void
4678460a577SJohn Birrell proc_linkup(struct proc *p, struct thread *td)
4685c8329edSJulian Elischer {
469a54e85fdSJeff Roberson 
4709104847fSDavid Xu 	sigqueue_init(&p->p_sigqueue, p);
471ebceaf6dSDavid Xu 	p->p_ksi = ksiginfo_alloc(1);
472ebceaf6dSDavid Xu 	if (p->p_ksi != NULL) {
4735c474517SDavid Xu 		/* XXX p_ksi may be null if ksiginfo zone is not ready */
474ebceaf6dSDavid Xu 		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
475ebceaf6dSDavid Xu 	}
476b2f92ef9SDavid Xu 	LIST_INIT(&p->p_mqnotifier);
4775c8329edSJulian Elischer 	p->p_numthreads = 0;
4788460a577SJohn Birrell 	thread_link(td, p);
4795c8329edSJulian Elischer }
4805c8329edSJulian Elischer 
4811bd3cf5dSMateusz Guzik extern int max_threads_per_proc;
4821bd3cf5dSMateusz Guzik 
4835c8329edSJulian Elischer /*
48444990b8cSJulian Elischer  * Initialize global thread allocation resources.
48544990b8cSJulian Elischer  */
48644990b8cSJulian Elischer void
48744990b8cSJulian Elischer threadinit(void)
48844990b8cSJulian Elischer {
48926007fe3SMateusz Guzik 	u_long i;
490cf31cadeSMateusz Guzik 	lwpid_t tid0;
4915aa5420fSMark Johnston 	uint32_t flags;
49244990b8cSJulian Elischer 
4931bd3cf5dSMateusz Guzik 	/*
4941bd3cf5dSMateusz Guzik 	 * Place an upper limit on threads which can be allocated.
4951bd3cf5dSMateusz Guzik 	 *
4961bd3cf5dSMateusz Guzik 	 * Note that other factors may make the de facto limit much lower.
4971bd3cf5dSMateusz Guzik 	 *
4981bd3cf5dSMateusz Guzik 	 * Platform limits are somewhat arbitrary but deemed "more than good
4991bd3cf5dSMateusz Guzik 	 * enough" for the foreseeable future.
5001bd3cf5dSMateusz Guzik 	 */
5011bd3cf5dSMateusz Guzik 	if (maxthread == 0) {
5021bd3cf5dSMateusz Guzik #ifdef _LP64
5031bd3cf5dSMateusz Guzik 		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
5041bd3cf5dSMateusz Guzik #else
5051bd3cf5dSMateusz Guzik 		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
5061bd3cf5dSMateusz Guzik #endif
5071bd3cf5dSMateusz Guzik 	}
5081bd3cf5dSMateusz Guzik 
5091ea7a6f8SPoul-Henning Kamp 	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
51035bb59edSMateusz Guzik 	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
51162dbc992SMateusz Guzik 	/*
51262dbc992SMateusz Guzik 	 * Handle thread0.
51362dbc992SMateusz Guzik 	 */
51462dbc992SMateusz Guzik 	thread_count_inc();
515cf31cadeSMateusz Guzik 	tid0 = tid_alloc();
516cf31cadeSMateusz Guzik 	if (tid0 != THREAD0_TID)
517cf31cadeSMateusz Guzik 		panic("tid0 %d != %d\n", tid0, THREAD0_TID);
5181ea7a6f8SPoul-Henning Kamp 
5195aa5420fSMark Johnston 	flags = UMA_ZONE_NOFREE;
5205aa5420fSMark Johnston #ifdef __aarch64__
5215aa5420fSMark Johnston 	/*
5225aa5420fSMark Johnston 	 * Force thread structures to be allocated from the direct map.
5235aa5420fSMark Johnston 	 * Otherwise, superpage promotions and demotions may temporarily
5245aa5420fSMark Johnston 	 * invalidate thread structure mappings.  For most dynamically allocated
5255aa5420fSMark Johnston 	 * structures this is not a problem, but translation faults cannot be
5265aa5420fSMark Johnston 	 * handled without accessing curthread.
5275aa5420fSMark Johnston 	 */
5285aa5420fSMark Johnston 	flags |= UMA_ZONE_CONTIG;
5295aa5420fSMark Johnston #endif
530de028f5aSJeff Roberson 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
53144990b8cSJulian Elischer 	    thread_ctor, thread_dtor, thread_init, thread_fini,
5325aa5420fSMark Johnston 	    32 - 1, flags);
533cf7d9a8cSDavid Xu 	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
53426007fe3SMateusz Guzik 	tidhashlock = (tidhash + 1) / 64;
53526007fe3SMateusz Guzik 	if (tidhashlock > 0)
53626007fe3SMateusz Guzik 		tidhashlock--;
53726007fe3SMateusz Guzik 	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
53826007fe3SMateusz Guzik 	    M_TIDHASH, M_WAITOK | M_ZERO);
53926007fe3SMateusz Guzik 	for (i = 0; i < tidhashlock + 1; i++)
54026007fe3SMateusz Guzik 		rw_init(&tidhashtbl_lock[i], "tidhash");
541d116b9f1SMateusz Guzik 
542d116b9f1SMateusz Guzik 	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
543d116b9f1SMateusz Guzik 	callout_init(&thread_reap_callout, 1);
544845d7797SKonstantin Belousov 	callout_reset(&thread_reap_callout, 5 * hz,
545845d7797SKonstantin Belousov 	    thread_reap_callout_cb, NULL);
54644990b8cSJulian Elischer }
54744990b8cSJulian Elischer 
54844990b8cSJulian Elischer /*
549ff8fbcffSJeff Roberson  * Place an unused thread on the zombie list.
55044990b8cSJulian Elischer  */
55144990b8cSJulian Elischer void
552ff8fbcffSJeff Roberson thread_zombie(struct thread *td)
55344990b8cSJulian Elischer {
554d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
555c5315f51SMateusz Guzik 	struct thread *ztd;
556c5315f51SMateusz Guzik 
557a9568cd2SMateusz Guzik 	tdd = &thread_domain_data[td->td_allocdomain];
558d116b9f1SMateusz Guzik 	ztd = atomic_load_ptr(&tdd->tdd_zombies);
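	/*
	 * Lock-free push onto this domain's zombie list: link the thread to
	 * the currently observed head and publish it with a release CAS,
	 * retrying with the refreshed head on failure.
	 */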
559c5315f51SMateusz Guzik 	for (;;) {
560c5315f51SMateusz Guzik 		td->td_zombie = ztd;
561d116b9f1SMateusz Guzik 		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
562c5315f51SMateusz Guzik 		    (uintptr_t *)&ztd, (uintptr_t)td))
563c5315f51SMateusz Guzik 			break;
564c5315f51SMateusz Guzik 		continue;
565c5315f51SMateusz Guzik 	}
56644990b8cSJulian Elischer }
56744990b8cSJulian Elischer 
5685c8329edSJulian Elischer /*
569ff8fbcffSJeff Roberson  * Release a thread that has exited after cpu_throw().
570ff8fbcffSJeff Roberson  */
571ff8fbcffSJeff Roberson void
572ff8fbcffSJeff Roberson thread_stash(struct thread *td)
573ff8fbcffSJeff Roberson {
574ff8fbcffSJeff Roberson 	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
575ff8fbcffSJeff Roberson 	thread_zombie(td);
576ff8fbcffSJeff Roberson }
577ff8fbcffSJeff Roberson 
578ff8fbcffSJeff Roberson /*
579d116b9f1SMateusz Guzik  * Reap zombies from the passed domain.
58044990b8cSJulian Elischer  */
581d116b9f1SMateusz Guzik static void
582d116b9f1SMateusz Guzik thread_reap_domain(struct thread_domain_data *tdd)
58344990b8cSJulian Elischer {
584c5315f51SMateusz Guzik 	struct thread *itd, *ntd;
5855ef7b7a0SMateusz Guzik 	struct tidbatch tidbatch;
586f34a2f56SMateusz Guzik 	struct credbatch credbatch;
5875ef7b7a0SMateusz Guzik 	int tdcount;
588fb8ab680SMateusz Guzik 	struct plimit *lim;
589fb8ab680SMateusz Guzik 	int limcount;
59044990b8cSJulian Elischer 
59144990b8cSJulian Elischer 	/*
592c5315f51SMateusz Guzik 	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
593c5315f51SMateusz Guzik 	 * but most of the time the list is empty.
59444990b8cSJulian Elischer 	 */
595d116b9f1SMateusz Guzik 	if (tdd->tdd_zombies == NULL)
596c5315f51SMateusz Guzik 		return;
597c5315f51SMateusz Guzik 
598d116b9f1SMateusz Guzik 	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
599c5315f51SMateusz Guzik 	    (uintptr_t)NULL);
6005ef7b7a0SMateusz Guzik 	if (itd == NULL)
6015ef7b7a0SMateusz Guzik 		return;
6025ef7b7a0SMateusz Guzik 
603d116b9f1SMateusz Guzik 	/*
604d116b9f1SMateusz Guzik 	 * Multiple CPUs can get here; the race is fine as ticks is only
605d116b9f1SMateusz Guzik 	 * advisory.
606d116b9f1SMateusz Guzik 	 */
607d116b9f1SMateusz Guzik 	tdd->tdd_reapticks = ticks;
608d116b9f1SMateusz Guzik 
6095ef7b7a0SMateusz Guzik 	tidbatch_prep(&tidbatch);
610f34a2f56SMateusz Guzik 	credbatch_prep(&credbatch);
6115ef7b7a0SMateusz Guzik 	tdcount = 0;
612fb8ab680SMateusz Guzik 	lim = NULL;
613fb8ab680SMateusz Guzik 	limcount = 0;
614d116b9f1SMateusz Guzik 
615c5315f51SMateusz Guzik 	while (itd != NULL) {
616c5315f51SMateusz Guzik 		ntd = itd->td_zombie;
6175ef7b7a0SMateusz Guzik 		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
6185ef7b7a0SMateusz Guzik 		tidbatch_add(&tidbatch, itd);
619f34a2f56SMateusz Guzik 		credbatch_add(&credbatch, itd);
620fb8ab680SMateusz Guzik 		MPASS(itd->td_limit != NULL);
621fb8ab680SMateusz Guzik 		if (lim != itd->td_limit) {
622fb8ab680SMateusz Guzik 			if (limcount != 0) {
623fb8ab680SMateusz Guzik 				lim_freen(lim, limcount);
624fb8ab680SMateusz Guzik 				limcount = 0;
625fb8ab680SMateusz Guzik 			}
626fb8ab680SMateusz Guzik 		}
627fb8ab680SMateusz Guzik 		lim = itd->td_limit;
628fb8ab680SMateusz Guzik 		limcount++;
629755341dfSMateusz Guzik 		thread_free_batched(itd);
6305ef7b7a0SMateusz Guzik 		tidbatch_process(&tidbatch);
631f34a2f56SMateusz Guzik 		credbatch_process(&credbatch);
6325ef7b7a0SMateusz Guzik 		tdcount++;
6335ef7b7a0SMateusz Guzik 		if (tdcount == 32) {
6345ef7b7a0SMateusz Guzik 			thread_count_sub(tdcount);
6355ef7b7a0SMateusz Guzik 			tdcount = 0;
636755341dfSMateusz Guzik 		}
637c5315f51SMateusz Guzik 		itd = ntd;
63844990b8cSJulian Elischer 	}
639755341dfSMateusz Guzik 
6405ef7b7a0SMateusz Guzik 	tidbatch_final(&tidbatch);
641f34a2f56SMateusz Guzik 	credbatch_final(&credbatch);
6425ef7b7a0SMateusz Guzik 	if (tdcount != 0) {
6435ef7b7a0SMateusz Guzik 		thread_count_sub(tdcount);
644755341dfSMateusz Guzik 	}
645fb8ab680SMateusz Guzik 	MPASS(limcount != 0);
646fb8ab680SMateusz Guzik 	lim_freen(lim, limcount);
647ed062c8dSJulian Elischer }
64844990b8cSJulian Elischer 
6494f0db5e0SJulian Elischer /*
650d116b9f1SMateusz Guzik  * Reap zombies from all domains.
651d116b9f1SMateusz Guzik  */
652d116b9f1SMateusz Guzik static void
653d116b9f1SMateusz Guzik thread_reap_all(void)
654d116b9f1SMateusz Guzik {
655d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
656d116b9f1SMateusz Guzik 	int i, domain;
657d116b9f1SMateusz Guzik 
658d116b9f1SMateusz Guzik 	domain = PCPU_GET(domain);
659d116b9f1SMateusz Guzik 	for (i = 0; i < vm_ndomains; i++) {
660d116b9f1SMateusz Guzik 		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
661d116b9f1SMateusz Guzik 		thread_reap_domain(tdd);
662d116b9f1SMateusz Guzik 	}
663d116b9f1SMateusz Guzik }
664d116b9f1SMateusz Guzik 
665d116b9f1SMateusz Guzik /*
666d116b9f1SMateusz Guzik  * Reap zombies from local domain.
667d116b9f1SMateusz Guzik  */
668b83e94beSMateusz Guzik static void
669d116b9f1SMateusz Guzik thread_reap(void)
670d116b9f1SMateusz Guzik {
671d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
672d116b9f1SMateusz Guzik 	int domain;
673d116b9f1SMateusz Guzik 
674d116b9f1SMateusz Guzik 	domain = PCPU_GET(domain);
675d116b9f1SMateusz Guzik 	tdd = &thread_domain_data[domain];
676d116b9f1SMateusz Guzik 
677d116b9f1SMateusz Guzik 	thread_reap_domain(tdd);
678d116b9f1SMateusz Guzik }
679d116b9f1SMateusz Guzik 
680d116b9f1SMateusz Guzik static void
681d116b9f1SMateusz Guzik thread_reap_task_cb(void *arg __unused, int pending __unused)
682d116b9f1SMateusz Guzik {
683d116b9f1SMateusz Guzik 
684d116b9f1SMateusz Guzik 	thread_reap_all();
685d116b9f1SMateusz Guzik }
686d116b9f1SMateusz Guzik 
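/*
 * Periodic callout: if any domain has had zombies waiting longer than the
 * reap interval, hand the work to thread_reap_task so it runs from a
 * taskqueue context, then re-arm the callout.
 */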
687d116b9f1SMateusz Guzik static void
688d116b9f1SMateusz Guzik thread_reap_callout_cb(void *arg __unused)
689d116b9f1SMateusz Guzik {
690d116b9f1SMateusz Guzik 	struct thread_domain_data *tdd;
691d116b9f1SMateusz Guzik 	int i, cticks, lticks;
692d116b9f1SMateusz Guzik 	bool wantreap;
693d116b9f1SMateusz Guzik 
694d116b9f1SMateusz Guzik 	wantreap = false;
695d116b9f1SMateusz Guzik 	cticks = atomic_load_int(&ticks);
696d116b9f1SMateusz Guzik 	for (i = 0; i < vm_ndomains; i++) {
697d116b9f1SMateusz Guzik 		tdd = &thread_domain_data[i];
698d116b9f1SMateusz Guzik 		lticks = tdd->tdd_reapticks;
699d116b9f1SMateusz Guzik 		if (tdd->tdd_zombies != NULL &&
700d116b9f1SMateusz Guzik 		    (u_int)(cticks - lticks) > 5 * hz) {
701d116b9f1SMateusz Guzik 			wantreap = true;
702d116b9f1SMateusz Guzik 			break;
703d116b9f1SMateusz Guzik 		}
704d116b9f1SMateusz Guzik 	}
705d116b9f1SMateusz Guzik 
706d116b9f1SMateusz Guzik 	if (wantreap)
707d116b9f1SMateusz Guzik 		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
708845d7797SKonstantin Belousov 	callout_reset(&thread_reap_callout, 5 * hz,
709845d7797SKonstantin Belousov 	    thread_reap_callout_cb, NULL);
710d116b9f1SMateusz Guzik }
711d116b9f1SMateusz Guzik 
712d116b9f1SMateusz Guzik /*
713f62c7e54SKonstantin Belousov  * Calling this function guarantees that any thread that exited before
714f62c7e54SKonstantin Belousov  * the call is reaped when the function returns.  By 'exited' we mean
715f62c7e54SKonstantin Belousov  * a thread removed from the process linkage with thread_unlink().
716f62c7e54SKonstantin Belousov  * Practically this means that the caller must lock/unlock the corresponding
717f62c7e54SKonstantin Belousov  * process lock before the call, to synchronize with thread_exit().
718f62c7e54SKonstantin Belousov  */
719f62c7e54SKonstantin Belousov void
720f62c7e54SKonstantin Belousov thread_reap_barrier(void)
721f62c7e54SKonstantin Belousov {
722f62c7e54SKonstantin Belousov 	struct task *t;
723f62c7e54SKonstantin Belousov 
724f62c7e54SKonstantin Belousov 	/*
725f62c7e54SKonstantin Belousov 	 * First do context switches to each CPU to ensure that all
726f62c7e54SKonstantin Belousov 	 * PCPU pc_deadthreads are moved to zombie list.
727f62c7e54SKonstantin Belousov 	 */
728f62c7e54SKonstantin Belousov 	quiesce_all_cpus("", PDROP);
729f62c7e54SKonstantin Belousov 
730f62c7e54SKonstantin Belousov 	/*
731f62c7e54SKonstantin Belousov 	 * Second, fire the task in the same thread as normal
732f62c7e54SKonstantin Belousov 	 * thread_reap() is done, to serialize reaping.
733f62c7e54SKonstantin Belousov 	 */
734f62c7e54SKonstantin Belousov 	t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
735f62c7e54SKonstantin Belousov 	TASK_INIT(t, 0, thread_reap_task_cb, t);
736f62c7e54SKonstantin Belousov 	taskqueue_enqueue(taskqueue_thread, t);
737f62c7e54SKonstantin Belousov 	taskqueue_drain(taskqueue_thread, t);
738f62c7e54SKonstantin Belousov 	free(t, M_TEMP);
739f62c7e54SKonstantin Belousov }
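/*
 * A minimal usage sketch for the barrier above (hypothetical caller),
 * following the contract described in the comment: lock and unlock the
 * process lock to synchronize with any thread_exit() in progress, then
 * wait for the exited threads to be reaped before touching state they
 * might still reference.
 *
 *	PROC_LOCK(p);
 *	(observe that the threads of interest have been unlinked)
 *	PROC_UNLOCK(p);
 *	thread_reap_barrier();
 */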
740f62c7e54SKonstantin Belousov 
741f62c7e54SKonstantin Belousov /*
74244990b8cSJulian Elischer  * Allocate a thread.
74344990b8cSJulian Elischer  */
74444990b8cSJulian Elischer struct thread *
7458a945d10SKonstantin Belousov thread_alloc(int pages)
74644990b8cSJulian Elischer {
74789b57fcfSKonstantin Belousov 	struct thread *td;
7481bd3cf5dSMateusz Guzik 	lwpid_t tid;
7498460a577SJohn Birrell 
75062dbc992SMateusz Guzik 	if (!thread_count_inc()) {
7511bd3cf5dSMateusz Guzik 		return (NULL);
7521bd3cf5dSMateusz Guzik 	}
7531bd3cf5dSMateusz Guzik 
75462dbc992SMateusz Guzik 	tid = tid_alloc();
7551bd3cf5dSMateusz Guzik 	td = uma_zalloc(thread_zone, M_WAITOK);
75689b57fcfSKonstantin Belousov 	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
7578a945d10SKonstantin Belousov 	if (!vm_thread_new(td, pages)) {
75889b57fcfSKonstantin Belousov 		uma_zfree(thread_zone, td);
7591bd3cf5dSMateusz Guzik 		tid_free(tid);
76062dbc992SMateusz Guzik 		thread_count_dec();
76189b57fcfSKonstantin Belousov 		return (NULL);
76289b57fcfSKonstantin Belousov 	}
7631bd3cf5dSMateusz Guzik 	td->td_tid = tid;
7640c3967e7SMarcel Moolenaar 	cpu_thread_alloc(td);
7651bd3cf5dSMateusz Guzik 	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
76689b57fcfSKonstantin Belousov 	return (td);
76744990b8cSJulian Elischer }
76844990b8cSJulian Elischer 
7698a945d10SKonstantin Belousov int
7708a945d10SKonstantin Belousov thread_alloc_stack(struct thread *td, int pages)
7718a945d10SKonstantin Belousov {
7728a945d10SKonstantin Belousov 
7738a945d10SKonstantin Belousov 	KASSERT(td->td_kstack == 0,
7748a945d10SKonstantin Belousov 	    ("thread_alloc_stack called on a thread with kstack"));
7758a945d10SKonstantin Belousov 	if (!vm_thread_new(td, pages))
7768a945d10SKonstantin Belousov 		return (0);
7778a945d10SKonstantin Belousov 	cpu_thread_alloc(td);
7788a945d10SKonstantin Belousov 	return (1);
7798a945d10SKonstantin Belousov }
7804f0db5e0SJulian Elischer 
7814f0db5e0SJulian Elischer /*
78244990b8cSJulian Elischer  * Deallocate a thread.
78344990b8cSJulian Elischer  */
784755341dfSMateusz Guzik static void
785755341dfSMateusz Guzik thread_free_batched(struct thread *td)
78644990b8cSJulian Elischer {
7872e6b8de4SJeff Roberson 
7882e6b8de4SJeff Roberson 	lock_profile_thread_exit(td);
78945aea8deSJeff Roberson 	if (td->td_cpuset)
790d7f687fcSJeff Roberson 		cpuset_rel(td->td_cpuset);
791d7f687fcSJeff Roberson 	td->td_cpuset = NULL;
7920c3967e7SMarcel Moolenaar 	cpu_thread_free(td);
79389b57fcfSKonstantin Belousov 	if (td->td_kstack != 0)
79489b57fcfSKonstantin Belousov 		vm_thread_dispose(td);
7952d19b736SKonstantin Belousov 	callout_drain(&td->td_slpcallout);
796755341dfSMateusz Guzik 	/*
797755341dfSMateusz Guzik 	 * Freeing handled by the caller.
798755341dfSMateusz Guzik 	 */
7991bd3cf5dSMateusz Guzik 	td->td_tid = -1;
80044990b8cSJulian Elischer 	uma_zfree(thread_zone, td);
80144990b8cSJulian Elischer }
80244990b8cSJulian Elischer 
8034ea6a9a2SMateusz Guzik void
804755341dfSMateusz Guzik thread_free(struct thread *td)
805755341dfSMateusz Guzik {
806755341dfSMateusz Guzik 	lwpid_t tid;
807755341dfSMateusz Guzik 
8085ef7b7a0SMateusz Guzik 	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
809755341dfSMateusz Guzik 	tid = td->td_tid;
810755341dfSMateusz Guzik 	thread_free_batched(td);
811755341dfSMateusz Guzik 	tid_free(tid);
81262dbc992SMateusz Guzik 	thread_count_dec();
813755341dfSMateusz Guzik }
814755341dfSMateusz Guzik 
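/*
 * Copy-on-write snapshots of per-process state: each thread keeps its own
 * references to the process credentials and resource limits
 * (td_realucred/td_ucred and td_limit) together with the p_cowgen
 * generation in effect when they were taken.  thread_cow_update() brings
 * a thread whose generation has fallen behind back in sync.
 */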
815755341dfSMateusz Guzik void
8164ea6a9a2SMateusz Guzik thread_cow_get_proc(struct thread *newtd, struct proc *p)
8174ea6a9a2SMateusz Guzik {
8184ea6a9a2SMateusz Guzik 
8194ea6a9a2SMateusz Guzik 	PROC_LOCK_ASSERT(p, MA_OWNED);
8201724c563SMateusz Guzik 	newtd->td_realucred = crcowget(p->p_ucred);
8211724c563SMateusz Guzik 	newtd->td_ucred = newtd->td_realucred;
822f6f6d240SMateusz Guzik 	newtd->td_limit = lim_hold(p->p_limit);
8234ea6a9a2SMateusz Guzik 	newtd->td_cowgen = p->p_cowgen;
8244ea6a9a2SMateusz Guzik }
8254ea6a9a2SMateusz Guzik 
8264ea6a9a2SMateusz Guzik void
8274ea6a9a2SMateusz Guzik thread_cow_get(struct thread *newtd, struct thread *td)
8284ea6a9a2SMateusz Guzik {
8294ea6a9a2SMateusz Guzik 
8301724c563SMateusz Guzik 	MPASS(td->td_realucred == td->td_ucred);
8311724c563SMateusz Guzik 	newtd->td_realucred = crcowget(td->td_realucred);
8321724c563SMateusz Guzik 	newtd->td_ucred = newtd->td_realucred;
833f6f6d240SMateusz Guzik 	newtd->td_limit = lim_hold(td->td_limit);
8344ea6a9a2SMateusz Guzik 	newtd->td_cowgen = td->td_cowgen;
8354ea6a9a2SMateusz Guzik }
8364ea6a9a2SMateusz Guzik 
8374ea6a9a2SMateusz Guzik void
8384ea6a9a2SMateusz Guzik thread_cow_free(struct thread *td)
8394ea6a9a2SMateusz Guzik {
8404ea6a9a2SMateusz Guzik 
8411724c563SMateusz Guzik 	if (td->td_realucred != NULL)
8421724c563SMateusz Guzik 		crcowfree(td);
843cd672ca6SMateusz Guzik 	if (td->td_limit != NULL)
844f6f6d240SMateusz Guzik 		lim_free(td->td_limit);
8454ea6a9a2SMateusz Guzik }
8464ea6a9a2SMateusz Guzik 
8474ea6a9a2SMateusz Guzik void
8484ea6a9a2SMateusz Guzik thread_cow_update(struct thread *td)
8494ea6a9a2SMateusz Guzik {
8504ea6a9a2SMateusz Guzik 	struct proc *p;
851cd672ca6SMateusz Guzik 	struct ucred *oldcred;
852cd672ca6SMateusz Guzik 	struct plimit *oldlimit;
8534ea6a9a2SMateusz Guzik 
8544ea6a9a2SMateusz Guzik 	p = td->td_proc;
855cd672ca6SMateusz Guzik 	oldlimit = NULL;
8564ea6a9a2SMateusz Guzik 	PROC_LOCK(p);
8571724c563SMateusz Guzik 	oldcred = crcowsync();
858cd672ca6SMateusz Guzik 	if (td->td_limit != p->p_limit) {
859cd672ca6SMateusz Guzik 		oldlimit = td->td_limit;
860cd672ca6SMateusz Guzik 		td->td_limit = lim_hold(p->p_limit);
861cd672ca6SMateusz Guzik 	}
8624ea6a9a2SMateusz Guzik 	td->td_cowgen = p->p_cowgen;
8634ea6a9a2SMateusz Guzik 	PROC_UNLOCK(p);
864cd672ca6SMateusz Guzik 	if (oldcred != NULL)
865cd672ca6SMateusz Guzik 		crfree(oldcred);
866cd672ca6SMateusz Guzik 	if (oldlimit != NULL)
867cd672ca6SMateusz Guzik 		lim_free(oldlimit);
8684ea6a9a2SMateusz Guzik }
8694ea6a9a2SMateusz Guzik 
87044990b8cSJulian Elischer /*
87144990b8cSJulian Elischer  * Discard the current thread and exit from its context.
87294e0a4cdSJulian Elischer  * Always called with scheduler locked.
87344990b8cSJulian Elischer  *
87444990b8cSJulian Elischer  * Because we can't free a thread while we're operating under its context,
875696058c3SJulian Elischer  * push the current thread into our CPU's deadthread holder. This means
876696058c3SJulian Elischer  * we needn't worry about someone else grabbing our context before we
8776617724cSJeff Roberson  * do a cpu_throw().
87844990b8cSJulian Elischer  */
87944990b8cSJulian Elischer void
88044990b8cSJulian Elischer thread_exit(void)
88144990b8cSJulian Elischer {
8827e3a96eaSJohn Baldwin 	uint64_t runtime, new_switchtime;
88344990b8cSJulian Elischer 	struct thread *td;
8841c4bcd05SJeff Roberson 	struct thread *td2;
88544990b8cSJulian Elischer 	struct proc *p;
8867847a9daSJohn Baldwin 	int wakeup_swapper;
88744990b8cSJulian Elischer 
88844990b8cSJulian Elischer 	td = curthread;
88944990b8cSJulian Elischer 	p = td->td_proc;
89044990b8cSJulian Elischer 
891a54e85fdSJeff Roberson 	PROC_SLOCK_ASSERT(p, MA_OWNED);
892ed062c8dSJulian Elischer 	mtx_assert(&Giant, MA_NOTOWNED);
893a54e85fdSJeff Roberson 
89444990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
895ed062c8dSJulian Elischer 	KASSERT(p != NULL, ("thread exiting without a process"));
896cc701b73SRobert Watson 	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
897e01eafefSJulian Elischer 	    (long)p->p_pid, td->td_name);
8986c9271a9SAndriy Gapon 	SDT_PROBE0(proc, , , lwp__exit);
8999104847fSDavid Xu 	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
900936c24faSMateusz Guzik 	MPASS(td->td_realucred == td->td_ucred);
90144990b8cSJulian Elischer 
902ed062c8dSJulian Elischer 	/*
903ed062c8dSJulian Elischer 	 * drop FPU & debug register state storage, or any other
904ed062c8dSJulian Elischer 	 * architecture specific resources that
905ed062c8dSJulian Elischer 	 * would not be on a new untouched process.
906ed062c8dSJulian Elischer 	 */
907bd07998eSKonstantin Belousov 	cpu_thread_exit(td);
90844990b8cSJulian Elischer 
909ed062c8dSJulian Elischer 	/*
9101faf202eSJulian Elischer 	 * The last thread is left attached to the process
9111faf202eSJulian Elischer 	 * so that the whole bundle gets recycled. Skip
912ed062c8dSJulian Elischer 	 * all this stuff if we never had threads.
913ed062c8dSJulian Elischer 	 * EXIT clears all signs of other threads when
914ed062c8dSJulian Elischer 	 * it goes to single threading, so the last thread always
915ed062c8dSJulian Elischer 	 * takes the short path.
9161faf202eSJulian Elischer 	 */
917ed062c8dSJulian Elischer 	if (p->p_flag & P_HADTHREADS) {
9181faf202eSJulian Elischer 		if (p->p_numthreads > 1) {
919fd229b5bSKonstantin Belousov 			atomic_add_int(&td->td_proc->p_exitthreads, 1);
920d3a0bd78SJulian Elischer 			thread_unlink(td);
9211c4bcd05SJeff Roberson 			td2 = FIRST_THREAD_IN_PROC(p);
9221c4bcd05SJeff Roberson 			sched_exit_thread(td2, td);
923ed062c8dSJulian Elischer 
924ed062c8dSJulian Elischer 			/*
92544990b8cSJulian Elischer 			 * The test below is NOT true if we are the
9269182554aSKonstantin Belousov 			 * sole exiting thread. P_STOPPED_SINGLE is unset
92744990b8cSJulian Elischer 			 * in exit1() after it is the only survivor.
92844990b8cSJulian Elischer 			 */
9291279572aSDavid Xu 			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
93044990b8cSJulian Elischer 				if (p->p_numthreads == p->p_suspcount) {
931a54e85fdSJeff Roberson 					thread_lock(p->p_singlethread);
9327847a9daSJohn Baldwin 					wakeup_swapper = thread_unsuspend_one(
93384cdea97SKonstantin Belousov 						p->p_singlethread, p, false);
9347847a9daSJohn Baldwin 					if (wakeup_swapper)
9357847a9daSJohn Baldwin 						kick_proc0();
93644990b8cSJulian Elischer 				}
93744990b8cSJulian Elischer 			}
93848bfcdddSJulian Elischer 
939696058c3SJulian Elischer 			PCPU_SET(deadthread, td);
9401faf202eSJulian Elischer 		} else {
941ed062c8dSJulian Elischer 			/*
942ed062c8dSJulian Elischer 			 * The last thread is exiting.. but not through exit()
943ed062c8dSJulian Elischer 			 */
944ed062c8dSJulian Elischer 			panic ("thread_exit: Last thread exiting on its own");
945ed062c8dSJulian Elischer 		}
9461faf202eSJulian Elischer 	}
94716d95d4fSJoseph Koshy #ifdef	HWPMC_HOOKS
94816d95d4fSJoseph Koshy 	/*
94916d95d4fSJoseph Koshy 	 * If this thread is part of a process that is being tracked by hwpmc(4),
95016d95d4fSJoseph Koshy 	 * inform the module of the thread's impending exit.
95116d95d4fSJoseph Koshy 	 */
9526161b98cSMatt Macy 	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
95316d95d4fSJoseph Koshy 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
9546161b98cSMatt Macy 		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
955ebfaf69cSMatt Macy 	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
956ebfaf69cSMatt Macy 		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
95716d95d4fSJoseph Koshy #endif
958a54e85fdSJeff Roberson 	PROC_UNLOCK(p);
9595c7bebf9SKonstantin Belousov 	PROC_STATLOCK(p);
9605c7bebf9SKonstantin Belousov 	thread_lock(td);
9615c7bebf9SKonstantin Belousov 	PROC_SUNLOCK(p);
9627e3a96eaSJohn Baldwin 
9637e3a96eaSJohn Baldwin 	/* Do the same timestamp bookkeeping that mi_switch() would do. */
9647e3a96eaSJohn Baldwin 	new_switchtime = cpu_ticks();
9657e3a96eaSJohn Baldwin 	runtime = new_switchtime - PCPU_GET(switchtime);
9667e3a96eaSJohn Baldwin 	td->td_runtime += runtime;
9677e3a96eaSJohn Baldwin 	td->td_incruntime += runtime;
9687e3a96eaSJohn Baldwin 	PCPU_SET(switchtime, new_switchtime);
9697e3a96eaSJohn Baldwin 	PCPU_SET(switchticks, ticks);
97083c9dea1SGleb Smirnoff 	VM_CNT_INC(v_swtch);
9717e3a96eaSJohn Baldwin 
9727e3a96eaSJohn Baldwin 	/* Save our resource usage in our process. */
9737e3a96eaSJohn Baldwin 	td->td_ru.ru_nvcsw++;
97461a74c5cSJeff Roberson 	ruxagg_locked(p, td);
9757e3a96eaSJohn Baldwin 	rucollect(&p->p_ru, &td->td_ru);
9765c7bebf9SKonstantin Belousov 	PROC_STATUNLOCK(p);
9777e3a96eaSJohn Baldwin 
978fa2528acSAlex Richardson 	TD_SET_STATE(td, TDS_INACTIVE);
9793d06b4b3SAttilio Rao #ifdef WITNESS
9803d06b4b3SAttilio Rao 	witness_thread_exit(td);
9813d06b4b3SAttilio Rao #endif
982732d9528SJulian Elischer 	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
983a54e85fdSJeff Roberson 	sched_throw(td);
984cc66ebe2SPeter Wemm 	panic("I'm a teapot!");
98544990b8cSJulian Elischer 	/* NOTREACHED */
98644990b8cSJulian Elischer }
98744990b8cSJulian Elischer 
98844990b8cSJulian Elischer /*
989696058c3SJulian Elischer  * Do any thread-specific cleanups that may be needed in wait().
99037814395SPeter Wemm  * Called with Giant, proc and schedlock not held.
991696058c3SJulian Elischer  */
992696058c3SJulian Elischer void
993696058c3SJulian Elischer thread_wait(struct proc *p)
994696058c3SJulian Elischer {
995696058c3SJulian Elischer 	struct thread *td;
996696058c3SJulian Elischer 
99737814395SPeter Wemm 	mtx_assert(&Giant, MA_NOTOWNED);
998624bf9e1SKonstantin Belousov 	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
999624bf9e1SKonstantin Belousov 	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
1000ff8fbcffSJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
1001ff8fbcffSJeff Roberson 	/* Lock the last thread so we spin until it exits cpu_throw(). */
1002ff8fbcffSJeff Roberson 	thread_lock(td);
1003ff8fbcffSJeff Roberson 	thread_unlock(td);
10042e6b8de4SJeff Roberson 	lock_profile_thread_exit(td);
1005d7f687fcSJeff Roberson 	cpuset_rel(td->td_cpuset);
1006d7f687fcSJeff Roberson 	td->td_cpuset = NULL;
1007696058c3SJulian Elischer 	cpu_thread_clean(td);
10084ea6a9a2SMateusz Guzik 	thread_cow_free(td);
10092d19b736SKonstantin Belousov 	callout_drain(&td->td_slpcallout);
1010696058c3SJulian Elischer 	thread_reap();	/* check for zombie threads etc. */
1011696058c3SJulian Elischer }
1012696058c3SJulian Elischer 
1013696058c3SJulian Elischer /*
101444990b8cSJulian Elischer  * Link a thread to a process.
10151faf202eSJulian Elischer  * Set up anything that needs to be initialized for it to
10161faf202eSJulian Elischer  * be used by the process.
101744990b8cSJulian Elischer  */
101844990b8cSJulian Elischer void
10198460a577SJohn Birrell thread_link(struct thread *td, struct proc *p)
102044990b8cSJulian Elischer {
102144990b8cSJulian Elischer 
1022a54e85fdSJeff Roberson 	/*
1023a54e85fdSJeff Roberson 	 * XXX This can't be enabled because it's called for proc0 before
1024374ae2a3SJeff Roberson 	 * its lock has been created.
1025374ae2a3SJeff Roberson 	 * PROC_LOCK_ASSERT(p, MA_OWNED);
1026a54e85fdSJeff Roberson 	 */
1027fa2528acSAlex Richardson 	TD_SET_STATE(td, TDS_INACTIVE);
102844990b8cSJulian Elischer 	td->td_proc     = p;
1029b61ce5b0SJeff Roberson 	td->td_flags    = TDF_INMEM;
103044990b8cSJulian Elischer 
10311faf202eSJulian Elischer 	LIST_INIT(&td->td_contested);
1032eea4f254SJeff Roberson 	LIST_INIT(&td->td_lprof[0]);
1033eea4f254SJeff Roberson 	LIST_INIT(&td->td_lprof[1]);
1034f6eccf96SGleb Smirnoff #ifdef EPOCH_TRACE
1035dd902d01SGleb Smirnoff 	SLIST_INIT(&td->td_epochs);
1036f6eccf96SGleb Smirnoff #endif
10379104847fSDavid Xu 	sigqueue_init(&td->td_sigqueue, p);
1038fd90e2edSJung-uk Kim 	callout_init(&td->td_slpcallout, 1);
103966d8df9dSDaniel Eischen 	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
104044990b8cSJulian Elischer 	p->p_numthreads++;
104144990b8cSJulian Elischer }
104244990b8cSJulian Elischer 
1043ed062c8dSJulian Elischer /*
1044ed062c8dSJulian Elischer  * Called from:
1045ed062c8dSJulian Elischer  *  thread_exit()
1046ed062c8dSJulian Elischer  */
1047d3a0bd78SJulian Elischer void
1048d3a0bd78SJulian Elischer thread_unlink(struct thread *td)
1049d3a0bd78SJulian Elischer {
1050d3a0bd78SJulian Elischer 	struct proc *p = td->td_proc;
1051d3a0bd78SJulian Elischer 
1052374ae2a3SJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
1053f6eccf96SGleb Smirnoff #ifdef EPOCH_TRACE
1054dd902d01SGleb Smirnoff 	MPASS(SLIST_EMPTY(&td->td_epochs));
1055f6eccf96SGleb Smirnoff #endif
1056dd902d01SGleb Smirnoff 
1057d3a0bd78SJulian Elischer 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
1058d3a0bd78SJulian Elischer 	p->p_numthreads--;
1059d3a0bd78SJulian Elischer 	/* could clear a few other things here */
10608460a577SJohn Birrell 	/* Must NOT clear links to proc! */
10615c8329edSJulian Elischer }
10625c8329edSJulian Elischer 
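/*
 * Count the threads a single-threading request still has to wait for,
 * depending on mode: every thread for SINGLE_EXIT, threads not yet parked
 * at the user boundary for SINGLE_BOUNDARY, and threads not yet suspended
 * for SINGLE_NO_EXIT and SINGLE_ALLPROC.
 */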
106379799053SKonstantin Belousov static int
106479799053SKonstantin Belousov calc_remaining(struct proc *p, int mode)
106579799053SKonstantin Belousov {
106679799053SKonstantin Belousov 	int remaining;
106779799053SKonstantin Belousov 
10687b519077SKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
10697b519077SKonstantin Belousov 	PROC_SLOCK_ASSERT(p, MA_OWNED);
107079799053SKonstantin Belousov 	if (mode == SINGLE_EXIT)
107179799053SKonstantin Belousov 		remaining = p->p_numthreads;
107279799053SKonstantin Belousov 	else if (mode == SINGLE_BOUNDARY)
107379799053SKonstantin Belousov 		remaining = p->p_numthreads - p->p_boundary_count;
10746ddcc233SKonstantin Belousov 	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
107579799053SKonstantin Belousov 		remaining = p->p_numthreads - p->p_suspcount;
107679799053SKonstantin Belousov 	else
107779799053SKonstantin Belousov 		panic("calc_remaining: wrong mode %d", mode);
107879799053SKonstantin Belousov 	return (remaining);
107979799053SKonstantin Belousov }
108079799053SKonstantin Belousov 
108107a9368aSKonstantin Belousov static int
108207a9368aSKonstantin Belousov remain_for_mode(int mode)
108307a9368aSKonstantin Belousov {
108407a9368aSKonstantin Belousov 
10856ddcc233SKonstantin Belousov 	return (mode == SINGLE_ALLPROC ? 0 : 1);
108607a9368aSKonstantin Belousov }
108707a9368aSKonstantin Belousov 
108807a9368aSKonstantin Belousov static int
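/*
 * Nudge one inhibited thread towards the goal of the given
 * single-threading mode: unsuspend it where the mode allows, abort an
 * interruptible sleep with EINTR/ERESTART, or, for SINGLE_ALLPROC,
 * suspend it in place.  Returns non-zero if the swapper needs a wakeup.
 */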
108907a9368aSKonstantin Belousov weed_inhib(int mode, struct thread *td2, struct proc *p)
109007a9368aSKonstantin Belousov {
109107a9368aSKonstantin Belousov 	int wakeup_swapper;
109207a9368aSKonstantin Belousov 
109307a9368aSKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
109407a9368aSKonstantin Belousov 	PROC_SLOCK_ASSERT(p, MA_OWNED);
109507a9368aSKonstantin Belousov 	THREAD_LOCK_ASSERT(td2, MA_OWNED);
109607a9368aSKonstantin Belousov 
109707a9368aSKonstantin Belousov 	wakeup_swapper = 0;
109861a74c5cSJeff Roberson 
109961a74c5cSJeff Roberson 	/*
110061a74c5cSJeff Roberson 	 * Since the thread lock is dropped by the scheduler we have
110161a74c5cSJeff Roberson 	 * to retry to check for races.
110261a74c5cSJeff Roberson 	 */
110361a74c5cSJeff Roberson restart:
110407a9368aSKonstantin Belousov 	switch (mode) {
110507a9368aSKonstantin Belousov 	case SINGLE_EXIT:
110661a74c5cSJeff Roberson 		if (TD_IS_SUSPENDED(td2)) {
110784cdea97SKonstantin Belousov 			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
110861a74c5cSJeff Roberson 			thread_lock(td2);
110961a74c5cSJeff Roberson 			goto restart;
111061a74c5cSJeff Roberson 		}
111161a74c5cSJeff Roberson 		if (TD_CAN_ABORT(td2)) {
111207a9368aSKonstantin Belousov 			wakeup_swapper |= sleepq_abort(td2, EINTR);
111361a74c5cSJeff Roberson 			return (wakeup_swapper);
111461a74c5cSJeff Roberson 		}
111507a9368aSKonstantin Belousov 		break;
111607a9368aSKonstantin Belousov 	case SINGLE_BOUNDARY:
111707a9368aSKonstantin Belousov 	case SINGLE_NO_EXIT:
111861a74c5cSJeff Roberson 		if (TD_IS_SUSPENDED(td2) &&
111961a74c5cSJeff Roberson 		    (td2->td_flags & TDF_BOUNDARY) == 0) {
112084cdea97SKonstantin Belousov 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
112161a74c5cSJeff Roberson 			thread_lock(td2);
112261a74c5cSJeff Roberson 			goto restart;
112361a74c5cSJeff Roberson 		}
112461a74c5cSJeff Roberson 		if (TD_CAN_ABORT(td2)) {
112507a9368aSKonstantin Belousov 			wakeup_swapper |= sleepq_abort(td2, ERESTART);
112661a74c5cSJeff Roberson 			return (wakeup_swapper);
112761a74c5cSJeff Roberson 		}
1128917dd390SKonstantin Belousov 		break;
11296ddcc233SKonstantin Belousov 	case SINGLE_ALLPROC:
11306ddcc233SKonstantin Belousov 		/*
11316ddcc233SKonstantin Belousov 		 * ALLPROC suspend tries to avoid spurious EINTR for
11326ddcc233SKonstantin Belousov 		 * threads sleeping interruptibly, by suspending the
11336ddcc233SKonstantin Belousov 		 * thread directly, similarly to sig_suspend_threads().
11346ddcc233SKonstantin Belousov 		 * Since such sleep is not performed at the user
11356ddcc233SKonstantin Belousov 		 * boundary, the TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
11366ddcc233SKonstantin Belousov 		 * is used to avoid immediate un-suspend.
11376ddcc233SKonstantin Belousov 		 */
11386ddcc233SKonstantin Belousov 		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
113961a74c5cSJeff Roberson 		    TDF_ALLPROCSUSP)) == 0) {
114084cdea97SKonstantin Belousov 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
114161a74c5cSJeff Roberson 			thread_lock(td2);
114261a74c5cSJeff Roberson 			goto restart;
114361a74c5cSJeff Roberson 		}
114461a74c5cSJeff Roberson 		if (TD_CAN_ABORT(td2)) {
11456ddcc233SKonstantin Belousov 			if ((td2->td_flags & TDF_SBDRY) == 0) {
11466ddcc233SKonstantin Belousov 				thread_suspend_one(td2);
11476ddcc233SKonstantin Belousov 				td2->td_flags |= TDF_ALLPROCSUSP;
11486ddcc233SKonstantin Belousov 			} else {
11496ddcc233SKonstantin Belousov 				wakeup_swapper |= sleepq_abort(td2, ERESTART);
115061a74c5cSJeff Roberson 				return (wakeup_swapper);
11516ddcc233SKonstantin Belousov 			}
11526ddcc233SKonstantin Belousov 		}
115307a9368aSKonstantin Belousov 		break;
115461a74c5cSJeff Roberson 	default:
115561a74c5cSJeff Roberson 		break;
115607a9368aSKonstantin Belousov 	}
115761a74c5cSJeff Roberson 	thread_unlock(td2);
115807a9368aSKonstantin Belousov 	return (wakeup_swapper);
115907a9368aSKonstantin Belousov }
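
/*
 * weed_inhib() above nudges a single inhibited thread out of the way of a
 * single-threading request.  Depending on the mode it either wakes a
 * suspended thread so that it can reach the user boundary (or exit), aborts
 * an interruptible sleep with EINTR or ERESTART, or, for SINGLE_ALLPROC,
 * suspends the sleeping thread in place and marks it with TDF_ALLPROCSUSP.
 * A non-zero return value means the callers must kick_proc0().
 */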
116007a9368aSKonstantin Belousov 
11615215b187SJeff Roberson /*
116244990b8cSJulian Elischer  * Enforce single-threading.
116344990b8cSJulian Elischer  *
116444990b8cSJulian Elischer  * Returns 1 if the caller must abort (another thread is waiting to
116544990b8cSJulian Elischer  * exit the process or similar). Process is locked!
116644990b8cSJulian Elischer  * Returns 0 when you are successfully the only thread running.
116744990b8cSJulian Elischer  * A process has successfully single-threaded in the suspend mode when
116844990b8cSJulian Elischer  * there are no threads in user mode. Threads in the kernel must be
116944990b8cSJulian Elischer  * allowed to continue until they get to the user boundary. They may even
117044990b8cSJulian Elischer  * copy out their return values and data before suspending. They may, however,
1171e2668f55SMaxim Konovalov  * be accelerated in reaching the user boundary, as we will wake up
117244990b8cSJulian Elischer  * any sleeping threads that are interruptible (PCATCH).
117344990b8cSJulian Elischer  */
117444990b8cSJulian Elischer int
11756ddcc233SKonstantin Belousov thread_single(struct proc *p, int mode)
117644990b8cSJulian Elischer {
117744990b8cSJulian Elischer 	struct thread *td;
117844990b8cSJulian Elischer 	struct thread *td2;
1179da7bbd2cSJohn Baldwin 	int remaining, wakeup_swapper;
118044990b8cSJulian Elischer 
118144990b8cSJulian Elischer 	td = curthread;
11826ddcc233SKonstantin Belousov 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
11836ddcc233SKonstantin Belousov 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
11846ddcc233SKonstantin Belousov 	    ("invalid mode %d", mode));
11856ddcc233SKonstantin Belousov 	/*
11866ddcc233SKonstantin Belousov 	 * If allowing non-ALLPROC singlethreading for non-curproc
11876ddcc233SKonstantin Belousov 	 * callers, calc_remaining() and remain_for_mode() should be
11886ddcc233SKonstantin Belousov 	 * adjusted to also account for td->td_proc != p.  For now
11896ddcc233SKonstantin Belousov 	 * this is not implemented because it is not used.
11906ddcc233SKonstantin Belousov 	 */
11916ddcc233SKonstantin Belousov 	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
11926ddcc233SKonstantin Belousov 	    (mode != SINGLE_ALLPROC && td->td_proc == p),
11936ddcc233SKonstantin Belousov 	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
119437814395SPeter Wemm 	mtx_assert(&Giant, MA_NOTOWNED);
119544990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
119644990b8cSJulian Elischer 
11976ddcc233SKonstantin Belousov 	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
119844990b8cSJulian Elischer 		return (0);
119944990b8cSJulian Elischer 
1200e3b9bf71SJulian Elischer 	/* Is someone already single threading? */
1201906ac69dSDavid Xu 	if (p->p_singlethread != NULL && p->p_singlethread != td)
120244990b8cSJulian Elischer 		return (1);
120344990b8cSJulian Elischer 
1204906ac69dSDavid Xu 	if (mode == SINGLE_EXIT) {
1205906ac69dSDavid Xu 		p->p_flag |= P_SINGLE_EXIT;
1206906ac69dSDavid Xu 		p->p_flag &= ~P_SINGLE_BOUNDARY;
1207906ac69dSDavid Xu 	} else {
1208906ac69dSDavid Xu 		p->p_flag &= ~P_SINGLE_EXIT;
1209906ac69dSDavid Xu 		if (mode == SINGLE_BOUNDARY)
1210906ac69dSDavid Xu 			p->p_flag |= P_SINGLE_BOUNDARY;
1211906ac69dSDavid Xu 		else
1212906ac69dSDavid Xu 			p->p_flag &= ~P_SINGLE_BOUNDARY;
1213906ac69dSDavid Xu 	}
12146ddcc233SKonstantin Belousov 	if (mode == SINGLE_ALLPROC)
12156ddcc233SKonstantin Belousov 		p->p_flag |= P_TOTAL_STOP;
12161279572aSDavid Xu 	p->p_flag |= P_STOPPED_SINGLE;
12177b4a950aSDavid Xu 	PROC_SLOCK(p);
1218112afcb2SJohn Baldwin 	p->p_singlethread = td;
121979799053SKonstantin Belousov 	remaining = calc_remaining(p, mode);
122007a9368aSKonstantin Belousov 	while (remaining != remain_for_mode(mode)) {
1221bf1a3220SDavid Xu 		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
1222bf1a3220SDavid Xu 			goto stopme;
1223da7bbd2cSJohn Baldwin 		wakeup_swapper = 0;
122444990b8cSJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td2) {
122544990b8cSJulian Elischer 			if (td2 == td)
122644990b8cSJulian Elischer 				continue;
1227a54e85fdSJeff Roberson 			thread_lock(td2);
1228b7edba77SJeff Roberson 			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
12296ddcc233SKonstantin Belousov 			if (TD_IS_INHIBITED(td2)) {
123007a9368aSKonstantin Belousov 				wakeup_swapper |= weed_inhib(mode, td2, p);
1231d8267df7SDavid Xu #ifdef SMP
12326ddcc233SKonstantin Belousov 			} else if (TD_IS_RUNNING(td2) && td != td2) {
1233d8267df7SDavid Xu 				forward_signal(td2);
123461a74c5cSJeff Roberson 				thread_unlock(td2);
1235d8267df7SDavid Xu #endif
123661a74c5cSJeff Roberson 			} else
1237a54e85fdSJeff Roberson 				thread_unlock(td2);
12389d102777SJulian Elischer 		}
1239da7bbd2cSJohn Baldwin 		if (wakeup_swapper)
1240da7bbd2cSJohn Baldwin 			kick_proc0();
124179799053SKonstantin Belousov 		remaining = calc_remaining(p, mode);
1242ec008e96SDavid Xu 
12439d102777SJulian Elischer 		/*
12449d102777SJulian Elischer 		 * Maybe we suspended some threads; was it enough?
12459d102777SJulian Elischer 		 */
124607a9368aSKonstantin Belousov 		if (remaining == remain_for_mode(mode))
12479d102777SJulian Elischer 			break;
12489d102777SJulian Elischer 
1249bf1a3220SDavid Xu stopme:
125044990b8cSJulian Elischer 		/*
125144990b8cSJulian Elischer 		 * Wake us up when everyone else has suspended.
1252e3b9bf71SJulian Elischer 		 * In the meantime we suspend as well.
125344990b8cSJulian Elischer 		 */
12546ddcc233SKonstantin Belousov 		thread_suspend_switch(td, p);
125579799053SKonstantin Belousov 		remaining = calc_remaining(p, mode);
125644990b8cSJulian Elischer 	}
1257906ac69dSDavid Xu 	if (mode == SINGLE_EXIT) {
125891599697SJulian Elischer 		/*
12598626a0ddSKonstantin Belousov 		 * Convert the process to an unthreaded process.  The
12608626a0ddSKonstantin Belousov 		 * SINGLE_EXIT mode is requested by exit1() or execve(); in
12618626a0ddSKonstantin Belousov 		 * both cases the other threads must be retired.
126291599697SJulian Elischer 		 */
12638626a0ddSKonstantin Belousov 		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
1264ed062c8dSJulian Elischer 		p->p_singlethread = NULL;
12658626a0ddSKonstantin Belousov 		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);
1266fd229b5bSKonstantin Belousov 
1267fd229b5bSKonstantin Belousov 		/*
1268fd229b5bSKonstantin Belousov 		 * Wait for any remaining threads to exit cpu_throw().
1269fd229b5bSKonstantin Belousov 		 */
1270fd229b5bSKonstantin Belousov 		while (p->p_exitthreads != 0) {
1271fd229b5bSKonstantin Belousov 			PROC_SUNLOCK(p);
1272fd229b5bSKonstantin Belousov 			PROC_UNLOCK(p);
1273fd229b5bSKonstantin Belousov 			sched_relinquish(td);
1274fd229b5bSKonstantin Belousov 			PROC_LOCK(p);
1275fd229b5bSKonstantin Belousov 			PROC_SLOCK(p);
1276fd229b5bSKonstantin Belousov 		}
1277ac437c07SKonstantin Belousov 	} else if (mode == SINGLE_BOUNDARY) {
1278ac437c07SKonstantin Belousov 		/*
1279ac437c07SKonstantin Belousov 		 * Wait until all suspended threads are removed from
1280ac437c07SKonstantin Belousov 		 * the processors.  The thread_suspend_check()
1281ac437c07SKonstantin Belousov 		 * increments p_boundary_count while it is still
1282ac437c07SKonstantin Belousov 		 * running, which makes it possible for the execve()
1283ac437c07SKonstantin Belousov 		 * running, which makes it possible for execve()
1284ac437c07SKonstantin Belousov 		 * to destroy the vmspace while our other threads are
1285ac437c07SKonstantin Belousov 		 * still using the address space.
1286ac437c07SKonstantin Belousov 		 *
1287ac437c07SKonstantin Belousov 		 * We lock each thread, which is only allowed to
1288ac437c07SKonstantin Belousov 		 * succeed after the context switch code has finished using
1289ac437c07SKonstantin Belousov 		 * the address space.
1290ac437c07SKonstantin Belousov 		FOREACH_THREAD_IN_PROC(p, td2) {
1291ac437c07SKonstantin Belousov 			if (td2 == td)
1292ac437c07SKonstantin Belousov 				continue;
1293ac437c07SKonstantin Belousov 			thread_lock(td2);
1294ac437c07SKonstantin Belousov 			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
1295ac437c07SKonstantin Belousov 			    ("td %p not on boundary", td2));
1296ac437c07SKonstantin Belousov 			KASSERT(TD_IS_SUSPENDED(td2),
1297ac437c07SKonstantin Belousov 			    ("td %p is not suspended", td2));
1298ac437c07SKonstantin Belousov 			thread_unlock(td2);
1299ac437c07SKonstantin Belousov 		}
130091599697SJulian Elischer 	}
13017b4a950aSDavid Xu 	PROC_SUNLOCK(p);
130244990b8cSJulian Elischer 	return (0);
130344990b8cSJulian Elischer }
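
/*
 * Illustrative sketch of a thread_single() caller.  This is a minimal,
 * hypothetical example (the function name is made up) assuming the caller
 * is a thread of 'p' and follows the locking protocol described in the
 * comment above thread_single().
 */
#if 0
static int
example_single_thread_for_exit(struct proc *p)
{

	PROC_LOCK(p);
	if (thread_single(p, SINGLE_EXIT) != 0) {
		/* Another thread is already single-threading; back out. */
		PROC_UNLOCK(p);
		return (ERESTART);
	}
	/* All other threads have been retired; p is now unthreaded. */
	PROC_UNLOCK(p);
	return (0);
}
#endif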
130444990b8cSJulian Elischer 
13058638fe7bSKonstantin Belousov bool
13068638fe7bSKonstantin Belousov thread_suspend_check_needed(void)
13078638fe7bSKonstantin Belousov {
13088638fe7bSKonstantin Belousov 	struct proc *p;
13098638fe7bSKonstantin Belousov 	struct thread *td;
13108638fe7bSKonstantin Belousov 
13118638fe7bSKonstantin Belousov 	td = curthread;
13128638fe7bSKonstantin Belousov 	p = td->td_proc;
13138638fe7bSKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
13148638fe7bSKonstantin Belousov 	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
13158638fe7bSKonstantin Belousov 	    (td->td_dbgflags & TDB_SUSPEND) != 0));
13168638fe7bSKonstantin Belousov }
13178638fe7bSKonstantin Belousov 
131844990b8cSJulian Elischer /*
131944990b8cSJulian Elischer  * Called from locations that can safely check to see
132044990b8cSJulian Elischer  * whether we have to suspend or at least throttle for a
132144990b8cSJulian Elischer  * single-thread event (e.g. fork).
132244990b8cSJulian Elischer  *
132344990b8cSJulian Elischer  * Such locations include userret().
132444990b8cSJulian Elischer  * If the "return_instead" argument is non-zero, the caller must be able to
132544990b8cSJulian Elischer  * accept 0 (it may continue) or a non-zero error (it must abort) as a result.
132644990b8cSJulian Elischer  *
132744990b8cSJulian Elischer  * The 'return_instead' argument tells the function if it may do a
132844990b8cSJulian Elischer  * thread_exit() or suspend, or whether the caller must abort and back
132944990b8cSJulian Elischer  * out instead.
133044990b8cSJulian Elischer  *
133144990b8cSJulian Elischer  * If the thread that set the single_threading request has set the
133244990b8cSJulian Elischer  * P_SINGLE_EXIT bit in the process flags, then this call will never return
133344990b8cSJulian Elischer  * if 'return_instead' is false; instead, the thread will exit.
133444990b8cSJulian Elischer  *
133544990b8cSJulian Elischer  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
133644990b8cSJulian Elischer  *---------------+--------------------+---------------------
133744990b8cSJulian Elischer  *       0       | returns 0          |   returns 0 or error
1338353374b5SJohn Baldwin  *               | when ST ends       |   immediately
133944990b8cSJulian Elischer  *---------------+--------------------+---------------------
134044990b8cSJulian Elischer  *       1       | thread exits       |   returns error
1341353374b5SJohn Baldwin  *               |                    |  immediately
134244990b8cSJulian Elischer  * 0 = thread_exit() or suspension ok,
134344990b8cSJulian Elischer  * other = return error instead of stopping the thread.
134444990b8cSJulian Elischer  *
134544990b8cSJulian Elischer  * While a full suspension is in effect, even a single-threading
134644990b8cSJulian Elischer  * thread would be suspended if it made this call (but it shouldn't).
134744990b8cSJulian Elischer  * This call should only be made from places where
134844990b8cSJulian Elischer  * thread_exit() would be safe as that may be the outcome unless
134944990b8cSJulian Elischer  * return_instead is set.
135044990b8cSJulian Elischer  */
135144990b8cSJulian Elischer int
135244990b8cSJulian Elischer thread_suspend_check(int return_instead)
135344990b8cSJulian Elischer {
1354ecafb24bSJuli Mallett 	struct thread *td;
1355ecafb24bSJuli Mallett 	struct proc *p;
135646e47c4fSKonstantin Belousov 	int wakeup_swapper;
135744990b8cSJulian Elischer 
135844990b8cSJulian Elischer 	td = curthread;
135944990b8cSJulian Elischer 	p = td->td_proc;
136037814395SPeter Wemm 	mtx_assert(&Giant, MA_NOTOWNED);
136144990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
13628638fe7bSKonstantin Belousov 	while (thread_suspend_check_needed()) {
13631279572aSDavid Xu 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
136444990b8cSJulian Elischer 			KASSERT(p->p_singlethread != NULL,
136544990b8cSJulian Elischer 			    ("singlethread not set"));
136644990b8cSJulian Elischer 			/*
1367e3b9bf71SJulian Elischer 			 * The only suspension in effect is
1368e3b9bf71SJulian Elischer 			 * single-threading. The single threader need not stop.
1369bd07998eSKonstantin Belousov 			 * It is safe to access p->p_singlethread unlocked
1370bd07998eSKonstantin Belousov 			 * because it can only be set to our address by us.
137144990b8cSJulian Elischer 			 */
1372e3b9bf71SJulian Elischer 			if (p->p_singlethread == td)
137344990b8cSJulian Elischer 				return (0);	/* Exempt from stopping. */
137444990b8cSJulian Elischer 		}
137545a4bfa1SDavid Xu 		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
137694f0972bSDavid Xu 			return (EINTR);
137744990b8cSJulian Elischer 
1378906ac69dSDavid Xu 		/* Should we go to the user boundary if we didn't come from there? */
1379906ac69dSDavid Xu 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
1380906ac69dSDavid Xu 		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
138194f0972bSDavid Xu 			return (ERESTART);
1382906ac69dSDavid Xu 
138344990b8cSJulian Elischer 		/*
13843077f938SKonstantin Belousov 		 * Ignore suspend requests if they are deferred.
1385d071a6faSJohn Baldwin 		 */
13863077f938SKonstantin Belousov 		if ((td->td_flags & TDF_SBDRY) != 0) {
1387d071a6faSJohn Baldwin 			KASSERT(return_instead,
1388d071a6faSJohn Baldwin 			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
138946e47c4fSKonstantin Belousov 			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
139046e47c4fSKonstantin Belousov 			    (TDF_SEINTR | TDF_SERESTART),
139146e47c4fSKonstantin Belousov 			    ("both TDF_SEINTR and TDF_SERESTART"));
139246e47c4fSKonstantin Belousov 			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
1393d071a6faSJohn Baldwin 		}
1394d071a6faSJohn Baldwin 
1395d071a6faSJohn Baldwin 		/*
139644990b8cSJulian Elischer 		 * If the process is waiting for us to exit,
139744990b8cSJulian Elischer 		 * this thread should just suicide.
13981279572aSDavid Xu 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
139944990b8cSJulian Elischer 		 */
1400cf7d9a8cSDavid Xu 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1401cf7d9a8cSDavid Xu 			PROC_UNLOCK(p);
140291d1786fSDmitry Chagin 
140391d1786fSDmitry Chagin 			/*
140491d1786fSDmitry Chagin 			 * Allow Linux emulation layer to do some work
140591d1786fSDmitry Chagin 			 * before thread suicide.
140691d1786fSDmitry Chagin 			 */
140791d1786fSDmitry Chagin 			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
140891d1786fSDmitry Chagin 				(p->p_sysent->sv_thread_detach)(td);
14092a339d9eSKonstantin Belousov 			umtx_thread_exit(td);
1410d1e7a4a5SJohn Baldwin 			kern_thr_exit(td);
1411d1e7a4a5SJohn Baldwin 			panic("stopped thread did not exit");
1412cf7d9a8cSDavid Xu 		}
141321ecd1e9SDavid Xu 
141421ecd1e9SDavid Xu 		PROC_SLOCK(p);
141521ecd1e9SDavid Xu 		thread_stopped(p);
1416a54e85fdSJeff Roberson 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1417a54e85fdSJeff Roberson 			if (p->p_numthreads == p->p_suspcount + 1) {
1418a54e85fdSJeff Roberson 				thread_lock(p->p_singlethread);
141984cdea97SKonstantin Belousov 				wakeup_swapper = thread_unsuspend_one(
142084cdea97SKonstantin Belousov 				    p->p_singlethread, p, false);
14217847a9daSJohn Baldwin 				if (wakeup_swapper)
14227847a9daSJohn Baldwin 					kick_proc0();
1423a54e85fdSJeff Roberson 			}
1424a54e85fdSJeff Roberson 		}
14253f9be10eSDavid Xu 		PROC_UNLOCK(p);
14267b4a950aSDavid Xu 		thread_lock(td);
142744990b8cSJulian Elischer 		/*
142844990b8cSJulian Elischer 		 * When a thread suspends, it just
1429ad1e7d28SJulian Elischer 		 * gets taken off all queues.
143044990b8cSJulian Elischer 		 */
143171fad9fdSJulian Elischer 		thread_suspend_one(td);
1432906ac69dSDavid Xu 		if (return_instead == 0) {
1433906ac69dSDavid Xu 			p->p_boundary_count++;
1434906ac69dSDavid Xu 			td->td_flags |= TDF_BOUNDARY;
1435cf19bf91SJulian Elischer 		}
14367b4a950aSDavid Xu 		PROC_SUNLOCK(p);
1437686bcb5cSJeff Roberson 		mi_switch(SW_INVOL | SWT_SUSPEND);
143844990b8cSJulian Elischer 		PROC_LOCK(p);
143944990b8cSJulian Elischer 	}
144044990b8cSJulian Elischer 	return (0);
144144990b8cSJulian Elischer }
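
/*
 * Illustrative sketch of a thread_suspend_check() caller that cannot stop
 * in place and therefore passes a non-zero 'return_instead'.  The function
 * name is hypothetical; the locking follows the comment above.
 */
#if 0
static int
example_check_before_blocking(struct thread *td)
{
	struct proc *p;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	error = thread_suspend_check(1);	/* may return EINTR/ERESTART */
	PROC_UNLOCK(p);
	if (error != 0)
		return (error);		/* back out to the user boundary */
	/* ... safe to continue or block ... */
	return (0);
}
#endif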
144244990b8cSJulian Elischer 
1443478ca4b0SKonstantin Belousov /*
1444478ca4b0SKonstantin Belousov  * Check for possible stops and suspensions while executing a
1445478ca4b0SKonstantin Belousov  * casueword or similar transiently failing operation.
1446478ca4b0SKonstantin Belousov  *
1447478ca4b0SKonstantin Belousov  * The sleep argument controls whether the function can handle a stop
1448478ca4b0SKonstantin Belousov  * request itself or whether it should return ERESTART and leave the
1449478ca4b0SKonstantin Belousov  * request to be processed at the kernel/user boundary in ast.
1450478ca4b0SKonstantin Belousov  *
1451478ca4b0SKonstantin Belousov  * Typically, when retrying due to casueword(9) failure (rv == 1), we
1452478ca4b0SKonstantin Belousov  * should handle the stop requests there, with the exception of cases
1453478ca4b0SKonstantin Belousov  * when the thread owns a kernel resource, for instance having busied
1454300b525dSKonstantin Belousov  * the umtx key, or when functions return immediately if
1455478ca4b0SKonstantin Belousov  * thread_check_susp() returned non-zero.  On the other hand, when
1456478ca4b0SKonstantin Belousov  * retrying the whole lock operation, we had better not stop there but
1457478ca4b0SKonstantin Belousov  * delegate the handling to ast.
1458478ca4b0SKonstantin Belousov  *
1459478ca4b0SKonstantin Belousov  * If the request is for thread termination P_SINGLE_EXIT, we cannot
1460478ca4b0SKonstantin Belousov  * handle it at all, and simply return EINTR.
1461478ca4b0SKonstantin Belousov  */
1462478ca4b0SKonstantin Belousov int
1463478ca4b0SKonstantin Belousov thread_check_susp(struct thread *td, bool sleep)
1464478ca4b0SKonstantin Belousov {
1465478ca4b0SKonstantin Belousov 	struct proc *p;
1466478ca4b0SKonstantin Belousov 	int error;
1467478ca4b0SKonstantin Belousov 
1468478ca4b0SKonstantin Belousov 	/*
1469478ca4b0SKonstantin Belousov 	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
1470478ca4b0SKonstantin Belousov 	 * eventually break the lockstep loop.
1471478ca4b0SKonstantin Belousov 	 */
1472478ca4b0SKonstantin Belousov 	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
1473478ca4b0SKonstantin Belousov 		return (0);
1474478ca4b0SKonstantin Belousov 	error = 0;
1475478ca4b0SKonstantin Belousov 	p = td->td_proc;
1476478ca4b0SKonstantin Belousov 	PROC_LOCK(p);
1477478ca4b0SKonstantin Belousov 	if (p->p_flag & P_SINGLE_EXIT)
1478478ca4b0SKonstantin Belousov 		error = EINTR;
1479478ca4b0SKonstantin Belousov 	else if (P_SHOULDSTOP(p) ||
1480478ca4b0SKonstantin Belousov 	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
1481478ca4b0SKonstantin Belousov 		error = sleep ? thread_suspend_check(0) : ERESTART;
1482478ca4b0SKonstantin Belousov 	PROC_UNLOCK(p);
1483478ca4b0SKonstantin Belousov 	return (error);
1484478ca4b0SKonstantin Belousov }
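
/*
 * Illustrative sketch of the casueword(9) retry pattern described above.
 * The function name and the stored value are hypothetical; the point is
 * that a transient rv == 1 failure re-checks for stops via
 * thread_check_susp() before looping.
 */
#if 0
static int
example_cas_retry(struct thread *td, volatile uint32_t *uaddr)
{
	uint32_t old;
	int error, rv;

	old = 0;
	for (;;) {
		rv = casueword32(uaddr, old, &old, 1);
		if (rv == -1)
			return (EFAULT);
		if (rv == 0)
			break;		/* compare-and-set succeeded */
		/* Transient failure: honor stop/suspend requests first. */
		error = thread_check_susp(td, true);
		if (error != 0)
			return (error);
	}
	return (0);
}
#endif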
1485478ca4b0SKonstantin Belousov 
148635c32a76SDavid Xu void
14876ddcc233SKonstantin Belousov thread_suspend_switch(struct thread *td, struct proc *p)
1488a54e85fdSJeff Roberson {
1489a54e85fdSJeff Roberson 
1490a54e85fdSJeff Roberson 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1491a54e85fdSJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
14927b4a950aSDavid Xu 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1493a54e85fdSJeff Roberson 	/*
1494a54e85fdSJeff Roberson 	 * We implement thread_suspend_one in stages here to avoid
1495a54e85fdSJeff Roberson 	 * dropping the proc lock while the thread lock is owned.
1496a54e85fdSJeff Roberson 	 */
14976ddcc233SKonstantin Belousov 	if (p == td->td_proc) {
1498a54e85fdSJeff Roberson 		thread_stopped(p);
1499a54e85fdSJeff Roberson 		p->p_suspcount++;
15006ddcc233SKonstantin Belousov 	}
15013f9be10eSDavid Xu 	PROC_UNLOCK(p);
15027b4a950aSDavid Xu 	thread_lock(td);
1503b7edba77SJeff Roberson 	td->td_flags &= ~TDF_NEEDSUSPCHK;
1504a54e85fdSJeff Roberson 	TD_SET_SUSPENDED(td);
1505c5aa6b58SJeff Roberson 	sched_sleep(td, 0);
15067b4a950aSDavid Xu 	PROC_SUNLOCK(p);
1507a54e85fdSJeff Roberson 	DROP_GIANT();
1508686bcb5cSJeff Roberson 	mi_switch(SW_VOL | SWT_SUSPEND);
1509a54e85fdSJeff Roberson 	PICKUP_GIANT();
1510a54e85fdSJeff Roberson 	PROC_LOCK(p);
15117b4a950aSDavid Xu 	PROC_SLOCK(p);
1512a54e85fdSJeff Roberson }
1513a54e85fdSJeff Roberson 
1514a54e85fdSJeff Roberson void
151535c32a76SDavid Xu thread_suspend_one(struct thread *td)
151635c32a76SDavid Xu {
15176ddcc233SKonstantin Belousov 	struct proc *p;
151835c32a76SDavid Xu 
15196ddcc233SKonstantin Belousov 	p = td->td_proc;
15207b4a950aSDavid Xu 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1521a54e85fdSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1522e574e444SDavid Xu 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
152335c32a76SDavid Xu 	p->p_suspcount++;
1524b7edba77SJeff Roberson 	td->td_flags &= ~TDF_NEEDSUSPCHK;
152571fad9fdSJulian Elischer 	TD_SET_SUSPENDED(td);
1526c5aa6b58SJeff Roberson 	sched_sleep(td, 0);
152735c32a76SDavid Xu }
152835c32a76SDavid Xu 
152984cdea97SKonstantin Belousov static int
153084cdea97SKonstantin Belousov thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
153135c32a76SDavid Xu {
153235c32a76SDavid Xu 
1533a54e85fdSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1534ad1e7d28SJulian Elischer 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
153571fad9fdSJulian Elischer 	TD_CLR_SUSPENDED(td);
15366ddcc233SKonstantin Belousov 	td->td_flags &= ~TDF_ALLPROCSUSP;
15376ddcc233SKonstantin Belousov 	if (td->td_proc == p) {
15386ddcc233SKonstantin Belousov 		PROC_SLOCK_ASSERT(p, MA_OWNED);
153935c32a76SDavid Xu 		p->p_suspcount--;
154084cdea97SKonstantin Belousov 		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
154184cdea97SKonstantin Belousov 			td->td_flags &= ~TDF_BOUNDARY;
154284cdea97SKonstantin Belousov 			p->p_boundary_count--;
154384cdea97SKonstantin Belousov 		}
15446ddcc233SKonstantin Belousov 	}
154561a74c5cSJeff Roberson 	return (setrunnable(td, 0));
154635c32a76SDavid Xu }
154735c32a76SDavid Xu 
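/*
 * Briefly let a suspended thread run: pull it off its sleep queue if
 * necessary, clear the suspension, drop p_suspcount and make the thread
 * runnable, kicking the swapper if setrunnable() requests it.
 */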
1548af928fdeSKonstantin Belousov void
1549af928fdeSKonstantin Belousov thread_run_flash(struct thread *td)
1550af928fdeSKonstantin Belousov {
1551af928fdeSKonstantin Belousov 	struct proc *p;
1552af928fdeSKonstantin Belousov 
1553af928fdeSKonstantin Belousov 	p = td->td_proc;
1554af928fdeSKonstantin Belousov 	PROC_LOCK_ASSERT(p, MA_OWNED);
1555af928fdeSKonstantin Belousov 
1556af928fdeSKonstantin Belousov 	if (TD_ON_SLEEPQ(td))
1557af928fdeSKonstantin Belousov 		sleepq_remove_nested(td);
1558af928fdeSKonstantin Belousov 	else
1559af928fdeSKonstantin Belousov 		thread_lock(td);
1560af928fdeSKonstantin Belousov 
1561af928fdeSKonstantin Belousov 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1562af928fdeSKonstantin Belousov 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1563af928fdeSKonstantin Belousov 
1564af928fdeSKonstantin Belousov 	TD_CLR_SUSPENDED(td);
1565af928fdeSKonstantin Belousov 	PROC_SLOCK(p);
1566af928fdeSKonstantin Belousov 	MPASS(p->p_suspcount > 0);
1567af928fdeSKonstantin Belousov 	p->p_suspcount--;
1568af928fdeSKonstantin Belousov 	PROC_SUNLOCK(p);
1569af928fdeSKonstantin Belousov 	if (setrunnable(td, 0))
1570af928fdeSKonstantin Belousov 		kick_proc0();
1571af928fdeSKonstantin Belousov }
1572af928fdeSKonstantin Belousov 
157344990b8cSJulian Elischer /*
157444990b8cSJulian Elischer  * Allow all threads blocked by single threading to continue running.
157544990b8cSJulian Elischer  */
157644990b8cSJulian Elischer void
157744990b8cSJulian Elischer thread_unsuspend(struct proc *p)
157844990b8cSJulian Elischer {
157944990b8cSJulian Elischer 	struct thread *td;
15807847a9daSJohn Baldwin 	int wakeup_swapper;
158144990b8cSJulian Elischer 
158244990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
15837b4a950aSDavid Xu 	PROC_SLOCK_ASSERT(p, MA_OWNED);
15847847a9daSJohn Baldwin 	wakeup_swapper = 0;
158544990b8cSJulian Elischer 	if (!P_SHOULDSTOP(p)) {
1586ad1e7d28SJulian Elischer                 FOREACH_THREAD_IN_PROC(p, td) {
1587a54e85fdSJeff Roberson 			thread_lock(td);
1588ad1e7d28SJulian Elischer 			if (TD_IS_SUSPENDED(td)) {
158984cdea97SKonstantin Belousov 				wakeup_swapper |= thread_unsuspend_one(td, p,
159084cdea97SKonstantin Belousov 				    true);
159161a74c5cSJeff Roberson 			} else
1592a54e85fdSJeff Roberson 				thread_unlock(td);
1593ad1e7d28SJulian Elischer 		}
159484cdea97SKonstantin Belousov 	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
159584cdea97SKonstantin Belousov 	    p->p_numthreads == p->p_suspcount) {
159644990b8cSJulian Elischer 		/*
159744990b8cSJulian Elischer 		 * Stopping everything also did the job for the single
159844990b8cSJulian Elischer 		 * threading request. Now we've downgraded to single-threaded,
159944990b8cSJulian Elischer 		 * let it continue.
160044990b8cSJulian Elischer 		 */
16016ddcc233SKonstantin Belousov 		if (p->p_singlethread->td_proc == p) {
1602a54e85fdSJeff Roberson 			thread_lock(p->p_singlethread);
16036ddcc233SKonstantin Belousov 			wakeup_swapper = thread_unsuspend_one(
160484cdea97SKonstantin Belousov 			    p->p_singlethread, p, false);
160544990b8cSJulian Elischer 		}
16066ddcc233SKonstantin Belousov 	}
16077847a9daSJohn Baldwin 	if (wakeup_swapper)
16087847a9daSJohn Baldwin 		kick_proc0();
160944990b8cSJulian Elischer }
161044990b8cSJulian Elischer 
1611ed062c8dSJulian Elischer /*
1612ed062c8dSJulian Elischer  * End the single-threading mode.
1613ed062c8dSJulian Elischer  */
161444990b8cSJulian Elischer void
16156ddcc233SKonstantin Belousov thread_single_end(struct proc *p, int mode)
161644990b8cSJulian Elischer {
161744990b8cSJulian Elischer 	struct thread *td;
16187847a9daSJohn Baldwin 	int wakeup_swapper;
161944990b8cSJulian Elischer 
16206ddcc233SKonstantin Belousov 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
16216ddcc233SKonstantin Belousov 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
16226ddcc233SKonstantin Belousov 	    ("invalid mode %d", mode));
162344990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
16246ddcc233SKonstantin Belousov 	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
16256ddcc233SKonstantin Belousov 	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
16266ddcc233SKonstantin Belousov 	    ("mode %d does not match P_TOTAL_STOP", mode));
162784cdea97SKonstantin Belousov 	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
162884cdea97SKonstantin Belousov 	    ("thread_single_end from other thread %p %p",
162984cdea97SKonstantin Belousov 	    curthread, p->p_singlethread));
163084cdea97SKonstantin Belousov 	KASSERT(mode != SINGLE_BOUNDARY ||
163184cdea97SKonstantin Belousov 	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
163284cdea97SKonstantin Belousov 	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
16336ddcc233SKonstantin Belousov 	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
16346ddcc233SKonstantin Belousov 	    P_TOTAL_STOP);
16357b4a950aSDavid Xu 	PROC_SLOCK(p);
163644990b8cSJulian Elischer 	p->p_singlethread = NULL;
16377847a9daSJohn Baldwin 	wakeup_swapper = 0;
163849539972SJulian Elischer 	/*
16397847a9daSJohn Baldwin 	 * If there are other threads, they may now run,
164049539972SJulian Elischer 	 * unless of course there is a blanket 'stop order'
164149539972SJulian Elischer 	 * on the process. The single threader must be allowed
164249539972SJulian Elischer 	 * to continue, however, as this is a bad place to stop.
164349539972SJulian Elischer 	 */
16446ddcc233SKonstantin Belousov 	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
1645ad1e7d28SJulian Elischer                 FOREACH_THREAD_IN_PROC(p, td) {
1646a54e85fdSJeff Roberson 			thread_lock(td);
1647ad1e7d28SJulian Elischer 			if (TD_IS_SUSPENDED(td)) {
164884cdea97SKonstantin Belousov 				wakeup_swapper |= thread_unsuspend_one(td, p,
164984cdea97SKonstantin Belousov 				    mode == SINGLE_BOUNDARY);
165061a74c5cSJeff Roberson 			} else
1651a54e85fdSJeff Roberson 				thread_unlock(td);
165249539972SJulian Elischer 		}
1653ad1e7d28SJulian Elischer 	}
165484cdea97SKonstantin Belousov 	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
165584cdea97SKonstantin Belousov 	    ("inconsistent boundary count %d", p->p_boundary_count));
16567b4a950aSDavid Xu 	PROC_SUNLOCK(p);
16577847a9daSJohn Baldwin 	if (wakeup_swapper)
16587847a9daSJohn Baldwin 		kick_proc0();
165949539972SJulian Elischer }
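
/*
 * Illustrative sketch pairing thread_single() with thread_single_end():
 * a hypothetical caller (the function name is made up) that needs the
 * other threads parked at the user boundary for a while and already owns
 * the process lock.
 */
#if 0
static int
example_quiesce_at_boundary(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (thread_single(p, SINGLE_BOUNDARY) != 0)
		return (ERESTART);	/* another thread won the race */
	/* ... the other threads are suspended at the user boundary ... */
	thread_single_end(p, SINGLE_BOUNDARY);
	return (0);
}
#endif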
16604fc21c09SDaniel Eischen 
1661aae3547bSMateusz Guzik /*
1662aae3547bSMateusz Guzik  * Locate a thread by number and return with proc lock held.
1663aae3547bSMateusz Guzik  *
1664aae3547bSMateusz Guzik  * Thread exit establishes the proc -> tidhash lock ordering, but lookup
1665aae3547bSMateusz Guzik  * takes tidhash first and needs to return a locked proc.
1666aae3547bSMateusz Guzik  *
1667aae3547bSMateusz Guzik  * The problem is worked around by relying on type-safety of both
1668aae3547bSMateusz Guzik  * structures and doing the work in 2 steps:
1669aae3547bSMateusz Guzik  * - tidhash-locked lookup which saves both thread and proc pointers
1670aae3547bSMateusz Guzik  * - proc-locked verification that the found thread still matches
1671aae3547bSMateusz Guzik  */
1672aae3547bSMateusz Guzik static bool
1673aae3547bSMateusz Guzik tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
1674cf7d9a8cSDavid Xu {
1675cf7d9a8cSDavid Xu #define RUN_THRESH	16
1676aae3547bSMateusz Guzik 	struct proc *p;
1677cf7d9a8cSDavid Xu 	struct thread *td;
1678aae3547bSMateusz Guzik 	int run;
1679aae3547bSMateusz Guzik 	bool locked;
1680cf7d9a8cSDavid Xu 
1681aae3547bSMateusz Guzik 	run = 0;
168226007fe3SMateusz Guzik 	rw_rlock(TIDHASHLOCK(tid));
1683aae3547bSMateusz Guzik 	locked = true;
1684cf7d9a8cSDavid Xu 	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1685aae3547bSMateusz Guzik 		if (td->td_tid != tid) {
1686aae3547bSMateusz Guzik 			run++;
1687aae3547bSMateusz Guzik 			continue;
1688cf7d9a8cSDavid Xu 		}
1689aae3547bSMateusz Guzik 		p = td->td_proc;
1690aae3547bSMateusz Guzik 		if (pid != -1 && p->p_pid != pid) {
1691cf7d9a8cSDavid Xu 			td = NULL;
1692cf7d9a8cSDavid Xu 			break;
1693cf7d9a8cSDavid Xu 		}
1694cf7d9a8cSDavid Xu 		if (run > RUN_THRESH) {
169526007fe3SMateusz Guzik 			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
1696cf7d9a8cSDavid Xu 				LIST_REMOVE(td, td_hash);
1697cf7d9a8cSDavid Xu 				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1698cf7d9a8cSDavid Xu 					td, td_hash);
169926007fe3SMateusz Guzik 				rw_wunlock(TIDHASHLOCK(tid));
1700aae3547bSMateusz Guzik 				locked = false;
1701aae3547bSMateusz Guzik 				break;
1702cf7d9a8cSDavid Xu 			}
1703cf7d9a8cSDavid Xu 		}
1704cf7d9a8cSDavid Xu 		break;
1705cf7d9a8cSDavid Xu 	}
1706aae3547bSMateusz Guzik 	if (locked)
170726007fe3SMateusz Guzik 		rw_runlock(TIDHASHLOCK(tid));
1708aae3547bSMateusz Guzik 	if (td == NULL)
1709aae3547bSMateusz Guzik 		return (false);
1710aae3547bSMateusz Guzik 	*pp = p;
1711aae3547bSMateusz Guzik 	*tdp = td;
1712aae3547bSMateusz Guzik 	return (true);
1713aae3547bSMateusz Guzik }
1714aae3547bSMateusz Guzik 
1715aae3547bSMateusz Guzik struct thread *
1716aae3547bSMateusz Guzik tdfind(lwpid_t tid, pid_t pid)
1717aae3547bSMateusz Guzik {
1718aae3547bSMateusz Guzik 	struct proc *p;
1719aae3547bSMateusz Guzik 	struct thread *td;
1720aae3547bSMateusz Guzik 
1721aae3547bSMateusz Guzik 	td = curthread;
1722aae3547bSMateusz Guzik 	if (td->td_tid == tid) {
1723aae3547bSMateusz Guzik 		if (pid != -1 && td->td_proc->p_pid != pid)
1724aae3547bSMateusz Guzik 			return (NULL);
1725aae3547bSMateusz Guzik 		PROC_LOCK(td->td_proc);
1726cf7d9a8cSDavid Xu 		return (td);
1727cf7d9a8cSDavid Xu 	}
1728cf7d9a8cSDavid Xu 
1729aae3547bSMateusz Guzik 	for (;;) {
1730aae3547bSMateusz Guzik 		if (!tdfind_hash(tid, pid, &p, &td))
1731aae3547bSMateusz Guzik 			return (NULL);
1732aae3547bSMateusz Guzik 		PROC_LOCK(p);
1733aae3547bSMateusz Guzik 		if (td->td_tid != tid) {
1734aae3547bSMateusz Guzik 			PROC_UNLOCK(p);
1735aae3547bSMateusz Guzik 			continue;
1736aae3547bSMateusz Guzik 		}
1737aae3547bSMateusz Guzik 		if (td->td_proc != p) {
1738aae3547bSMateusz Guzik 			PROC_UNLOCK(p);
1739aae3547bSMateusz Guzik 			continue;
1740aae3547bSMateusz Guzik 		}
1741aae3547bSMateusz Guzik 		if (p->p_state == PRS_NEW) {
1742aae3547bSMateusz Guzik 			PROC_UNLOCK(p);
1743aae3547bSMateusz Guzik 			return (NULL);
1744aae3547bSMateusz Guzik 		}
1745aae3547bSMateusz Guzik 		return (td);
1746aae3547bSMateusz Guzik 	}
1747aae3547bSMateusz Guzik }
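
/*
 * Illustrative sketch of a tdfind() caller (the function name is made up).
 * tdfind() returns with the containing process locked, so the caller must
 * remember to drop that lock.
 */
#if 0
static int
example_lookup_tid(lwpid_t tid)
{
	struct thread *td;

	td = tdfind(tid, -1);		/* -1: accept any pid */
	if (td == NULL)
		return (ESRCH);
	/* ... use td while holding td->td_proc's lock ... */
	PROC_UNLOCK(td->td_proc);
	return (0);
}
#endif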
1748aae3547bSMateusz Guzik 
1749cf7d9a8cSDavid Xu void
1750cf7d9a8cSDavid Xu tidhash_add(struct thread *td)
1751cf7d9a8cSDavid Xu {
175226007fe3SMateusz Guzik 	rw_wlock(TIDHASHLOCK(td->td_tid));
1753cf7d9a8cSDavid Xu 	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
175426007fe3SMateusz Guzik 	rw_wunlock(TIDHASHLOCK(td->td_tid));
1755cf7d9a8cSDavid Xu }
1756cf7d9a8cSDavid Xu 
1757cf7d9a8cSDavid Xu void
1758cf7d9a8cSDavid Xu tidhash_remove(struct thread *td)
1759cf7d9a8cSDavid Xu {
176026007fe3SMateusz Guzik 
176126007fe3SMateusz Guzik 	rw_wlock(TIDHASHLOCK(td->td_tid));
1762cf7d9a8cSDavid Xu 	LIST_REMOVE(td, td_hash);
176326007fe3SMateusz Guzik 	rw_wunlock(TIDHASHLOCK(td->td_tid));
1764cf7d9a8cSDavid Xu }
1765