xref: /freebsd/sys/kern/kern_thread.c (revision 732d95288a75db32ad012f99b130806378938564)
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

int max_groups_per_proc = 500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
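
/*
 * Illustrative use of RANGEOF (a sketch; the macro is only defined
 * here, its callers live elsewhere): it yields the size in bytes of
 * the members of 'type' lying between 'start' (inclusive) and 'end'
 * (exclusive), which is handy for bulk-clearing a marked region of a
 * structure, e.g.:
 *
 *	bzero(&p->p_startzero,
 *	    (unsigned)RANGEOF(struct proc, p_startzero, p_endzero));
 *
 * p_startzero/p_endzero are assumed here to be the usual marker
 * fields in struct proc.
 */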

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

void kse_purge(struct proc *p, struct thread *td);
void kse_purge_group(struct thread *td);

/* move to proc.h */
extern void	kseinit(void);
extern void	kse_GC(void);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");
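
/*
 * Usage sketch (assuming the standard sysctl(8) utility): the handler
 * above reports mp_ncpus until the knob is explicitly set, and the
 * override afterwards.  From userland:
 *
 *	sysctl kern.threads.virtual_cpu		# read current value
 *	sysctl kern.threads.virtual_cpu=4	# override to 4
 *
 * sysctl_handle_int() copies the current value out to the caller and,
 * if req->newptr is non-NULL, copies the proposed new value into
 * new_val so it can be validated before being committed.
 */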

/*
 * Thread ID allocator. The allocator keeps track of assigned IDs by
 * using a bitmap. The bitmap is created in parts. The parts are linked
 * together.
 */
typedef u_long tid_bitmap_word;

#define	TID_IDS_PER_PART	1024
#define	TID_IDS_PER_IDX		(sizeof(tid_bitmap_word) << 3)
#define	TID_BITMAP_SIZE		(TID_IDS_PER_PART / TID_IDS_PER_IDX)
#define	TID_MIN			(PID_MAX + 1)

struct tid_bitmap_part {
	STAILQ_ENTRY(tid_bitmap_part) bmp_next;
	tid_bitmap_word	bmp_bitmap[TID_BITMAP_SIZE];
	lwpid_t		bmp_base;
	int		bmp_free;
};

static STAILQ_HEAD(, tid_bitmap_part) tid_bitmap =
    STAILQ_HEAD_INITIALIZER(tid_bitmap);
static uma_zone_t tid_zone;

struct mtx tid_lock;
MTX_SYSINIT(tid_lock, &tid_lock, "TID lock", MTX_DEF);
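
/*
 * Worked example (a sketch; the numbers assume a 64-bit u_long and
 * the stock PID_MAX of 99999): TID_IDS_PER_IDX is 64, TID_BITMAP_SIZE
 * is 1024 / 64 = 16 words, and TID_MIN is 100000.  A thread ID maps
 * to a (part, word, bit) triple like this:
 *
 *	lwpid_t tid = 101000;			(hypothetical TID)
 *	lwpid_t off = tid - bmp->bmp_base;	101000 - 100000 = 1000
 *	int idx = off / TID_IDS_PER_IDX;	1000 / 64 = 15
 *	int bit = off % TID_IDS_PER_IDX;	1000 % 64 = 40
 *
 * so TID 101000 is bit 40 of word 15 in the first bitmap part.  A set
 * bit means "free", which lets thread_init() below find a free ID
 * with a plain ffsl().
 */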

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.  A context switch must occur inside a
	 * critical section, and in fact, includes hand-off of the sched_lock.
	 * After a context switch to a newly created thread, it will release
	 * sched_lock for the first time, and its td_critnest will hit 0 for
	 * the first time.  This happens on the far end of a context switch,
	 * and when it context switches away from itself, it will in fact go
	 * back into a critical section, and hand off the sched lock to the
	 * next thread.
	 */
	td->td_critnest = 1;
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;
	struct tid_bitmap_part *bmp, *new;
	int bit, idx;

	td = (struct thread *)mem;

	mtx_lock(&tid_lock);
	STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) {
		if (bmp->bmp_free)
			break;
	}
	/* Create a new bitmap if we run out of free bits. */
	if (bmp == NULL) {
		mtx_unlock(&tid_lock);
		new = uma_zalloc(tid_zone, M_WAITOK);
		mtx_lock(&tid_lock);
		bmp = STAILQ_LAST(&tid_bitmap, tid_bitmap_part, bmp_next);
		if (bmp == NULL || bmp->bmp_free < TID_IDS_PER_PART/2) {
			/* 1=free, 0=assigned. This way we can use ffsl(). */
			memset(new->bmp_bitmap, ~0U, sizeof(new->bmp_bitmap));
			new->bmp_base = (bmp == NULL) ? TID_MIN :
			    bmp->bmp_base + TID_IDS_PER_PART;
			new->bmp_free = TID_IDS_PER_PART;
			STAILQ_INSERT_TAIL(&tid_bitmap, new, bmp_next);
			bmp = new;
			new = NULL;
		}
	} else
		new = NULL;
	/* We have a bitmap with available IDs. */
	idx = 0;
	while (idx < TID_BITMAP_SIZE && bmp->bmp_bitmap[idx] == 0UL)
		idx++;
	bit = ffsl(bmp->bmp_bitmap[idx]) - 1;
	td->td_tid = bmp->bmp_base + idx * TID_IDS_PER_IDX + bit;
	bmp->bmp_bitmap[idx] &= ~(1UL << bit);
	bmp->bmp_free--;
	mtx_unlock(&tid_lock);
	if (new != NULL)
		uma_zfree(tid_zone, new);

	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_sched = (struct td_sched *)&td[1];
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;
	struct tid_bitmap_part *bmp;
	lwpid_t tid;
	int bit, idx;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	vm_thread_dispose(td);

	STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) {
		if (td->td_tid >= bmp->bmp_base &&
		    td->td_tid < bmp->bmp_base + TID_IDS_PER_PART)
			break;
	}
	KASSERT(bmp != NULL, ("No TID bitmap?"));
	mtx_lock(&tid_lock);
	tid = td->td_tid - bmp->bmp_base;
	idx = tid / TID_IDS_PER_IDX;
	/*
	 * Keep 'bit' as a bit index and widen the mask to the bitmap
	 * word type; shifting 1UL into an int would truncate the mask
	 * for the upper bits of a 64-bit word.
	 */
	bit = tid % TID_IDS_PER_IDX;
	bmp->bmp_bitmap[idx] |= (tid_bitmap_word)1 << bit;
	bmp->bmp_free++;
	mtx_unlock(&tid_lock);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static int
kse_init(void *mem, int size, int flags)
{
	struct kse	*ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
	return (0);
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static int
ksegrp_init(void *mem, int size, int flags)
{
	struct ksegrp	*kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
	return (0);
}

/*
 * Link a KSE into its kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state	= KES_UNQUEUED;
	ke->ke_proc	= p;
	ke->ke_ksegrp	= kg;
	ke->ke_thread	= NULL;
	ke->ke_oncpu	= NOCPU;
	ke->ke_flags	= 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	kg->kg_kses--;
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable   = 0;
	kg->kg_kses       = 0;
	kg->kg_runq_kses  = 0; /* XXXKSE change name */
	kg->kg_idle_kses  = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the ksegrp
	 */
	ksegrp_stash(kg);
}

/*
 * For a newly created process,
 * link up all the structures and its initial thread etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}
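
/*
 * The resulting initial linkage, sketched for orientation (one of
 * each structure; a threaded process may later grow more ksegrps,
 * KSEs and threads):
 *
 *	struct proc
 *	    p_ksegrps --> struct ksegrp
 *	                      kg_kseq    --> struct kse
 *	                      kg_threads --> struct thread
 *	    p_threads --> (the same thread)
 *
 * Each thread also points back at its ksegrp and proc (td_ksegrp,
 * td_proc); thread_link() below sets those up.
 */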

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	tid_zone = uma_zcreate("TID", sizeof(struct tid_bitmap_part),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kseinit();
}
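
/*
 * A note on the uma(9) callback pairs used above: init/fini run once,
 * when an item is first allocated into a zone and when its backing
 * memory is finally released, so the expensive, type-stable setup of
 * a thread (kernel stack, sleep queue, turnstile, TID) lives there.
 * ctor/dtor run on every uma_zalloc()/uma_zfree(), so they only reset
 * per-use state.  Roughly:
 *
 *	td = uma_zalloc(thread_zone, M_WAITOK);	thread_init once,
 *						thread_ctor each time
 *	uma_zfree(thread_zone, td);		thread_dtor each time,
 *						thread_fini eventually
 */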

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie thread, KSE and ksegrp resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
	}
	kse_GC();
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with the scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/* XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode.  Because we do this (cpu_throw) unconditionally
 * here, libthr has its own version of it (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1
 * anyhow.  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp	*kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	mtx_assert(&Giant, MA_NOTOWNED);

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		thread_unlink(td);
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread.  P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and the owner thread exits only when the process is
		 * exiting, an upcall to userland is no longer needed
		 * and deleting the upcall structure is safe here.
		 * So when all threads in a group have exited, all
		 * upcalls in the group should be automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
		sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT) {
			kse_unlink(ke);
			if (kg->kg_kses == 0) {
				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
				ksegrp_unlink(kg);
			}
		} else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse	= NULL;
#if 0
		td->td_proc	= NULL;
#endif
		td->td_ksegrp	= NULL;
		td->td_last_kse	= NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	cpu_throw(td, choosethread());
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_ksegrp   = kg;
	td->td_last_kse = NULL;
	td->td_flags    = 0;
	td->td_kflags   = 0;
	td->td_kse      = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp's idle KSE resources.  When a ksegrp is preparing to
 * exit, it calls this function.
 */
void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
			("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	        ("%s: ksegrp still has %d upcall structures",
		__func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources.  When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * KSEs have no owner; after all threads in the group
		 * have exited, some KSEs may have been left on the
		 * idle queue, so garbage-collect them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			   ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		        ("%s: ksegrp still has %d upcall structures",
			__func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (force_exit == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						sleepq_abort(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * Maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		if (force_exit == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (force_exit == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
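
/*
 * Usage sketch (hedged; the exact call sites live in code such as
 * exit1() and execve(), and SINGLE_NO_EXIT is assumed to be the
 * non-exiting counterpart of SINGLE_EXIT):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_NO_EXIT)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	caller must back out
 *	}
 *	...operate with all other threads held at the user boundary...
 *	thread_single_end();
 *	PROC_UNLOCK(p);
 *
 * With SINGLE_EXIT the other threads are made to exit rather than
 * merely suspend, as process exit requires.
 */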

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  The single threader need
			 * not stop.  XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			if (p->p_flag & P_SA)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}
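
/*
 * Caller sketch (hedged; userret() and the AST path are the canonical
 * call sites, and the exact surrounding code is assumed here): on the
 * way back to user mode a thread polls for a pending stop or
 * single-threading request and parks itself inside
 * thread_suspend_check() until the event has passed:
 *
 *	PROC_LOCK(p);
 *	thread_suspend_check(0);	may suspend, or never return
 *					if the process is exiting
 *	PROC_UNLOCK(p);
 *
 * Code that cannot block, or that must unwind state first, passes
 * return_instead != 0 and backs out on a nonzero return value.
 */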

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent.  We
	 * want to look like we have two inhibitors.
	 * May already be set... doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}