xref: /freebsd/sys/kern/kern_thread.c (revision e574e444e0f80a693cd6b2db4eee4621b33dc827)
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
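
/*
 * Threads, KSEs, ksegrps and upcall structures that cannot be freed at
 * the point of release (e.g. a thread still running on its own stack,
 * or a caller holding sched_lock) are parked on the zombie queues
 * above under the kse_zombie_lock spin mutex, and are finally
 * reclaimed by thread_reap() from a context where freeing is safe.
 */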

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");
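
/*
 * Both knobs above are runtime-tunable; illustrative usage (not part
 * of this file):
 *
 *	sysctl kern.threads.debug=1
 *	sysctl kern.threads.virtual_cpu=4
 *
 * A virtual_cpu of 0 means "use the real cpu count"; note that
 * kse_create() below only honors virtual_cpu when thread_debug is set.
 */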

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread	*td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread	*td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread	*td;

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}
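
/*
 * A note on the UMA hooks above: thread_ctor()/thread_dtor() run on
 * every allocation and free, while thread_init()/thread_fini() run
 * only when an item enters or leaves the zone, so the kernel stack
 * attached by pmap_new_thread() and the scheduler-private area at
 * &td[1] survive recycling through the zone ("type-stable" storage).
 */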

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse	*ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp	*kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its ksegrp.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state	= KES_UNQUEUED;
	ke->ke_proc	= p;
	ke->ke_ksegrp	= kg;
	ke->ke_thread	= NULL;
	ke->ke_oncpu	= NOCPU;
	ke->ke_flags	= 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}
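
/*
 * Note: kse_unlink() requires sched_lock and may tear down the whole
 * ksegrp when the last KSE is removed, so the caller must not touch
 * the ksegrp afterwards.
 */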

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable   = 0;
	kg->kg_kses       = 0;
	kg->kg_runq_kses  = 0; /* XXXKSE change name */
	kg->kg_idle_kses  = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the ksegrp.
	 */
	ksegrp_stash(kg);
}
struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}
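
/*
 * Upcall structure life cycle: upcall_alloc() creates one, upcall_link()
 * ties it to a ksegrp, and a thread claims it through ku_owner.
 * upcall_remove() releases ownership and upcall_unlink() retires the
 * structure to the zombie list (via upcall_stash()) for thread_reap().
 */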

/*
 * For a newly created process, link up all the structures and its
 * initial thread, etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;

	p = td->td_proc;
	/*
	 * Only the UTS may call this syscall, and the current
	 * group must be a threaded group.
	 */
	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
		return (EINVAL);
	KASSERT((td->td_upcall != NULL), ("%s: does not own an upcall", __func__));

	kg = td->td_ksegrp;
	/* Serialize removing upcall */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ke = td->td_kse;
	upcall_remove(td);
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_THREADED;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) { /* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct timespec ts, ts2, ts3, timeout;
	struct timeval tv;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	/*
	 * Only the UTS may call this syscall, and the current
	 * group must be a threaded group.
	 */
	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
		return (EINVAL);
	KASSERT((td->td_upcall != NULL), ("%s: does not own an upcall", __func__));
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		getnanouptime(&ts);
		timespecadd(&ts, &timeout);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	mtx_lock_spin(&sched_lock);
	/* Change OURSELF to become an upcall. */
	td->td_flags = TDF_UPCALLING;
	if (p->p_sflag & PS_NEEDSIGCHK)
		td->td_flags |= TDF_ASTPENDING;
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
	       (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
			"kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
		kg->kg_upsleeps--;
		PROC_UNLOCK(p);
		if (uap->timeout == NULL || error != EWOULDBLOCK)
			return (0);
		getnanouptime(&ts2);
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		PROC_LOCK(p);
	}
	PROC_UNLOCK(p);
	return (0);
}
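
/*
 * The msleep() loop in kse_release() re-arms the sleep with the time
 * remaining to the deadline (ts3 = ts - ts2) until completed contexts
 * or KUF_DOUPCALL show up, the timeout expires, or the sleep is
 * interrupted.  kg_upsleeps tells kse_wakeup() that a sleeping upcall
 * can simply be woken rather than marked for an upcall.
 */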

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_THREADED))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		           (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEG: on the first call, use the current KSE and don't
 * schedule an upcall.  In all other situations, allocate the maximum
 * number of new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always have a cpu counter available. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (thread_debug && virtual_cpu != 0)
		ncpus = virtual_cpu;

	/* Easier to just set it than to test and set */
	p->p_flag |= P_THREADED;
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* There is a race here, but it is cheap to tolerate. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		      kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize the KSE group, optimized for MP.
		 * Create as many KSEs as there are physical cpus; this
		 * increases concurrency even if userland is not MP safe
		 * and can only run on a single CPU (true for early
		 * versions of libpthread).
		 * In an ideal world, every physical cpu should execute
		 * a thread.  If there are enough KSEs, threads in the
		 * kernel can be executed in parallel on different cpus
		 * at full speed; concurrency in the kernel shouldn't be
		 * restricted by the number of upcalls userland provides.
		 * Adding more upcall structures only increases
		 * concurrency in userland.
		 * The highest performance configuration is:
		 * N kses = N upcalls = N physical cpus
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			      ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		upcall_free(newku);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet,
		 * create an initial upcall thread to own it.
		 */
		thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't own an upcall
		 * structure, just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
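
/*
 * A minimal sketch of how a UTS might enter KSE mode from userland
 * (illustrative only; uts_entry, uts_stack and uts_stack_size are
 * hypothetical names and the mailbox setup is simplified):
 *
 *	struct kse_mailbox mbx;
 *
 *	bzero(&mbx, sizeof(mbx));
 *	mbx.km_func = uts_entry;		(upcall entry point)
 *	mbx.km_stack.ss_sp = uts_stack;		(stack to run upcalls on)
 *	mbx.km_stack.ss_size = uts_stack_size;
 *	kse_create(&mbx, 0);			(0 = stay in current ksegrp)
 */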

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	get_mcontext(td, &uc->uc_mcontext);
#endif
	uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
	int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	ret = set_mcontext(td, &uc->uc_mcontext);
#else
	ret = ENOSYS;
#endif
	if (ret == 0) {
		SIG_CANTMASK(uc->uc_sigmask);
		PROC_LOCK(td->td_proc);
		td->td_proc->p_sigmask = uc->uc_sigmask;
		PROC_UNLOCK(td->td_proc);
	}
	return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
#else
	/*
	 * XXX the ia64 kstack allocator is really lame and is at the mercy
	 * of contigmalloc().  This hackery is to pre-construct a whole
	 * pile of thread structures with associated kernel stacks early
	 * in the system startup while contigmalloc() still works. Once we
	 * have them, keep them.  Sigh.
	 */
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
#endif
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie threads, KSEs, ksegrps, and upcalls.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error, temp;
	ucontext_t uc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	addr = (void *)(&td->td_mailbox->tm_context);
	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error)
		goto bad;

	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	if (error)
		goto bad;

	/* Export clock ticks spent in kernel mode. */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp))
		goto bad;

	addr = (caddr_t)(&td->td_mailbox->tm_slices);
	temp = fuword(addr) - td->td_usticks;
	if (suword(addr, temp))
		goto bad;

	/* Get the address of the list pointer in the latest mailbox. */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
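	/*
	 * The store is done with suword() while unlocked because it may
	 * fault; the locked re-check below then detects whether another
	 * thread changed kg_completed in the meantime, in which case the
	 * compare-and-store is retried.
	 */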
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock; it is no longer valid to
			 * use it again anywhere else.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it. */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (td->td_ksegrp->kg_numupcalls == 0)
		return (-1);
	if (user) {
		/* Currently, this is always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/*
			 * XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in future, so if the thread mailbox
			 * is NULL, it must be a UTS thread; don't account
			 * clock ticks for it.
			 */
		}
	}
	return (0);
}
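
/*
 * In thread_statclock(), ticks that arrive while in user mode are only
 * counted here (td_uuticks) and are pushed out to the mailbox later
 * from ast() via TDF_USTATCLOCK, since user memory cannot be written
 * safely from statclock context; kernel-mode ticks accumulate in
 * td_usticks until the context is exported.
 */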
11098798d4f9SDavid Xu 
11105215b187SJeff Roberson /*
11114b4866edSDavid Xu  * Export state clock ticks for userland
11125215b187SJeff Roberson  */
11138798d4f9SDavid Xu static int
11144b4866edSDavid Xu thread_update_usr_ticks(struct thread *td, int user)
11158798d4f9SDavid Xu {
11168798d4f9SDavid Xu 	struct proc *p = td->td_proc;
11178798d4f9SDavid Xu 	struct kse_thr_mailbox *tmbx;
11185215b187SJeff Roberson 	struct kse_upcall *ku;
11198798d4f9SDavid Xu 	caddr_t addr;
11205215b187SJeff Roberson 	uint uticks;
11214b4866edSDavid Xu 	int slices;
11228798d4f9SDavid Xu 
11235215b187SJeff Roberson 	if ((ku = td->td_upcall) == NULL)
11245215b187SJeff Roberson 		return (-1);
11258798d4f9SDavid Xu 
11265215b187SJeff Roberson 	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
11278798d4f9SDavid Xu 	if ((tmbx == NULL) || (tmbx == (void *)-1))
11285215b187SJeff Roberson 		return (-1);
11294b4866edSDavid Xu 	if (user) {
11305215b187SJeff Roberson 		uticks = td->td_uuticks;
11315215b187SJeff Roberson 		td->td_uuticks = 0;
11325215b187SJeff Roberson 		addr = (caddr_t)&tmbx->tm_uticks;
11334b4866edSDavid Xu 	} else {
11344b4866edSDavid Xu 		uticks = td->td_usticks;
11355215b187SJeff Roberson 		td->td_usticks = 0;
11364b4866edSDavid Xu 		addr = (caddr_t)&tmbx->tm_sticks;
11374b4866edSDavid Xu 	}
11384b4866edSDavid Xu 	if (uticks) {
11394b4866edSDavid Xu 		if (suword(addr, uticks+fuword(addr))) {
11405215b187SJeff Roberson 			PROC_LOCK(p);
11415215b187SJeff Roberson 			psignal(p, SIGSEGV);
11425215b187SJeff Roberson 			PROC_UNLOCK(p);
11435215b187SJeff Roberson 			return (-2);
11445215b187SJeff Roberson 		}
11454b4866edSDavid Xu 		addr = (caddr_t)&tmbx->tm_slices;
11464b4866edSDavid Xu 		slices = (int)fuword(addr);
11474b4866edSDavid Xu 		if (slices > 0) {
11484b4866edSDavid Xu 			slices -= (int)uticks;
11494b4866edSDavid Xu 			if (suword(addr, slices)) {
11504b4866edSDavid Xu 				PROC_LOCK(p);
11514b4866edSDavid Xu 				psignal(p, SIGSEGV);
11524b4866edSDavid Xu 				PROC_UNLOCK(p);
11534b4866edSDavid Xu 				return (-2);
11544b4866edSDavid Xu 			}
11554b4866edSDavid Xu 			if (slices <= 0) {
11564b4866edSDavid Xu 				mtx_lock_spin(&sched_lock);
11574b4866edSDavid Xu 				td->td_upcall->ku_flags |= KUF_DOUPCALL;
11584b4866edSDavid Xu 				mtx_unlock_spin(&sched_lock);
11594b4866edSDavid Xu 			}
11604b4866edSDavid Xu 		}
11614b4866edSDavid Xu 	}
11625215b187SJeff Roberson 	return (0);
11638798d4f9SDavid Xu }
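
/*
 * Illustrative sketch of the fuword()/suword() read-modify-write used
 * above: ticks accumulate into a counter in the user mailbox, and a
 * failed suword() means the mailbox page is gone, so the process is
 * signalled. Hypothetical helper; 'addr' would point at tm_uticks or
 * tm_sticks.
 */
static int
example_add_user_ticks(struct proc *p, caddr_t addr, uint ticks)
{
	if (suword(addr, ticks + fuword(addr))) {
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
		return (-2);
	}
	return (0);
}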
11648798d4f9SDavid Xu 
11658798d4f9SDavid Xu /*
116644990b8cSJulian Elischer  * Discard the current thread and exit from its context.
116744990b8cSJulian Elischer  *
116844990b8cSJulian Elischer  * Because we can't free a thread while we're operating under its context,
1169696058c3SJulian Elischer  * push the current thread into our CPU's deadthread holder. This means
1170696058c3SJulian Elischer  * we needn't worry about someone else grabbing our context before we
1171696058c3SJulian Elischer  * do a cpu_throw().
117244990b8cSJulian Elischer  */
117344990b8cSJulian Elischer void
117444990b8cSJulian Elischer thread_exit(void)
117544990b8cSJulian Elischer {
117644990b8cSJulian Elischer 	struct thread *td;
117744990b8cSJulian Elischer 	struct kse *ke;
117844990b8cSJulian Elischer 	struct proc *p;
117944990b8cSJulian Elischer 	struct ksegrp	*kg;
118044990b8cSJulian Elischer 
118144990b8cSJulian Elischer 	td = curthread;
118244990b8cSJulian Elischer 	kg = td->td_ksegrp;
118344990b8cSJulian Elischer 	p = td->td_proc;
118444990b8cSJulian Elischer 	ke = td->td_kse;
118544990b8cSJulian Elischer 
118644990b8cSJulian Elischer 	mtx_assert(&sched_lock, MA_OWNED);
118788151aa3SJulian Elischer 	KASSERT(p != NULL, ("thread exiting without a process"));
118888151aa3SJulian Elischer 	KASSERT(ke != NULL, ("thread exiting without a kse"));
118988151aa3SJulian Elischer 	KASSERT(kg != NULL, ("thread exiting without a kse group"));
119044990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
119144990b8cSJulian Elischer 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
119244990b8cSJulian Elischer 	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
119344990b8cSJulian Elischer 
119448bfcdddSJulian Elischer 	if (td->td_standin != NULL) {
119548bfcdddSJulian Elischer 		thread_stash(td->td_standin);
119648bfcdddSJulian Elischer 		td->td_standin = NULL;
119748bfcdddSJulian Elischer 	}
119848bfcdddSJulian Elischer 
119944990b8cSJulian Elischer 	cpu_thread_exit(td);	/* XXXSMP */
120044990b8cSJulian Elischer 
12011faf202eSJulian Elischer 	/*
12021faf202eSJulian Elischer 	 * The last thread is left attached to the process
12031faf202eSJulian Elischer 	 * so that the whole bundle gets recycled. Skip
12041faf202eSJulian Elischer 	 * all this stuff.
12051faf202eSJulian Elischer 	 */
12061faf202eSJulian Elischer 	if (p->p_numthreads > 1) {
12075c8329edSJulian Elischer 		/*
12085c8329edSJulian Elischer 		 * Unlink this thread from its proc and the kseg.
12095c8329edSJulian Elischer 		 * In keeping with the other structs we probably should
12105c8329edSJulian Elischer 		 * have a thread_unlink() that does some of this but it
12115c8329edSJulian Elischer 		 * would only be called from here (I think) so it would
12125c8329edSJulian Elischer 		 * be a waste. (might be useful for proc_fini() as well.)
12135c8329edSJulian Elischer 		 * be a waste. (It might be useful for proc_fini() as well.)
121444990b8cSJulian Elischer 		TAILQ_REMOVE(&p->p_threads, td, td_plist);
121544990b8cSJulian Elischer 		p->p_numthreads--;
121644990b8cSJulian Elischer 		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
121744990b8cSJulian Elischer 		kg->kg_numthreads--;
12180252d203SDavid Xu 		if (p->p_maxthrwaits)
12190252d203SDavid Xu 			wakeup(&p->p_numthreads);
122044990b8cSJulian Elischer 		/*
122144990b8cSJulian Elischer 		 * The test below is NOT true if we are the
12221faf202eSJulian Elischer 	 * sole exiting thread. P_STOPPED_SINGLE is unset
122344990b8cSJulian Elischer 	 * in exit1() once it is the only survivor.
122444990b8cSJulian Elischer 		 */
12251279572aSDavid Xu 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
122644990b8cSJulian Elischer 			if (p->p_numthreads == p->p_suspcount) {
122771fad9fdSJulian Elischer 				thread_unsuspend_one(p->p_singlethread);
122844990b8cSJulian Elischer 			}
122944990b8cSJulian Elischer 		}
123048bfcdddSJulian Elischer 
12315215b187SJeff Roberson 		/*
12325215b187SJeff Roberson 		 * Because each upcall structure has an owner thread,
12335215b187SJeff Roberson 		 * and an owner thread exits only when the process is
12345215b187SJeff Roberson 		 * exiting, no upcall to userland is needed any longer,
12355215b187SJeff Roberson 		 * so deleting the upcall structure is safe here.
12365215b187SJeff Roberson 		 * Thus, when all threads in a group have exited, all
12375215b187SJeff Roberson 		 * upcalls in the group will have been freed automatically.
12385215b187SJeff Roberson 		 */
12395215b187SJeff Roberson 		if (td->td_upcall)
12405215b187SJeff Roberson 			upcall_remove(td);
12416f8132a8SJulian Elischer 
12425215b187SJeff Roberson 		ke->ke_state = KES_UNQUEUED;
12435215b187SJeff Roberson 		ke->ke_thread = NULL;
124448bfcdddSJulian Elischer 		/*
124593a7aa79SJulian Elischer 		 * Decide what to do with the KSE attached to this thread.
124648bfcdddSJulian Elischer 		 */
12475215b187SJeff Roberson 		if (ke->ke_flags & KEF_EXIT)
12486f8132a8SJulian Elischer 			kse_unlink(ke);
12495215b187SJeff Roberson 		else
12506f8132a8SJulian Elischer 			kse_reassign(ke);
12516f8132a8SJulian Elischer 		PROC_UNLOCK(p);
12525215b187SJeff Roberson 		td->td_kse	= NULL;
12535c8329edSJulian Elischer 		td->td_state	= TDS_INACTIVE;
12545c8329edSJulian Elischer 		td->td_proc	= NULL;
12555c8329edSJulian Elischer 		td->td_ksegrp	= NULL;
12565c8329edSJulian Elischer 		td->td_last_kse	= NULL;
1257696058c3SJulian Elischer 		PCPU_SET(deadthread, td);
12581faf202eSJulian Elischer 	} else {
12591faf202eSJulian Elischer 		PROC_UNLOCK(p);
12601faf202eSJulian Elischer 	}
126144990b8cSJulian Elischer 	cpu_throw();
126244990b8cSJulian Elischer 	/* NOTREACHED */
126344990b8cSJulian Elischer }
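
/*
 * Illustrative sketch: the per-CPU deadthread slot set above is meant
 * to be drained by the next thread that runs on this CPU (for example
 * from a hypothetical switch-tail helper), before new spares are
 * needed:
 */
static void
example_reap_deadthread(void)
{
	struct thread *td;

	if ((td = PCPU_GET(deadthread)) != NULL) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);	/* becomes a zombie... */
		thread_reap();		/* ...and is recycled here */
	}
}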
126444990b8cSJulian Elischer 
126544990b8cSJulian Elischer /*
1266696058c3SJulian Elischer  * Do any thread-specific cleanups that may be needed in wait().
1267696058c3SJulian Elischer  * Called with Giant held; the proc lock and sched_lock are not held.
1268696058c3SJulian Elischer  */
1269696058c3SJulian Elischer void
1270696058c3SJulian Elischer thread_wait(struct proc *p)
1271696058c3SJulian Elischer {
1272696058c3SJulian Elischer 	struct thread *td;
1273696058c3SJulian Elischer 
1274696058c3SJulian Elischer 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
1275696058c3SJulian Elischer 	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
1276696058c3SJulian Elischer 	FOREACH_THREAD_IN_PROC(p, td) {
1277696058c3SJulian Elischer 		if (td->td_standin != NULL) {
1278696058c3SJulian Elischer 			thread_free(td->td_standin);
1279696058c3SJulian Elischer 			td->td_standin = NULL;
1280696058c3SJulian Elischer 		}
1281696058c3SJulian Elischer 		cpu_thread_clean(td);
1282696058c3SJulian Elischer 	}
1283696058c3SJulian Elischer 	thread_reap();	/* check for zombie threads etc. */
1284696058c3SJulian Elischer }
1285696058c3SJulian Elischer 
1286696058c3SJulian Elischer /*
128744990b8cSJulian Elischer  * Link a thread to a process.
12881faf202eSJulian Elischer  * Set up anything that needs to be initialized for it to
12891faf202eSJulian Elischer  * be used by the process.
129044990b8cSJulian Elischer  *
129144990b8cSJulian Elischer  * Note that we do not link to the proc's ucred here.
129244990b8cSJulian Elischer  * The thread is linked as if running but no KSE assigned.
129344990b8cSJulian Elischer  */
129444990b8cSJulian Elischer void
129544990b8cSJulian Elischer thread_link(struct thread *td, struct ksegrp *kg)
129644990b8cSJulian Elischer {
129744990b8cSJulian Elischer 	struct proc *p;
129844990b8cSJulian Elischer 
129944990b8cSJulian Elischer 	p = kg->kg_proc;
130071fad9fdSJulian Elischer 	td->td_state    = TDS_INACTIVE;
130144990b8cSJulian Elischer 	td->td_proc     = p;
130244990b8cSJulian Elischer 	td->td_ksegrp   = kg;
130344990b8cSJulian Elischer 	td->td_last_kse = NULL;
13045215b187SJeff Roberson 	td->td_flags    = 0;
13055215b187SJeff Roberson 	td->td_kse      = NULL;
130644990b8cSJulian Elischer 
13071faf202eSJulian Elischer 	LIST_INIT(&td->td_contested);
13081faf202eSJulian Elischer 	callout_init(&td->td_slpcallout, 1);
130944990b8cSJulian Elischer 	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
131044990b8cSJulian Elischer 	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
131144990b8cSJulian Elischer 	p->p_numthreads++;
131244990b8cSJulian Elischer 	kg->kg_numthreads++;
131344990b8cSJulian Elischer }
131444990b8cSJulian Elischer 
13155215b187SJeff Roberson /*
13165215b187SJeff Roberson  * Purge a ksegrp resource. When a ksegrp is preparing to
13175215b187SJeff Roberson  * exit, it calls this function.
13185215b187SJeff Roberson  */
13195215b187SJeff Roberson void
13205215b187SJeff Roberson kse_purge_group(struct thread *td)
13215215b187SJeff Roberson {
13225215b187SJeff Roberson 	struct ksegrp *kg;
13235215b187SJeff Roberson 	struct kse *ke;
13245215b187SJeff Roberson 
13255215b187SJeff Roberson 	kg = td->td_ksegrp;
13265215b187SJeff Roberson  	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
13275215b187SJeff Roberson 	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
13285215b187SJeff Roberson 		KASSERT(ke->ke_state == KES_IDLE,
13295215b187SJeff Roberson 			("%s: wrong idle KSE state", __func__));
13305215b187SJeff Roberson 		kse_unlink(ke);
13315215b187SJeff Roberson 	}
13325215b187SJeff Roberson 	KASSERT((kg->kg_kses == 1),
13335215b187SJeff Roberson 		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
13345215b187SJeff Roberson 	KASSERT((kg->kg_numupcalls == 0),
13355215b187SJeff Roberson 	        ("%s: ksegrp still has %d upcall structures",
13365215b187SJeff Roberson 		__func__, kg->kg_numupcalls));
13375215b187SJeff Roberson }
13385215b187SJeff Roberson 
13395215b187SJeff Roberson /*
13405215b187SJeff Roberson  * Purge a process's KSE resource. When a process is preparing to
13415215b187SJeff Roberson  * exit, it calls kse_purge to release any extra KSE resources in
13425215b187SJeff Roberson  * the process.
13435215b187SJeff Roberson  */
13445c8329edSJulian Elischer void
13455c8329edSJulian Elischer kse_purge(struct proc *p, struct thread *td)
13465c8329edSJulian Elischer {
13475c8329edSJulian Elischer 	struct ksegrp *kg;
13485215b187SJeff Roberson 	struct kse *ke;
13495c8329edSJulian Elischer 
13505c8329edSJulian Elischer  	KASSERT(p->p_numthreads == 1, ("bad thread number"));
13515c8329edSJulian Elischer 	mtx_lock_spin(&sched_lock);
13525c8329edSJulian Elischer 	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
13535c8329edSJulian Elischer 		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
13545c8329edSJulian Elischer 		p->p_numksegrps--;
13555215b187SJeff Roberson 		/*
13565215b187SJeff Roberson 		 * There is no ownership for KSEs; after all threads
13575215b187SJeff Roberson 		 * in the group have exited, some KSEs may have been
13585215b187SJeff Roberson 		 * left on the idle queue, so garbage collect them now.
13595215b187SJeff Roberson 		 */
13605215b187SJeff Roberson 		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
13615215b187SJeff Roberson 			KASSERT(ke->ke_state == KES_IDLE,
13625215b187SJeff Roberson 			   ("%s: wrong idle KSE state", __func__));
13635215b187SJeff Roberson 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
13645215b187SJeff Roberson 			kg->kg_idle_kses--;
13655215b187SJeff Roberson 			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
13665215b187SJeff Roberson 			kg->kg_kses--;
13675215b187SJeff Roberson 			kse_stash(ke);
13685215b187SJeff Roberson 		}
13695c8329edSJulian Elischer 		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
13705c8329edSJulian Elischer 		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
13715215b187SJeff Roberson 		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
13725215b187SJeff Roberson 		KASSERT((kg->kg_numupcalls == 0),
13735215b187SJeff Roberson 		        ("%s: ksegrp still has %d upcall structures",
13745215b187SJeff Roberson 			__func__, kg->kg_numupcalls));
13755215b187SJeff Roberson 
13765215b187SJeff Roberson 		if (kg != td->td_ksegrp)
13775c8329edSJulian Elischer 			ksegrp_stash(kg);
13785c8329edSJulian Elischer 	}
13795c8329edSJulian Elischer 	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
13805c8329edSJulian Elischer 	p->p_numksegrps++;
13815c8329edSJulian Elischer 	mtx_unlock_spin(&sched_lock);
13825c8329edSJulian Elischer }
13835c8329edSJulian Elischer 
13845215b187SJeff Roberson /*
13855215b187SJeff Roberson  * This function is intended to be used to initialize a spare thread
13865215b187SJeff Roberson  * for an upcall. Initialize the thread's large data area outside
13875215b187SJeff Roberson  * sched_lock, for use by thread_schedule_upcall().
13885215b187SJeff Roberson  */
13895215b187SJeff Roberson void
13905215b187SJeff Roberson thread_alloc_spare(struct thread *td, struct thread *spare)
13915215b187SJeff Roberson {
13925215b187SJeff Roberson 	if (td->td_standin)
13935215b187SJeff Roberson 		return;
13945215b187SJeff Roberson 	if (spare == NULL)
13955215b187SJeff Roberson 		spare = thread_alloc();
13965215b187SJeff Roberson 	td->td_standin = spare;
13975215b187SJeff Roberson 	bzero(&spare->td_startzero,
13985215b187SJeff Roberson 	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
13995215b187SJeff Roberson 	spare->td_proc = td->td_proc;
14005215b187SJeff Roberson 	/* Setup PCB and fork address */
14015215b187SJeff Roberson 	cpu_set_upcall(spare, td->td_pcb);
14025215b187SJeff Roberson 	/*
14035215b187SJeff Roberson 	 * XXXKSE do we really need this? (default values for the
14045215b187SJeff Roberson 	 * frame).
14055215b187SJeff Roberson 	 */
14065215b187SJeff Roberson 	bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
14075215b187SJeff Roberson 	spare->td_ucred = crhold(td->td_ucred);
14085215b187SJeff Roberson }
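
/*
 * Illustrative sketch (hypothetical call site): paths that may later
 * schedule an upcall pre-allocate the standin outside sched_lock, so
 * thread_schedule_upcall() never has to allocate while it is held.
 */
static void
example_cache_standin(struct thread *td)
{
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);
}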
14095c8329edSJulian Elischer 
141044990b8cSJulian Elischer /*
1411c76e33b6SJonathan Mini  * Create a thread and schedule it for upcall on the KSE given.
141293a7aa79SJulian Elischer  * Use our thread's standin so that we don't have to allocate one.
141344990b8cSJulian Elischer  */
141444990b8cSJulian Elischer struct thread *
14155215b187SJeff Roberson thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
141644990b8cSJulian Elischer {
141744990b8cSJulian Elischer 	struct thread *td2;
141844990b8cSJulian Elischer 
141944990b8cSJulian Elischer 	mtx_assert(&sched_lock, MA_OWNED);
142048bfcdddSJulian Elischer 
142148bfcdddSJulian Elischer 	/*
14225215b187SJeff Roberson 	 * Schedule an upcall thread on the specified kse_upcall;
14235215b187SJeff Roberson 	 * the kse_upcall must be free.
14245215b187SJeff Roberson 	 * td must have a spare thread.
142548bfcdddSJulian Elischer 	 */
14265215b187SJeff Roberson 	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
142748bfcdddSJulian Elischer 	if ((td2 = td->td_standin) != NULL) {
142848bfcdddSJulian Elischer 		td->td_standin = NULL;
142944990b8cSJulian Elischer 	} else {
14305215b187SJeff Roberson 		panic("no reserve thread when scheduling an upcall");
143148bfcdddSJulian Elischer 		return (NULL);
143244990b8cSJulian Elischer 	}
143344990b8cSJulian Elischer 	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
143448bfcdddSJulian Elischer 	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
14351faf202eSJulian Elischer 	bcopy(&td->td_startcopy, &td2->td_startcopy,
14361faf202eSJulian Elischer 	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
14375215b187SJeff Roberson 	thread_link(td2, ku->ku_ksegrp);
14385215b187SJeff Roberson 	/* Let the new thread become owner of the upcall */
14395215b187SJeff Roberson 	ku->ku_owner   = td2;
14405215b187SJeff Roberson 	td2->td_upcall = ku;
14415215b187SJeff Roberson 	td2->td_flags  = TDF_UPCALLING;
144202bbffafSDavid Xu 	if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
144302bbffafSDavid Xu 		td2->td_flags |= TDF_ASTPENDING;
14445215b187SJeff Roberson 	td2->td_kse    = NULL;
144548bfcdddSJulian Elischer 	td2->td_state  = TDS_CAN_RUN;
144648bfcdddSJulian Elischer 	td2->td_inhibitors = 0;
144744990b8cSJulian Elischer 	setrunqueue(td2);
144848bfcdddSJulian Elischer 	return (td2);	/* bogus.. should be a void function */
144944990b8cSJulian Elischer }
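
/*
 * Illustrative sketch (hypothetical caller): firing an upcall on a
 * free kse_upcall. The spare thread is cached before taking
 * sched_lock, which thread_schedule_upcall() asserts is held.
 */
static void
example_fire_upcall(struct thread *td, struct kse_upcall *ku)
{
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);
	mtx_lock_spin(&sched_lock);
	if (ku->ku_owner == NULL)
		(void)thread_schedule_upcall(td, ku);
	mtx_unlock_spin(&sched_lock);
}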
145044990b8cSJulian Elischer 
145158a3c273SJeff Roberson void
145258a3c273SJeff Roberson thread_signal_add(struct thread *td, int sig)
1453c76e33b6SJonathan Mini {
145458a3c273SJeff Roberson 	struct kse_upcall *ku;
145558a3c273SJeff Roberson 	struct proc *p;
1456c76e33b6SJonathan Mini 	sigset_t ss;
1457c76e33b6SJonathan Mini 	int error;
1458c76e33b6SJonathan Mini 
145958a3c273SJeff Roberson 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
146058a3c273SJeff Roberson 	td = curthread;
146158a3c273SJeff Roberson 	ku = td->td_upcall;
146258a3c273SJeff Roberson 	p = td->td_proc;
146358a3c273SJeff Roberson 
1464c76e33b6SJonathan Mini 	PROC_UNLOCK(p);
146558a3c273SJeff Roberson 	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
1466c76e33b6SJonathan Mini 	if (error)
146758a3c273SJeff Roberson 		goto error;
146858a3c273SJeff Roberson 
1469c76e33b6SJonathan Mini 	SIGADDSET(ss, sig);
147058a3c273SJeff Roberson 
147158a3c273SJeff Roberson 	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
1472c76e33b6SJonathan Mini 	if (error)
147358a3c273SJeff Roberson 		goto error;
147458a3c273SJeff Roberson 
147558a3c273SJeff Roberson 	PROC_LOCK(p);
147658a3c273SJeff Roberson 	return;
147758a3c273SJeff Roberson error:
147858a3c273SJeff Roberson 	PROC_LOCK(p);
147958a3c273SJeff Roberson 	sigexit(td, SIGILL);
148058a3c273SJeff Roberson }
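
/*
 * Illustrative sketch of the same copyin/modify/copyout round trip as
 * above, applied to a hypothetical operation that clears a caught
 * signal from the mailbox set. The proc lock must be dropped across
 * the copies, as in thread_signal_add().
 */
static int
example_signal_del(struct kse_upcall *ku, int sig)
{
	sigset_t ss;
	int error;

	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(ss));
	if (error == 0) {
		SIGDELSET(ss, sig);
		error = copyout(&ss, &ku->ku_mailbox->km_sigscaught,
		    sizeof(ss));
	}
	return (error);
}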
148158a3c273SJeff Roberson 
148258a3c273SJeff Roberson 
148358a3c273SJeff Roberson /*
148458a3c273SJeff Roberson  * Schedule an upcall to notify a KSE process that it has received signals.
148558a3c273SJeff Roberson  *
148658a3c273SJeff Roberson  */
148758a3c273SJeff Roberson void
148858a3c273SJeff Roberson thread_signal_upcall(struct thread *td)
148958a3c273SJeff Roberson {
1490c76e33b6SJonathan Mini 	mtx_lock_spin(&sched_lock);
149158a3c273SJeff Roberson 	td->td_flags |= TDF_UPCALLING;
1492c76e33b6SJonathan Mini 	mtx_unlock_spin(&sched_lock);
149358a3c273SJeff Roberson 
149458a3c273SJeff Roberson 	return;
1495c76e33b6SJonathan Mini }
1496c76e33b6SJonathan Mini 
1497c76e33b6SJonathan Mini /*
14985215b187SJeff Roberson  * Setup done on the thread when it enters the kernel.
14991434d3feSJulian Elischer  * XXXKSE Presently only for syscalls but eventually all kernel entries.
15001434d3feSJulian Elischer  */
15011434d3feSJulian Elischer void
15021434d3feSJulian Elischer thread_user_enter(struct proc *p, struct thread *td)
15031434d3feSJulian Elischer {
15045215b187SJeff Roberson 	struct ksegrp *kg;
15055215b187SJeff Roberson 	struct kse_upcall *ku;
15061434d3feSJulian Elischer 
15075215b187SJeff Roberson 	kg = td->td_ksegrp;
15081434d3feSJulian Elischer 	/*
15091434d3feSJulian Elischer 	 * First check whether we should just abort,
15101434d3feSJulian Elischer 	 * but check whether we are the single thread first!
15111434d3feSJulian Elischer 	 * XXX p_singlethread is not locked, but this should be safe.
15121434d3feSJulian Elischer 	 */
15135215b187SJeff Roberson 	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
15141434d3feSJulian Elischer 		PROC_LOCK(p);
15151434d3feSJulian Elischer 		mtx_lock_spin(&sched_lock);
1516e574e444SDavid Xu 		thread_stopped(p);
15171434d3feSJulian Elischer 		thread_exit();
15181434d3feSJulian Elischer 		/* NOTREACHED */
15191434d3feSJulian Elischer 	}
15201434d3feSJulian Elischer 
15211434d3feSJulian Elischer 	/*
15221434d3feSJulian Elischer 	 * If we are doing a syscall in a KSE environment,
15231434d3feSJulian Elischer 	 * note where our mailbox is. There is always the
152493a7aa79SJulian Elischer 	 * possibility that we could do this lazily (in kse_reassign()),
15251434d3feSJulian Elischer 	 * but for now do it every time.
15261434d3feSJulian Elischer 	 */
15275215b187SJeff Roberson 	kg = td->td_ksegrp;
15285215b187SJeff Roberson 	if (kg->kg_numupcalls) {
15295215b187SJeff Roberson 		ku = td->td_upcall;
15305215b187SJeff Roberson 		KASSERT(ku, ("%s: no upcall owned", __func__));
15315215b187SJeff Roberson 		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
15321434d3feSJulian Elischer 		td->td_mailbox =
15335215b187SJeff Roberson 		    (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
15341434d3feSJulian Elischer 		if ((td->td_mailbox == NULL) ||
15358798d4f9SDavid Xu 		    (td->td_mailbox == (void *)-1)) {
15365215b187SJeff Roberson 		    	/* Don't schedule upcall when blocked */
15375215b187SJeff Roberson 			td->td_mailbox = NULL;
15388798d4f9SDavid Xu 			mtx_lock_spin(&sched_lock);
15395215b187SJeff Roberson 			td->td_flags &= ~TDF_CAN_UNBIND;
15408798d4f9SDavid Xu 			mtx_unlock_spin(&sched_lock);
15418798d4f9SDavid Xu 		} else {
15421434d3feSJulian Elischer 			if (td->td_standin == NULL)
15435215b187SJeff Roberson 				thread_alloc_spare(td, NULL);
15448798d4f9SDavid Xu 			mtx_lock_spin(&sched_lock);
154593a7aa79SJulian Elischer 			td->td_flags |= TDF_CAN_UNBIND;
15468798d4f9SDavid Xu 			mtx_unlock_spin(&sched_lock);
15475215b187SJeff Roberson 		}
15481434d3feSJulian Elischer 	}
15491434d3feSJulian Elischer }
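
/*
 * Illustrative sketch (userland side, hypothetical UTS fragment): the
 * km_curthread slot that fuword() reads above is armed by the UTS
 * before it resumes a user thread, so the kernel can find that
 * thread's mailbox on the next kernel entry. The field type is
 * assumed to be a kse_thr_mailbox pointer, matching the casts above.
 */
static void
example_uts_resume(struct kse_mailbox *km, struct kse_thr_mailbox *tmbx)
{
	km->km_curthread = tmbx;	/* publish before running the thread */
}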
15501434d3feSJulian Elischer 
15511434d3feSJulian Elischer /*
1552c76e33b6SJonathan Mini  * The extra work we go through if we are a threaded process when we
1553c76e33b6SJonathan Mini  * return to userland.
1554c76e33b6SJonathan Mini  *
1555c76e33b6SJonathan Mini  * If we are a KSE process and returning to user mode, check for
1556c76e33b6SJonathan Mini  * extra work to do before we return (e.g. for more syscalls
1557c76e33b6SJonathan Mini  * to complete first).  If we were in a critical section, we should
1558c76e33b6SJonathan Mini  * just return to let it finish. Same if we were in the UTS (in
1559c76e33b6SJonathan Mini  * which case the mailbox's context's busy indicator will be set).
1560c76e33b6SJonathan Mini  * The only traps we support will have set the mailbox.
1561c76e33b6SJonathan Mini  * We will clear it here.
156244990b8cSJulian Elischer  */
1563c76e33b6SJonathan Mini int
1564253fdd5bSJulian Elischer thread_userret(struct thread *td, struct trapframe *frame)
1565c76e33b6SJonathan Mini {
15660252d203SDavid Xu 	int error = 0, upcalls;
15675215b187SJeff Roberson 	struct kse_upcall *ku;
15680252d203SDavid Xu 	struct ksegrp *kg, *kg2;
156948bfcdddSJulian Elischer 	struct proc *p;
1570bfd83250SDavid Xu 	struct timespec ts;
1571c76e33b6SJonathan Mini 
15726f8132a8SJulian Elischer 	p = td->td_proc;
15735215b187SJeff Roberson 	kg = td->td_ksegrp;
157493a7aa79SJulian Elischer 
15755215b187SJeff Roberson 	/* Nothing to do with non-threaded group/process */
15765215b187SJeff Roberson 	if (td->td_ksegrp->kg_numupcalls == 0)
15775215b187SJeff Roberson 		return (0);
15785215b187SJeff Roberson 
15795215b187SJeff Roberson 	/*
15805215b187SJeff Roberson 	 * A stat clock interrupt hit in userland and we are
15815215b187SJeff Roberson 	 * returning from the interrupt; charge the thread's
15825215b187SJeff Roberson 	 * userland time to the UTS.
15835215b187SJeff Roberson 	 */
15845215b187SJeff Roberson 	if (td->td_flags & TDF_USTATCLOCK) {
15854b4866edSDavid Xu 		thread_update_usr_ticks(td, 1);
158693a7aa79SJulian Elischer 		mtx_lock_spin(&sched_lock);
15875215b187SJeff Roberson 		td->td_flags &= ~TDF_USTATCLOCK;
15880dbb100bSDavid Xu 		mtx_unlock_spin(&sched_lock);
15894b4866edSDavid Xu 		if (kg->kg_completed ||
15904b4866edSDavid Xu 		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
15914b4866edSDavid Xu 			thread_user_enter(p, td);
15925215b187SJeff Roberson 	}
15935215b187SJeff Roberson 
15945215b187SJeff Roberson 	/*
15955215b187SJeff Roberson 	 * Optimisation:
15965215b187SJeff Roberson 	 * This thread has not started any upcall.
15975215b187SJeff Roberson 	 * If there is no work to report other than ourselves,
15985215b187SJeff Roberson 	 * then we can return directly to userland.
15995215b187SJeff Roberson 	 */
16005215b187SJeff Roberson 	if (TD_CAN_UNBIND(td)) {
16015215b187SJeff Roberson 		mtx_lock_spin(&sched_lock);
16025215b187SJeff Roberson 		td->td_flags &= ~TDF_CAN_UNBIND;
16035215b187SJeff Roberson 		mtx_unlock_spin(&sched_lock);
16045215b187SJeff Roberson 		if ((kg->kg_completed == NULL) &&
16055215b187SJeff Roberson 		    (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
16064b4866edSDavid Xu 			thread_update_usr_ticks(td, 0);
16075614648eSDavid Xu 			if (!(kg->kg_completed ||
16085614648eSDavid Xu 			    (td->td_upcall->ku_flags & KUF_DOUPCALL))) {
160948bfcdddSJulian Elischer 				td->td_mailbox = NULL;
161093a7aa79SJulian Elischer 				return (0);
161193a7aa79SJulian Elischer 			}
16124b4866edSDavid Xu 		}
161393a7aa79SJulian Elischer 		error = thread_export_context(td);
161448bfcdddSJulian Elischer 		if (error) {
161548bfcdddSJulian Elischer 			/*
16165215b187SJeff Roberson 			 * Failing to do the KSE operation just defaults
161748bfcdddSJulian Elischer 			 * back to synchronous operation, so just return from
161893a7aa79SJulian Elischer 			 * the syscall.
161993a7aa79SJulian Elischer 			 */
16205215b187SJeff Roberson 			return (0);
162193a7aa79SJulian Elischer 		}
162293a7aa79SJulian Elischer 		/*
16235215b187SJeff Roberson 		 * There is something to report, and we own an upcall
16245215b187SJeff Roberson 		 * structure, so we can go to userland.
16255215b187SJeff Roberson 		 * Turn ourselves into an upcall thread.
162693a7aa79SJulian Elischer 		 */
16275215b187SJeff Roberson 		mtx_lock_spin(&sched_lock);
162893a7aa79SJulian Elischer 		td->td_flags |= TDF_UPCALLING;
162993a7aa79SJulian Elischer 		mtx_unlock_spin(&sched_lock);
16305215b187SJeff Roberson 	} else if (td->td_mailbox) {
163193a7aa79SJulian Elischer 		error = thread_export_context(td);
163293a7aa79SJulian Elischer 		/* possibly upcall with error? */
1633e574e444SDavid Xu 		PROC_LOCK(p);
16346f8132a8SJulian Elischer 		/*
16355215b187SJeff Roberson 		 * There are upcall threads waiting for
16365215b187SJeff Roberson 		 * work to do; wake one of them up.
16375215b187SJeff Roberson 		 * XXXKSE Maybe wake all of them up.
16386f8132a8SJulian Elischer 		 */
1639e574e444SDavid Xu 		if (!error && kg->kg_upsleeps)
16405215b187SJeff Roberson 			wakeup_one(&kg->kg_completed);
1641e574e444SDavid Xu 		mtx_lock_spin(&sched_lock);
1642e574e444SDavid Xu 		thread_stopped(p);
164393a7aa79SJulian Elischer 		thread_exit();
16445215b187SJeff Roberson 		/* NOTREACHED */
164548bfcdddSJulian Elischer 	}
164693a7aa79SJulian Elischer 
1647a87891eeSDavid Xu 	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1648a87891eeSDavid Xu 
1649a87891eeSDavid Xu 	if (p->p_numthreads > max_threads_per_proc) {
1650a87891eeSDavid Xu 		max_threads_hits++;
1651a87891eeSDavid Xu 		PROC_LOCK(p);
1652a87891eeSDavid Xu 		while (p->p_numthreads > max_threads_per_proc) {
1653a87891eeSDavid Xu 			if (P_SHOULDSTOP(p))
1654a87891eeSDavid Xu 				break;
1655a87891eeSDavid Xu 			upcalls = 0;
1656a87891eeSDavid Xu 			mtx_lock_spin(&sched_lock);
1657a87891eeSDavid Xu 			FOREACH_KSEGRP_IN_PROC(p, kg2) {
1658a87891eeSDavid Xu 				if (kg2->kg_numupcalls == 0)
1659a87891eeSDavid Xu 					upcalls++;
1660a87891eeSDavid Xu 				else
1661a87891eeSDavid Xu 					upcalls += kg2->kg_numupcalls;
1662a87891eeSDavid Xu 			}
1663a87891eeSDavid Xu 			mtx_unlock_spin(&sched_lock);
1664a87891eeSDavid Xu 			if (upcalls >= max_threads_per_proc)
1665a87891eeSDavid Xu 				break;
1666a87891eeSDavid Xu 			p->p_maxthrwaits++;
1667a87891eeSDavid Xu 			msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1668a87891eeSDavid Xu 			    "maxthreads", NULL);
1669a87891eeSDavid Xu 			p->p_maxthrwaits--;
1670a87891eeSDavid Xu 		}
1671a87891eeSDavid Xu 		PROC_UNLOCK(p);
1672a87891eeSDavid Xu 	}
1673a87891eeSDavid Xu 
167493a7aa79SJulian Elischer 	if (td->td_flags & TDF_UPCALLING) {
16755215b187SJeff Roberson 		ku = td->td_upcall;
167648bfcdddSJulian Elischer 		/*
167744990b8cSJulian Elischer 		 * There is no more work to do and we are going to ride
16785215b187SJeff Roberson 		 * this thread up to userland as an upcall.
167948bfcdddSJulian Elischer 		 * Do the last parts of the setup needed for the upcall.
168044990b8cSJulian Elischer 		 */
1681c76e33b6SJonathan Mini 		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1682ed32df81SJulian Elischer 		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1683c76e33b6SJonathan Mini 
1684c76e33b6SJonathan Mini 		/*
1685c76e33b6SJonathan Mini 		 * Set user context to the UTS.
1686696058c3SJulian Elischer 		 * Will use Giant in cpu_thread_clean() because it uses
1687696058c3SJulian Elischer 		 * kmem_free(kernel_map, ...)
1688c76e33b6SJonathan Mini 		 */
16895215b187SJeff Roberson 		cpu_set_upcall_kse(td, ku);
16905215b187SJeff Roberson 		mtx_lock_spin(&sched_lock);
16915215b187SJeff Roberson 		td->td_flags &= ~TDF_UPCALLING;
16925215b187SJeff Roberson 		if (ku->ku_flags & KUF_DOUPCALL)
16935215b187SJeff Roberson 			ku->ku_flags &= ~KUF_DOUPCALL;
16945215b187SJeff Roberson 		mtx_unlock_spin(&sched_lock);
16953d0586d4SJulian Elischer 
1696c76e33b6SJonathan Mini 		/*
169793a7aa79SJulian Elischer 		 * Unhook the list of completed threads.
169893a7aa79SJulian Elischer 		 * Anything that completes after this gets to
169993a7aa79SJulian Elischer 		 * come in next time.
170093a7aa79SJulian Elischer 		 * Put the list of completed thread mailboxes on
170193a7aa79SJulian Elischer 		 * this KSE's mailbox.
1702c76e33b6SJonathan Mini 		 */
17035215b187SJeff Roberson 		error = thread_link_mboxes(kg, ku);
17043d0586d4SJulian Elischer 		if (error)
17050252d203SDavid Xu 			goto out;
1706c76e33b6SJonathan Mini 
1707c76e33b6SJonathan Mini 		/*
170893a7aa79SJulian Elischer 		 * Set state and clear the thread mailbox pointer.
170948bfcdddSJulian Elischer 		 * From now on we are just a bound outgoing process.
171048bfcdddSJulian Elischer 		 * **Problem** userret is often called several times;
171193a7aa79SJulian Elischer 		 * it would be nice if this all happened only on the first
171293a7aa79SJulian Elischer 		 * time through (the scan for extra work, etc.).
1713c76e33b6SJonathan Mini 		 */
17145215b187SJeff Roberson 		error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
171593a7aa79SJulian Elischer 		if (error)
17160252d203SDavid Xu 			goto out;
17175215b187SJeff Roberson 
17185215b187SJeff Roberson 		/* Export current system time */
1719bfd83250SDavid Xu 		nanotime(&ts);
17200252d203SDavid Xu 		error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday,
17210252d203SDavid Xu 			sizeof(ts));
1722bfd83250SDavid Xu 	}
17230252d203SDavid Xu 
17240252d203SDavid Xu out:
17250252d203SDavid Xu 	if (error) {
17263d0586d4SJulian Elischer 		/*
1727fc8cdd87SDavid Xu 		 * Things are going to be so screwed we should just kill
1728fc8cdd87SDavid Xu 		 * the process.
17293d0586d4SJulian Elischer 		 * How do we do that?
17303d0586d4SJulian Elischer 		 */
173148bfcdddSJulian Elischer 		PROC_LOCK(td->td_proc);
173248bfcdddSJulian Elischer 		psignal(td->td_proc, SIGSEGV);
173348bfcdddSJulian Elischer 		PROC_UNLOCK(td->td_proc);
17340252d203SDavid Xu 	} else {
17350252d203SDavid Xu 		/*
17360252d203SDavid Xu 		 * Optimisation:
17370252d203SDavid Xu 		 * Ensure that we have a spare thread available,
17380252d203SDavid Xu 		 * for when we re-enter the kernel.
17390252d203SDavid Xu 		 */
17400252d203SDavid Xu 		if (td->td_standin == NULL)
17410252d203SDavid Xu 			thread_alloc_spare(td, NULL);
17420252d203SDavid Xu 	}
17430252d203SDavid Xu 
17440252d203SDavid Xu 	/*
17450252d203SDavid Xu 	 * Clear thread mailbox first, then clear system tick count.
17460252d203SDavid Xu 	 * The order is important because thread_statclock() uses
17470252d203SDavid Xu 	 * the mailbox pointer to see whether it is a userland thread or
17480252d203SDavid Xu 	 * a UTS kernel thread.
17490252d203SDavid Xu 	 */
175093a7aa79SJulian Elischer 	td->td_mailbox = NULL;
17515215b187SJeff Roberson 	td->td_usticks = 0;
175248bfcdddSJulian Elischer 	return (error);	/* go sync */
175344990b8cSJulian Elischer }
175444990b8cSJulian Elischer 
175544990b8cSJulian Elischer /*
175644990b8cSJulian Elischer  * Enforce single-threading.
175744990b8cSJulian Elischer  *
175844990b8cSJulian Elischer  * Returns 1 if the caller must abort (another thread is waiting to
175944990b8cSJulian Elischer  * exit the process or similar). Process is locked!
176044990b8cSJulian Elischer  * Returns 0 when you are successfully the only thread running.
176144990b8cSJulian Elischer  * A process has successfully single-threaded in the suspend mode when
176244990b8cSJulian Elischer  * there are no threads in user mode. Threads in the kernel must be
176344990b8cSJulian Elischer  * allowed to continue until they get to the user boundary. They may even
176444990b8cSJulian Elischer  * copy out their return values and data before suspending. They may,
176544990b8cSJulian Elischer  * however, be accelerated in reaching the user boundary, as we will wake
176644990b8cSJulian Elischer  * up any sleeping threads that are interruptible (PCATCH).
176744990b8cSJulian Elischer  */
176844990b8cSJulian Elischer int
176944990b8cSJulian Elischer thread_single(int force_exit)
177044990b8cSJulian Elischer {
177144990b8cSJulian Elischer 	struct thread *td;
177244990b8cSJulian Elischer 	struct thread *td2;
177344990b8cSJulian Elischer 	struct proc *p;
177444990b8cSJulian Elischer 
177544990b8cSJulian Elischer 	td = curthread;
177644990b8cSJulian Elischer 	p = td->td_proc;
1777696058c3SJulian Elischer 	mtx_assert(&Giant, MA_OWNED);
177844990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
177944990b8cSJulian Elischer 	KASSERT((td != NULL), ("curthread is NULL"));
178044990b8cSJulian Elischer 
1781ac2e4153SJulian Elischer 	if ((p->p_flag & P_THREADED) == 0)
178244990b8cSJulian Elischer 		return (0);
178344990b8cSJulian Elischer 
1784e3b9bf71SJulian Elischer 	/* Is someone already single threading? */
1785e3b9bf71SJulian Elischer 	if (p->p_singlethread)
178644990b8cSJulian Elischer 		return (1);
178744990b8cSJulian Elischer 
178893a7aa79SJulian Elischer 	if (force_exit == SINGLE_EXIT) {
178944990b8cSJulian Elischer 		p->p_flag |= P_SINGLE_EXIT;
179093a7aa79SJulian Elischer 	} else
179144990b8cSJulian Elischer 		p->p_flag &= ~P_SINGLE_EXIT;
17921279572aSDavid Xu 	p->p_flag |= P_STOPPED_SINGLE;
179344990b8cSJulian Elischer 	p->p_singlethread = td;
17949d102777SJulian Elischer 	/* XXXKSE Which lock protects the below values? */
179544990b8cSJulian Elischer 	while ((p->p_numthreads - p->p_suspcount) != 1) {
179671fad9fdSJulian Elischer 		mtx_lock_spin(&sched_lock);
179744990b8cSJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td2) {
179844990b8cSJulian Elischer 			if (td2 == td)
179944990b8cSJulian Elischer 				continue;
18000252d203SDavid Xu 			td2->td_flags |= TDF_ASTPENDING;
180171fad9fdSJulian Elischer 			if (TD_IS_INHIBITED(td2)) {
18021279572aSDavid Xu 				if (force_exit == SINGLE_EXIT) {
18039d102777SJulian Elischer 					if (TD_IS_SUSPENDED(td2)) {
180471fad9fdSJulian Elischer 						thread_unsuspend_one(td2);
180571fad9fdSJulian Elischer 					}
180633862f40SDavid Xu 					if (TD_ON_SLEEPQ(td2) &&
180733862f40SDavid Xu 					    (td2->td_flags & TDF_SINTR)) {
1808e3b9bf71SJulian Elischer 						if (td2->td_flags & TDF_CVWAITQ)
180933862f40SDavid Xu 							cv_abort(td2);
1810e3b9bf71SJulian Elischer 						else
181133862f40SDavid Xu 							abortsleep(td2);
181271fad9fdSJulian Elischer 					}
18139d102777SJulian Elischer 				} else {
18149d102777SJulian Elischer 					if (TD_IS_SUSPENDED(td2))
18159d102777SJulian Elischer 						continue;
18165215b187SJeff Roberson 					/*
18175215b187SJeff Roberson 					 * Maybe other inhibited states too?
18185215b187SJeff Roberson 					 * XXXKSE Is it totally safe to
18195215b187SJeff Roberson 					 * suspend a non-interruptible thread?
18205215b187SJeff Roberson 					 */
182193a7aa79SJulian Elischer 					if (td2->td_inhibitors &
18225215b187SJeff Roberson 					    (TDI_SLEEPING | TDI_SWAPPED))
18239d102777SJulian Elischer 						thread_suspend_one(td2);
182444990b8cSJulian Elischer 				}
182544990b8cSJulian Elischer 			}
18269d102777SJulian Elischer 		}
18279d102777SJulian Elischer 		/*
18289d102777SJulian Elischer 		 * Maybe we suspended some threads; was it enough?
18299d102777SJulian Elischer 		 */
18309d102777SJulian Elischer 		if ((p->p_numthreads - p->p_suspcount) == 1) {
18319d102777SJulian Elischer 			mtx_unlock_spin(&sched_lock);
18329d102777SJulian Elischer 			break;
18339d102777SJulian Elischer 		}
18349d102777SJulian Elischer 
183544990b8cSJulian Elischer 		/*
183644990b8cSJulian Elischer 		 * Wake us up when everyone else has suspended.
1837e3b9bf71SJulian Elischer 		 * In the meantime we suspend as well.
183844990b8cSJulian Elischer 		 */
183971fad9fdSJulian Elischer 		thread_suspend_one(td);
184044990b8cSJulian Elischer 		mtx_unlock(&Giant);
184144990b8cSJulian Elischer 		PROC_UNLOCK(p);
1842696058c3SJulian Elischer 		p->p_stats->p_ru.ru_nvcsw++;
184344990b8cSJulian Elischer 		mi_switch();
184444990b8cSJulian Elischer 		mtx_unlock_spin(&sched_lock);
184544990b8cSJulian Elischer 		mtx_lock(&Giant);
184644990b8cSJulian Elischer 		PROC_LOCK(p);
184744990b8cSJulian Elischer 	}
18485215b187SJeff Roberson 	if (force_exit == SINGLE_EXIT) {
18495215b187SJeff Roberson 		if (td->td_upcall) {
18505215b187SJeff Roberson 			mtx_lock_spin(&sched_lock);
18515215b187SJeff Roberson 			upcall_remove(td);
18525215b187SJeff Roberson 			mtx_unlock_spin(&sched_lock);
18535215b187SJeff Roberson 		}
18545c8329edSJulian Elischer 		kse_purge(p, td);
18555215b187SJeff Roberson 	}
185644990b8cSJulian Elischer 	return (0);
185744990b8cSJulian Elischer }
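
/*
 * Illustrative sketch (hypothetical caller): an exit path forcing the
 * process single-threaded before tearing it down. Giant and the proc
 * lock must already be held, as asserted above.
 */
static int
example_force_single_exit(struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	error = thread_single(SINGLE_EXIT);
	if (error == 0) {
		/* We are now the only runnable thread; safe to dismantle p. */
	}
	return (error);
}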
185844990b8cSJulian Elischer 
185944990b8cSJulian Elischer /*
186044990b8cSJulian Elischer  * Called from locations that can safely check to see
186144990b8cSJulian Elischer  * whether we have to suspend or at least throttle for a
186244990b8cSJulian Elischer  * single-thread event (e.g. fork).
186344990b8cSJulian Elischer  *
186444990b8cSJulian Elischer  * Such locations include userret().
186544990b8cSJulian Elischer  * If the "return_instead" argument is non-zero, the thread must be able to
186644990b8cSJulian Elischer  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
186744990b8cSJulian Elischer  *
186844990b8cSJulian Elischer  * The 'return_instead' argument tells the function if it may do a
186944990b8cSJulian Elischer  * thread_exit() or suspend, or whether the caller must abort and back
187044990b8cSJulian Elischer  * out instead.
187144990b8cSJulian Elischer  *
187244990b8cSJulian Elischer  * If the thread that set the single_threading request has set the
187344990b8cSJulian Elischer  * P_SINGLE_EXIT bit in the process flags then this call will never return
187444990b8cSJulian Elischer  * if 'return_instead' is false, but will exit.
187544990b8cSJulian Elischer  *
187644990b8cSJulian Elischer  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
187744990b8cSJulian Elischer  *---------------+--------------------+---------------------
187844990b8cSJulian Elischer  *       0       | returns 0          |   returns 0 or 1
187944990b8cSJulian Elischer  *               | when ST ends       |   immediately
188044990b8cSJulian Elischer  *---------------+--------------------+---------------------
188144990b8cSJulian Elischer  *       1       | thread exits       |   returns 1
188244990b8cSJulian Elischer  *               |                    |  immediately
188344990b8cSJulian Elischer  * 0 = thread_exit() or suspension ok,
188444990b8cSJulian Elischer  * other = return error instead of stopping the thread.
188544990b8cSJulian Elischer  *
188644990b8cSJulian Elischer  * While a full suspension is in effect, even a single-threading
188744990b8cSJulian Elischer  * thread would be suspended if it made this call (but it shouldn't).
188844990b8cSJulian Elischer  * This call should only be made from places where
188944990b8cSJulian Elischer  * thread_exit() would be safe as that may be the outcome unless
189044990b8cSJulian Elischer  * return_instead is set.
189144990b8cSJulian Elischer  */
189244990b8cSJulian Elischer int
189344990b8cSJulian Elischer thread_suspend_check(int return_instead)
189444990b8cSJulian Elischer {
1895ecafb24bSJuli Mallett 	struct thread *td;
1896ecafb24bSJuli Mallett 	struct proc *p;
18975c8329edSJulian Elischer 	struct ksegrp *kg;
189844990b8cSJulian Elischer 
189944990b8cSJulian Elischer 	td = curthread;
190044990b8cSJulian Elischer 	p = td->td_proc;
19015c8329edSJulian Elischer 	kg = td->td_ksegrp;
190244990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
190344990b8cSJulian Elischer 	while (P_SHOULDSTOP(p)) {
19041279572aSDavid Xu 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
190544990b8cSJulian Elischer 			KASSERT(p->p_singlethread != NULL,
190644990b8cSJulian Elischer 			    ("singlethread not set"));
190744990b8cSJulian Elischer 			/*
1908e3b9bf71SJulian Elischer 			 * The only suspension in action is a
1909e3b9bf71SJulian Elischer 			 * single-threading. The single threader need not stop.
1910b6d5995eSJulian Elischer 			 * XXX Should be safe to access unlocked
1911b6d5995eSJulian Elischer 			 * as it can only be set to be true by us.
191244990b8cSJulian Elischer 			 */
1913e3b9bf71SJulian Elischer 			if (p->p_singlethread == td)
191444990b8cSJulian Elischer 				return (0);	/* Exempt from stopping. */
191544990b8cSJulian Elischer 		}
1916e3b9bf71SJulian Elischer 		if (return_instead)
191744990b8cSJulian Elischer 			return (1);
191844990b8cSJulian Elischer 
1919e574e444SDavid Xu 		mtx_lock_spin(&sched_lock);
1920e574e444SDavid Xu 		thread_stopped(p);
192144990b8cSJulian Elischer 		/*
192244990b8cSJulian Elischer 		 * If the process is waiting for us to exit,
192344990b8cSJulian Elischer 		 * this thread should just suicide.
19241279572aSDavid Xu 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
192544990b8cSJulian Elischer 		 */
192644990b8cSJulian Elischer 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
192744990b8cSJulian Elischer 			while (mtx_owned(&Giant))
192844990b8cSJulian Elischer 				mtx_unlock(&Giant);
192944990b8cSJulian Elischer 			thread_exit();
193044990b8cSJulian Elischer 		}
193144990b8cSJulian Elischer 
193244990b8cSJulian Elischer 		/*
193344990b8cSJulian Elischer 		 * When a thread suspends, it just
193444990b8cSJulian Elischer 		 * moves to the process's suspend queue
193544990b8cSJulian Elischer 		 * and stays there.
193644990b8cSJulian Elischer 		 */
193744990b8cSJulian Elischer 		mtx_assert(&Giant, MA_NOTOWNED);
193871fad9fdSJulian Elischer 		thread_suspend_one(td);
193944990b8cSJulian Elischer 		PROC_UNLOCK(p);
19401279572aSDavid Xu 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1941cf19bf91SJulian Elischer 			if (p->p_numthreads == p->p_suspcount) {
194271fad9fdSJulian Elischer 				thread_unsuspend_one(p->p_singlethread);
1943cf19bf91SJulian Elischer 			}
1944cf19bf91SJulian Elischer 		}
194520568366SJulian Elischer 		p->p_stats->p_ru.ru_nivcsw++;
194644990b8cSJulian Elischer 		mi_switch();
194744990b8cSJulian Elischer 		mtx_unlock_spin(&sched_lock);
194844990b8cSJulian Elischer 		PROC_LOCK(p);
194944990b8cSJulian Elischer 	}
195044990b8cSJulian Elischer 	return (0);
195144990b8cSJulian Elischer }
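
/*
 * Illustrative sketch (hypothetical user-boundary caller): the check a
 * userret()-style path makes. With return_instead == 0 the call may
 * sleep in suspension or never return (thread_exit()); with a non-zero
 * value the caller is told to back out instead.
 */
static int
example_boundary_check(struct proc *p, int can_abort)
{
	int error = 0;

	PROC_LOCK(p);
	if (P_SHOULDSTOP(p))
		error = thread_suspend_check(can_abort);
	PROC_UNLOCK(p);
	return (error);
}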
195244990b8cSJulian Elischer 
195335c32a76SDavid Xu void
195435c32a76SDavid Xu thread_suspend_one(struct thread *td)
195535c32a76SDavid Xu {
195635c32a76SDavid Xu 	struct proc *p = td->td_proc;
195735c32a76SDavid Xu 
195835c32a76SDavid Xu 	mtx_assert(&sched_lock, MA_OWNED);
1959e574e444SDavid Xu 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
196035c32a76SDavid Xu 	p->p_suspcount++;
196171fad9fdSJulian Elischer 	TD_SET_SUSPENDED(td);
196235c32a76SDavid Xu 	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
196371fad9fdSJulian Elischer 	/*
196471fad9fdSJulian Elischer 	 * Hack: If we are suspending but are on the sleep queue
196571fad9fdSJulian Elischer 	 * then we are in msleep or the cv equivalent. We
196671fad9fdSJulian Elischer 	 * want to look like we have two inhibitors.
19679d102777SJulian Elischer 	 * The sleeping flag may already be set; that doesn't matter.
196871fad9fdSJulian Elischer 	 */
196971fad9fdSJulian Elischer 	if (TD_ON_SLEEPQ(td))
197071fad9fdSJulian Elischer 		TD_SET_SLEEPING(td);
197135c32a76SDavid Xu }
197235c32a76SDavid Xu 
197335c32a76SDavid Xu void
197435c32a76SDavid Xu thread_unsuspend_one(struct thread *td)
197535c32a76SDavid Xu {
197635c32a76SDavid Xu 	struct proc *p = td->td_proc;
197735c32a76SDavid Xu 
197835c32a76SDavid Xu 	mtx_assert(&sched_lock, MA_OWNED);
197935c32a76SDavid Xu 	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
198071fad9fdSJulian Elischer 	TD_CLR_SUSPENDED(td);
198135c32a76SDavid Xu 	p->p_suspcount--;
198271fad9fdSJulian Elischer 	setrunnable(td);
198335c32a76SDavid Xu }
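
/*
 * Illustrative sketch (hypothetical helper): suspend/unsuspend are
 * always paired under sched_lock, and p_suspcount mirrors the length
 * of the p_suspended queue, which is what the "everyone stopped"
 * tests elsewhere in this file compare against p_numthreads. This
 * mirrors the loop in thread_single() above.
 */
static void
example_suspend_sleepers(struct proc *p, struct thread *self)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2 != self && !TD_IS_SUSPENDED(td2) &&
		    (td2->td_inhibitors & (TDI_SLEEPING | TDI_SWAPPED)))
			thread_suspend_one(td2);
	}
}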
198435c32a76SDavid Xu 
198544990b8cSJulian Elischer /*
198644990b8cSJulian Elischer  * Allow all threads blocked by single threading to continue running.
198744990b8cSJulian Elischer  */
198844990b8cSJulian Elischer void
198944990b8cSJulian Elischer thread_unsuspend(struct proc *p)
199044990b8cSJulian Elischer {
199144990b8cSJulian Elischer 	struct thread *td;
199244990b8cSJulian Elischer 
1993b6d5995eSJulian Elischer 	mtx_assert(&sched_lock, MA_OWNED);
199444990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
199544990b8cSJulian Elischer 	if (!P_SHOULDSTOP(p)) {
199644990b8cSJulian Elischer 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
199735c32a76SDavid Xu 			thread_unsuspend_one(td);
199844990b8cSJulian Elischer 		}
19991279572aSDavid Xu 	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
200044990b8cSJulian Elischer 	    (p->p_numthreads == p->p_suspcount)) {
200144990b8cSJulian Elischer 		/*
200244990b8cSJulian Elischer 		 * Stopping everything also did the job for the single
200344990b8cSJulian Elischer 		 * threading request. Now that we've downgraded to single-threaded,
200444990b8cSJulian Elischer 		 * let it continue.
200544990b8cSJulian Elischer 		 */
200635c32a76SDavid Xu 		thread_unsuspend_one(p->p_singlethread);
200744990b8cSJulian Elischer 	}
200844990b8cSJulian Elischer }
200944990b8cSJulian Elischer 
201044990b8cSJulian Elischer void
201144990b8cSJulian Elischer thread_single_end(void)
201244990b8cSJulian Elischer {
201344990b8cSJulian Elischer 	struct thread *td;
201444990b8cSJulian Elischer 	struct proc *p;
201544990b8cSJulian Elischer 
201644990b8cSJulian Elischer 	td = curthread;
201744990b8cSJulian Elischer 	p = td->td_proc;
201844990b8cSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
20191279572aSDavid Xu 	p->p_flag &= ~P_STOPPED_SINGLE;
202044990b8cSJulian Elischer 	p->p_singlethread = NULL;
202149539972SJulian Elischer 	/*
202249539972SJulian Elischer 	 * If there are other threads, they may now run,
202349539972SJulian Elischer 	 * unless of course there is a blanket 'stop order'
202449539972SJulian Elischer 	 * on the process. The single threader must be allowed
202549539972SJulian Elischer 	 * to continue however as this is a bad place to stop.
202649539972SJulian Elischer 	 */
202749539972SJulian Elischer 	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
202849539972SJulian Elischer 		mtx_lock_spin(&sched_lock);
202949539972SJulian Elischer 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
203071fad9fdSJulian Elischer 			thread_unsuspend_one(td);
203144990b8cSJulian Elischer 		}
203249539972SJulian Elischer 		mtx_unlock_spin(&sched_lock);
203349539972SJulian Elischer 	}
203449539972SJulian Elischer }
203549539972SJulian Elischer 
203644990b8cSJulian Elischer 
2037