/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

int max_groups_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

int virtual_cpu;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");
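
/*
 * Thread ID allocation.  tids are handed out by a unit-number allocator
 * covering (PID_MAX, INT_MAX] (see threadinit()), so a thread ID can
 * never collide with a process ID.
 */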
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.  A context switch must occur inside a
	 * critical section, and in fact, includes hand-off of the sched_lock.
	 * After a context switch to a newly created thread, it will release
	 * sched_lock for the first time, and its td_critnest will hit 0 for
	 * the first time.  This happens on the far end of a context switch,
	 * and when it context switches away from itself, it will in fact go
	 * back into a critical section, and hand off the sched lock to the
	 * next thread.
	 */
	td->td_critnest = 1;

#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif

	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_umtxq = umtxq_alloc();
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtxq_free(td->td_umtxq);
	vm_thread_dispose(td);
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static int
ksegrp_ctor(void *mem, int size, void *arg, int flags)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	bzero(mem, size);
	kg->kg_sched = (struct kg_sched *)&kg[1];
	return (0);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * the following counters are in the -zero- section
	 * and may not need clearing
	 */
	kg->kg_numthreads = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

/*
 * Called from:
 *  thread_exit()
 */
void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE
	 */
	if (p->p_procscopegrp == kg)
		p->p_procscopegrp = NULL;
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 *	{arch}/{arch}/machdep.c  ia64_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	thread_link(td, kg);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    ksegrp_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kseinit();	/* set up kse specific stuff e.g. upcall zone */
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct ksegrp *kg_first, *kg_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		/*
		 * there will always be a thread on the list if one of these
		 * is there.
		 */
		kse_GC();
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}
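
/*
 * Note: both ksegrp_alloc() and thread_alloc() allocate from their UMA
 * zones with M_WAITOK and may therefore sleep; thread_alloc() also reaps
 * any zombie threads first so the zone can recycle them.
 */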

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *td)
{
	uma_zfree(ksegrp_zone, td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/* XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess. Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it. (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1
 * anyhow..  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * called from:
 *  exit1()
 *  kse_exit()
 *  thr_exit()
 *  thread_user_enter()
 *  thread_userret()
 *  thread_suspend_check()
 */
void
thread_exit(void)
{
	struct thread *td;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The thread is exiting. scheduler can release its stuff
	 * and collect stats etc.
	 */
	sched_thread_exit(td);

	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);

			/* XXX first arg not used in 4BSD or ULE */
			sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.
			 * P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_unsuspend_one(p->p_singlethread);
				}
			}

			/*
			 * Because each upcall structure has an owner thread,
			 * the owner thread exits only when the process is
			 * exiting, so an upcall to userland is no longer
			 * needed and deleting the upcall structure is safe
			 * here.  So when all threads in a group have exited,
			 * all upcalls in the group should be automatically
			 * freed.
			 * XXXKSE This is a KSE thing and should be exported
			 * there somehow.
			 */
			upcall_remove(td);

			/*
			 * If the thread we unlinked above was the last one,
			 * then this ksegrp should go away too.
			 */
			if (kg->kg_numthreads == 0) {
				/*
				 * let the scheduler know about this in case
				 * it needs to recover stats or resources.
				 * Theoretically we could let
				 * sched_exit_ksegrp() do the equivalent of
				 * setting the concurrency to 0
				 * but don't do it yet to avoid changing
				 * the existing scheduler code until we
				 * are ready.
				 * We supply a random other ksegrp
				 * as the recipient of any built up
				 * cpu usage etc. (If the scheduler wants it).
				 * XXXKSE
				 * This is probably not fair so think of
				 * a better answer.
				 */
				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
				sched_set_concurrency(kg, 0); /* XXX TEMP */
				ksegrp_unlink(kg);
				ksegrp_stash(kg);
			}
			PROC_UNLOCK(p);
			td->td_ksegrp = NULL;
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 * what should we do?
			 * Theoretically this can't happen:
			 *  exit1() - clears threading flags before coming here
			 *  kse_exit() - treats last thread specially
			 *  thr_exit() - treats last thread specially
			 *  thread_user_enter() - only if more exist
			 *  thread_userret() - only if more exist
			 *  thread_suspend_check() - only if more exist
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	} else {
		/*
		 * non threaded process comes here.
		 * This includes an ex-threaded process that is coming
		 * here via exit1(). (exit1 dethreads the proc first).
		 */
		PROC_UNLOCK(p);
	}
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	cpu_throw(td, choosethread());
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			if (td->td_standin->td_ucred != NULL) {
				crfree(td->td_standin->td_ucred);
				td->td_standin->td_ucred = NULL;
			}
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
		crfree(td->td_ucred);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 *  thr_create()
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state   = TDS_INACTIVE;
	td->td_proc    = p;
	td->td_ksegrp  = kg;
	td->td_flags   = 0;
	td->td_kflags  = 0;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *	thread_single(exit)  (called from execve and exit)
 *	kse_exit() XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	upcall_remove(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
	sched_set_concurrency(td->td_ksegrp, 1);
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc and ksegrp! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible. (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2);
					break;
				default:
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
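
/*
 * Illustrative sketch (not taken from any particular caller): code that
 * needs the process quiesced, such as execve() or exit1(), holds the proc
 * lock, requests single-threading, does its work and then either exits or
 * lets the other threads resume:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_BOUNDARY)) {
 *		// someone else is already single-threading; back out
 *	} else {
 *		// only this thread is running; others wait at the boundary
 *		...
 *		thread_single_end();
 *	}
 *	PROC_UNLOCK(p);
 *
 * The mode used and the error handling vary by caller; see the actual
 * callers for details.
 */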

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (1);

		/*
		 * Should we go to the user boundary if we didn't come from
		 * there?
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (1);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount)
				thread_unsuspend_one(p->p_singlethread);
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0) {
			p->p_boundary_count--;
			td->td_flags &= ~TDF_BOUNDARY;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	p->p_procscopegrp = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Called before going into an interruptible sleep to see if we have been
 * interrupted or requested to exit.
 */
int
thread_sleep_check(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	mtx_assert(&sched_lock, MA_OWNED);
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_singlethread != td) {
			if (p->p_flag & P_SINGLE_EXIT)
				return (EINTR);
			if (p->p_flag & P_SINGLE_BOUNDARY)
				return (ERESTART);
		}
		if (td->td_flags & TDF_INTERRUPT)
			return (td->td_intrval);
	}
	return (0);
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	mtx_unlock_spin(&sched_lock);
	return (td);
}