/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
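/*
 * A note on the KSE conditionals below: they appear to select between
 * the M:N (Kernel Scheduled Entities) threading support and plain 1:1
 * threading; with KSE disabled, the ksegrp and upcall machinery
 * compiles away entirely.
 */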
#ifdef KSE
/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
#else
/*
 * thread related storage.
 */
#endif
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
    &thread_debug, 0, "thread debug");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_groups_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
    &max_groups_per_proc, 0, "Limit on thread groups per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

#ifdef KSE
int virtual_cpu;

#endif
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
#ifdef KSE
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
#endif
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

#ifdef KSE
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");
#endif

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
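/*
 * Thread IDs are handed out from the unr(9) allocator set up in
 * threadinit() below; since the range starts above PID_MAX, a tid can
 * never collide with a process ID.
 */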
/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.  A context switch must occur inside a
	 * critical section, and in fact, includes hand-off of the sched_lock.
	 * After a context switch to a newly created thread, it will release
	 * sched_lock for the first time, and its td_critnest will hit 0 for
	 * the first time.  This happens on the far end of a context switch,
	 * and when it context switches away from itself, it will in fact go
	 * back into a critical section, and hand off the sched lock to the
	 * next thread.
	 */
	td->td_critnest = 1;

#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}
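/*
 * A note on the zone callbacks: thread_ctor/thread_dtor run on every
 * allocation and free from thread_zone, while thread_init/thread_fini
 * below run only when an item is first created from, or finally
 * released back to, the backing store.  Expensive, type-stable
 * resources (the kernel stack, sleep queue and turnstile) therefore
 * survive thread recycling.
 */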
/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	umtx_thread_init(td);
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	vm_thread_dispose(td);
}
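/*
 * Scheduler-private state lives immediately after the public structure:
 * thread_init() points td_sched at &td[1], and threadinit() sizes the
 * zone items with sched_sizeof_thread() (the ksegrp zone plays the same
 * trick with kg_sched), so a single allocation covers both.
 */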
#ifdef KSE
/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static int
ksegrp_ctor(void *mem, int size, void *arg, int flags)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	bzero(mem, size);
	kg->kg_sched = (struct kg_sched *)&kg[1];
	return (0);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

/*
 * Called from:
 *  thread_exit()
 */
void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/* Clear any cached pointer to this, the process-scope group. */
	if (p->p_procscopegrp == kg)
		p->p_procscopegrp = NULL;
}
#endif
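/*
 * Background, roughly: in the KSE model a ksegrp collects threads that
 * share scheduling state (concurrency, priority).  Every process gets a
 * single initial group from proc_linkup() below; additional groups only
 * appear if the process creates them through the kse_*() interfaces.
 */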
/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * Called from:
 *  {arch}/{arch}/machdep.c  ia64_init(), init386() etc.
 *  proc_dtor() (should go away)
 *  proc_init()
 */
void
#ifdef KSE
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
#else
proc_linkup(struct proc *p, struct thread *td)
#endif
{

#ifdef KSE
	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
#endif
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* threads suspended */
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
#ifdef KSE
	p->p_numksegrps = 0;
#endif
	p->p_numthreads = 0;

#ifdef KSE
	ksegrp_link(kg, p);
	thread_link(td, kg);
#else
	thread_link(td, p);
#endif
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
#ifdef KSE
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    ksegrp_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kseinit();	/* set up kse specific stuff, e.g. the upcall zone */
#endif
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}
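/*
 * Why stash at all: a thread cannot free itself while it is still
 * running on its own stack, so exiting threads are parked on the
 * zombie list (or in the per-CPU deadthread holder) and reclaimed
 * later by thread_reap() from a safe context.
 */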
#ifdef KSE
/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}
#endif

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
#ifdef KSE
	struct ksegrp *kg_first, *kg_next;
#endif

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
#ifdef KSE
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
#else
	if (!TAILQ_EMPTY(&zombie_threads)) {
#endif
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
#ifdef KSE
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
#endif
		if (td_first)
			TAILQ_INIT(&zombie_threads);
#ifdef KSE
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
#endif
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
#ifdef KSE
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		/*
		 * There will always be a thread on the list if one of
		 * these is there.
		 */
		kse_GC();
#endif
	}
}

#ifdef KSE
/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}
#endif
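/*
 * Both allocators use M_WAITOK: uma_zalloc() may sleep until memory is
 * available but will not return NULL, so callers need no error path.
 */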
/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{

	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

#ifdef KSE
/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}
#endif

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/* XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess.  Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * Called from:
 *  exit1()
 *  kse_exit()
 *  thr_exit()
 *  ifdef KSE
 *   thread_user_enter()
 *   thread_userret()
 *  endif
 *  thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct proc *p;
#ifdef KSE
	struct ksegrp *kg;
#endif

	td = curthread;
#ifdef KSE
	kg = td->td_ksegrp;
#endif
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
#ifdef KSE
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
#endif
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

#ifdef KSE
	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
#endif

	umtx_thread_exit(td);

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

#ifdef KSE
	/*
	 * The thread is exiting.  The scheduler can release its stuff
	 * and collect stats etc.
	 * XXX this is not very right, since PROC_UNLOCK may still
	 * need scheduler stuff.
	 */
	sched_thread_exit(td);
#endif
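	/*
	 * An exiting thread never returns through mi_switch(), so the
	 * usual switch-time accounting has to be done by hand here: its
	 * accumulated ticks are folded into the process-wide rusage
	 * before the thread disappears.
	 */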
	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	p->p_rux.rux_uticks += td->td_uticks;
	p->p_rux.rux_sticks += td->td_sticks;
	p->p_rux.rux_iticks += td->td_iticks;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	cnt.v_swtch++;

	/* Add our usage into the usage of all our children. */
	if (p->p_numthreads == 1)
		ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);
#ifdef KSE
			/* XXX first arg not used in 4BSD or ULE */
			sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
#else
			sched_exit(p, td);
#endif

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_unsuspend_one(p->p_singlethread);
				}
			}

#ifdef KSE
			/*
			 * Because each upcall structure has an owner thread,
			 * the owner thread exits only when the process is in
			 * the exiting state, so an upcall to userland is no
			 * longer needed, and deleting the upcall structure is
			 * safe here.  So when all threads in a group have
			 * exited, all upcalls in the group should be
			 * automatically freed.
			 * XXXKSE This is a KSE thing and should be exported
			 * there somehow.
			 */
			upcall_remove(td);

			/*
			 * If the thread we unlinked above was the last one,
			 * then this ksegrp should go away too.
			 */
			if (kg->kg_numthreads == 0) {
				/*
				 * Let the scheduler know about this in case
				 * it needs to recover stats or resources.
				 * Theoretically we could let
				 * sched_exit_ksegrp() do the equivalent of
				 * setting the concurrency to 0
				 * but don't do it yet to avoid changing
				 * the existing scheduler code until we
				 * are ready.
				 * We supply a random other ksegrp
				 * as the recipient of any built up
				 * cpu usage etc. (if the scheduler wants it).
				 * XXXKSE
				 * This is probably not fair so think of
				 * a better answer.
				 */
				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
				sched_set_concurrency(kg, 0); /* XXX TEMP */
				ksegrp_unlink(kg);
				ksegrp_stash(kg);
			}
#endif
			PROC_UNLOCK(p);
#ifdef KSE
			td->td_ksegrp = NULL;
#endif
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit().
			 * What should we do?
			 * Theoretically this can't happen:
			 *  exit1() - clears threading flags before coming here
			 *  kse_exit() - treats last thread specially
			 *  thr_exit() - treats last thread specially
			 *  ifdef KSE
			 *   thread_user_enter() - only if more exist
			 *   thread_userret() - only if more exist
			 *  endif
			 *  thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	} else {
		/*
		 * A non-threaded process comes here.
		 * This includes an ex-threaded process that is coming
		 * here via exit1().  (exit1 dethreads the proc first).
		 */
		PROC_UNLOCK(p);
	}
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	cpu_throw(td, choosethread());
	panic("I'm a teapot!");
	/* NOTREACHED */
}
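/*
 * A note on the tail above: cpu_throw() switches to the chosen thread
 * and never returns, so the teapot panic is only a guard against a
 * broken context switch path.
 */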
/*
 * Do any thread specific cleanups that may be needed in wait(),
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
#ifdef KSE
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
#endif
	FOREACH_THREAD_IN_PROC(p, td) {
#ifdef KSE
		if (td->td_standin != NULL) {
			if (td->td_standin->td_ucred != NULL) {
				crfree(td->td_standin->td_ucred);
				td->td_standin->td_ucred = NULL;
			}
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
#endif
		cpu_thread_clean(td);
		crfree(td->td_ucred);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  ifdef KSE
 *   thread_schedule_upcall()
 *  endif
 *  thr_create()
 */
void
#ifdef KSE
thread_link(struct thread *td, struct ksegrp *kg)
#else
thread_link(struct thread *td, struct proc *p)
#endif
{
#ifdef KSE
	struct proc *p;
#endif

#ifdef KSE
	p = kg->kg_proc;
#endif
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
#ifdef KSE
	td->td_ksegrp = kg;
#endif
	td->td_flags = 0;
#ifdef KSE
	td->td_kflags = 0;
#endif

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
#ifdef KSE
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
#endif
	p->p_numthreads++;
#ifdef KSE
	kg->kg_numthreads++;
#endif
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit()  XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
	upcall_remove(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
	sched_set_concurrency(td->td_ksegrp, 1);
#else
	p->p_flag &= ~P_HADTHREADS;
#endif
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
#ifdef KSE
	struct ksegrp *kg = td->td_ksegrp;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
#ifdef KSE
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
#endif
	/* could clear a few other things here */
#ifdef KSE
	/* Must NOT clear links to proc and ksegrp! */
#else
	/* Must NOT clear links to proc! */
#endif
}
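/*
 * A quick map of the modes handled below: SINGLE_EXIT forces every
 * other thread to exit, SINGLE_BOUNDARY parks them at the user/kernel
 * boundary, and the default (no-exit) mode merely suspends them; the
 * "remaining" count in thread_single() is computed accordingly.
 */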
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2))
						continue;
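					/*
					 * Default (suspend) mode: leave
					 * already-suspended threads alone and
					 * only pull in those sleeping
					 * interruptibly; runnable threads are
					 * chased down with an IPI below.
					 */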
					/*
					 * Maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_stopped(p);
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec.  In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

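		/*
		 * Error-code choice: EINTR above (an exit/exec is in
		 * progress, do not restart the syscall) versus ERESTART
		 * below (a boundary stop is transient, so the syscall
		 * can be transparently restarted afterwards).
		 */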
		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount)
				thread_unsuspend_one(p->p_singlethread);
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0) {
			p->p_boundary_count--;
			td->td_flags &= ~TDF_BOUNDARY;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}
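/*
 * Bookkeeping invariant for the two helpers above: p_suspcount always
 * matches the number of threads on p_suspended, and a suspended thread
 * is parked via its td_runq linkage, which is free for reuse because a
 * suspended thread is never on a run queue.
 */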
/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
#ifdef KSE
	p->p_procscopegrp = NULL;
#endif
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	mtx_unlock_spin(&sched_lock);
	return (td);
}