/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift, and changes to the structures must be accompanied by the
 * assert update.
 *
 * On the stable branches after KBI freeze, conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xfc,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4a0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3b8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3d0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4b0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x98,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x300,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x344,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x268,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x27c,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x308,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

static struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long tidhash;
static u_long tidhashlock;
static struct rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);
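/*
 * Thread IDs are handed out from a bitmap sized to hold maxthread IDs and
 * are biased by NO_PID so that they never collide with process IDs.
 * tid_alloc() keeps a roving cursor (trytid) so it does not rescan the low
 * end of the bitmap on every allocation; tid_free() simply clears the bit
 * again.  Both run under tid_lock.
 */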
static lwpid_t
tid_alloc(void)
{
	static struct timeval lastfail;
	static int curfail;
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	if (nthreads + 1 >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads + 1 >= maxthread) {
			mtx_unlock(&tid_lock);
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				printf("maxthread limit exceeded by uid %u "
				    "(pid %d); consider increasing kern.maxthread\n",
				    curthread->td_ucred->cr_ruid, curproc->p_pid);
			}
			return (-1);
		}
	}

	nthreads++;
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs. If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}

static void
tid_free(lwpid_t rtid)
{
	lwpid_t tid;

	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	mtx_lock(&tid_lock);
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
	nthreads--;
	mtx_unlock(&tid_lock);
}
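/*
 * The callbacks below are installed on the thread UMA zone (see
 * threadinit()).  thread_ctor()/thread_dtor() run every time a thread is
 * taken from or returned to the zone, while thread_init()/thread_fini()
 * run only when the backing memory itself is created or destroyed, so the
 * structures they manage (sleep queue, turnstile, ...) survive ordinary
 * alloc/free cycles.
 */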
/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

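/*
 * Note that the thread zone is created with UMA_ZONE_NOFREE (see
 * threadinit()), so memory backing a struct thread is never handed back to
 * the VM.  This is what makes the "type-stable" contract above meaningful:
 * a stale thread pointer always points at some, possibly recycled, thread.
 */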
/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

extern int max_threads_per_proc;

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;
	uint32_t flags;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	flags = UMA_ZONE_NOFREE;
#ifdef __aarch64__
	/*
	 * Force thread structures to be allocated from the direct map.
	 * Otherwise, superpage promotions and demotions may temporarily
	 * invalidate thread structure mappings.  For most dynamically
	 * allocated structures this is not a problem, but translation
	 * faults cannot be handled without accessing curthread.
	 */
	flags |= UMA_ZONE_CONTIG;
#endif
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, flags);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			thread_cow_free(td_first);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
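/*
 * An exiting thread cannot free its own structure while it is still running
 * on its stack, so it is first parked on the zombie list (thread_zombie() /
 * thread_stash()) and only reclaimed later by thread_reap(), which runs in
 * a different context, e.g. opportunistically from thread_alloc() and
 * thread_wait() below.
 */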
/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	thread_reap(); /* check if any zombies to get */

	tid = tid_alloc();
	if (tid == -1) {
		return (NULL);
	}

	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		return (NULL);
	}
	td->td_tid = tid;
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}
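/*
 * A rough sketch of how a caller (for instance the thr_new() path) is
 * expected to use the allocator above; the exact sequence and error
 * handling in the real callers may differ:
 *
 *	newtd = thread_alloc(0);
 *	if (newtd == NULL)
 *		return (ENOMEM);	(TID or kernel stack unavailable)
 *	cpu_copy_thread(newtd, td);	(MD register/pcb setup)
 *	thread_cow_get(newtd, td);	(share credentials and limits)
 *	PROC_LOCK(p);
 *	thread_link(newtd, p);		(make it visible in the process)
 *	PROC_UNLOCK(p);
 */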
/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	tid_free(td->td_tid);
	td->td_tid = -1;
	uma_zfree(thread_zone, td);
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldlimit = NULL;
	PROC_LOCK(p);
	oldcred = crcowsync();
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}
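/*
 * The thread_cow_*() functions above give each thread its own references
 * to per-process data that is read on hot paths (credentials and resource
 * limits), together with a generation number (td_cowgen).  When the
 * process-wide copies are replaced, p_cowgen is bumped and each thread is
 * expected to call thread_cow_update(), typically on its way back to user
 * mode, to drop the stale references and pick up the current ones.
 */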
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
	 */
restart:
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2)) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, EINTR);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_BOUNDARY) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
				return (wakeup_swapper);
			}
		}
		break;
	default:
		break;
	}
	thread_unlock(td2);
	return (wakeup_swapper);
}

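/*
 * An illustrative note on how the machinery below is used, based on the
 * comments in this file: exit1() and execve() single-thread their own
 * process, with SINGLE_EXIT retiring the other threads for good and
 * SINGLE_BOUNDARY parking them at the user boundary so that execve() can
 * safely replace the address space; SINGLE_ALLPROC is the variant used to
 * stop a foreign process (curthread must not belong to it, see the KASSERT
 * in thread_single()).  A non-exiting request is undone with
 * thread_single_end().  Stopped threads park themselves in
 * thread_suspend_check() below.
 */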
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptable. (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
				thread_unlock(td2);
#endif
			} else
				thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
                 */
                if ((td->td_flags & TDF_SBDRY) != 0) {
                        KASSERT(return_instead,
                            ("TDF_SBDRY set for unsafe thread_suspend_check"));
                        KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
                            (TDF_SEINTR | TDF_SERESTART),
                            ("both TDF_SEINTR and TDF_SERESTART"));
                        return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
                }

                /*
                 * If the process is waiting for us to exit,
                 * this thread should simply exit.
                 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
                 */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                        PROC_UNLOCK(p);

                        /*
                         * Allow the Linux emulation layer to do some work
                         * before the thread exits.
                         */
                        if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
                                (p->p_sysent->sv_thread_detach)(td);
                        umtx_thread_exit(td);
                        kern_thr_exit(td);
                        panic("stopped thread did not exit");
                }

                PROC_SLOCK(p);
                thread_stopped(p);
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount + 1) {
                                thread_lock(p->p_singlethread);
                                wakeup_swapper = thread_unsuspend_one(
                                    p->p_singlethread, p, false);
                                if (wakeup_swapper)
                                        kick_proc0();
                        }
                }
                PROC_UNLOCK(p);
                thread_lock(td);
                /*
                 * When a thread suspends, it just
                 * gets taken off all queues.
                 */
                thread_suspend_one(td);
                if (return_instead == 0) {
                        p->p_boundary_count++;
                        td->td_flags |= TDF_BOUNDARY;
                }
                PROC_SUNLOCK(p);
                mi_switch(SW_INVOL | SWT_SUSPEND);
                PROC_LOCK(p);
        }
        return (0);
}

/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or whether it should return ERESTART and let the request
 * be processed at the kernel/user boundary in ast.
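 *
 * As an illustrative sketch only (uaddr, old, prev, new and error are
 * hypothetical locals; the umtx code contains the real call sites), a
 * casueword(9) retry loop using this function might look like:
 *
 *	for (;;) {
 *		rv = casueword32(uaddr, old, &prev, new);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;			(CAS succeeded)
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}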
 *
 * Typically, when retrying due to casueword(9) failure (rv == 1), we
 * should handle the stop requests there, with the exception of cases
 * when the thread owns a kernel resource, for instance busied the umtx
 * key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, when retrying the whole lock
 * operation, we had better not stop there but delegate the handling to
 * ast.
 *
 * If the request is for thread termination (P_SINGLE_EXIT), we cannot
 * handle it at all, and simply return EINTR.
 */
int
thread_check_susp(struct thread *td, bool sleep)
{
        struct proc *p;
        int error;

        /*
         * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
         * eventually break the lockstep loop.
         */
        if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
                return (0);
        error = 0;
        p = td->td_proc;
        PROC_LOCK(p);
        if (p->p_flag & P_SINGLE_EXIT)
                error = EINTR;
        else if (P_SHOULDSTOP(p) ||
            ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
                error = sleep ? thread_suspend_check(0) : ERESTART;
        PROC_UNLOCK(p);
        return (error);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        /*
         * We implement thread_suspend_one in stages here to avoid
         * dropping the proc lock while the thread lock is owned.
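         * The stages below are:
         *  1. account for the stop (thread_stopped() and p_suspcount)
         *     under the proc lock and spinlock, when td belongs to p;
         *  2. drop the proc lock, take the thread lock and mark the
         *     thread suspended;
         *  3. drop the proc spinlock and context switch away;
         *  4. once resumed, retake the proc lock and proc spinlock for
         *     the caller.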
         */
        if (p == td->td_proc) {
                thread_stopped(p);
                p->p_suspcount++;
        }
        PROC_UNLOCK(p);
        thread_lock(td);
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
        PROC_SUNLOCK(p);
        DROP_GIANT();
        mi_switch(SW_VOL | SWT_SUSPEND);
        PICKUP_GIANT();
        PROC_LOCK(p);
        PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        p->p_suspcount++;
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
        TD_CLR_SUSPENDED(td);
        td->td_flags &= ~TDF_ALLPROCSUSP;
        if (td->td_proc == p) {
                PROC_SLOCK_ASSERT(p, MA_OWNED);
                p->p_suspcount--;
                if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
                        td->td_flags &= ~TDF_BOUNDARY;
                        p->p_boundary_count--;
                }
        }
        return (setrunnable(td, 0));
}

/*
 * Allow all threads blocked by single threading to continue running.
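 * The caller must hold both the process lock and the process spinlock;
 * both are still held on return.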
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;
        int wakeup_swapper;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        wakeup_swapper = 0;
        if (!P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                wakeup_swapper |= thread_unsuspend_one(td, p,
                                    true);
                        } else
                                thread_unlock(td);
                }
        } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
            p->p_numthreads == p->p_suspcount) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request.  Now that we've downgraded to
                 * single-threaded, let it continue.
                 */
                if (p->p_singlethread->td_proc == p) {
                        thread_lock(p->p_singlethread);
                        wakeup_swapper = thread_unsuspend_one(
                            p->p_singlethread, p, false);
                }
        }
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * End the single-threading mode.
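 * The mode argument must match the mode passed to the corresponding
 * thread_single() call and, except for SINGLE_ALLPROC, the caller must
 * be the thread that requested the single-threading.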
 */
void
thread_single_end(struct proc *p, int mode)
{
        struct thread *td;
        int wakeup_swapper;

        KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
            mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
            ("invalid mode %d", mode));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
            (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
            ("mode %d does not match P_TOTAL_STOP", mode));
        KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
            ("thread_single_end from other thread %p %p",
            curthread, p->p_singlethread));
        KASSERT(mode != SINGLE_BOUNDARY ||
            (p->p_flag & P_SINGLE_BOUNDARY) != 0,
            ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
        p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
            P_TOTAL_STOP);
        PROC_SLOCK(p);
        p->p_singlethread = NULL;
        wakeup_swapper = 0;
        /*
         * If there are other threads, they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process.  The single-threading thread must be
         * allowed to continue, however, as this is a bad place to stop.
         */
        if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                wakeup_swapper |= thread_unsuspend_one(td, p,
                                    mode == SINGLE_BOUNDARY);
                        } else
                                thread_unlock(td);
                }
        }
        KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
            ("inconsistent boundary count %d", p->p_boundary_count));
        PROC_SUNLOCK(p);
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * Locate a thread by number and return with proc lock held.
 *
 * Thread exit establishes the proc -> tidhash lock ordering, but the
 * lookup takes the tidhash lock first and needs to return a locked proc.
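 * Taking the proc lock while still holding the tidhash lock would
 * therefore invert that order.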
 *
 * The problem is worked around by relying on type-safety of both
 * structures and doing the work in 2 steps:
 * - tidhash-locked lookup which saves both thread and proc pointers
 * - proc-locked verification that the found thread still matches
 */
static bool
tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
{
#define RUN_THRESH      16
        struct proc *p;
        struct thread *td;
        int run;
        bool locked;

        run = 0;
        rw_rlock(TIDHASHLOCK(tid));
        locked = true;
        LIST_FOREACH(td, TIDHASH(tid), td_hash) {
                if (td->td_tid != tid) {
                        run++;
                        continue;
                }
                p = td->td_proc;
                if (pid != -1 && p->p_pid != pid) {
                        td = NULL;
                        break;
                }
                if (run > RUN_THRESH) {
                        if (rw_try_upgrade(TIDHASHLOCK(tid))) {
                                LIST_REMOVE(td, td_hash);
                                LIST_INSERT_HEAD(TIDHASH(td->td_tid),
                                    td, td_hash);
                                rw_wunlock(TIDHASHLOCK(tid));
                                locked = false;
                                break;
                        }
                }
                break;
        }
        if (locked)
                rw_runlock(TIDHASHLOCK(tid));
        if (td == NULL)
                return (false);
        *pp = p;
        *tdp = td;
        return (true);
}

struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
        struct proc *p;
        struct thread *td;

        td = curthread;
        if (td->td_tid == tid) {
                if (pid != -1 && td->td_proc->p_pid != pid)
                        return (NULL);
                PROC_LOCK(td->td_proc);
                return (td);
        }

        for (;;) {
                if (!tdfind_hash(tid, pid, &p, &td))
                        return (NULL);
                PROC_LOCK(p);
                if (td->td_tid != tid) {
                        PROC_UNLOCK(p);
                        continue;
                }
                if (td->td_proc != p) {
                        PROC_UNLOCK(p);
                        continue;
                }
                if (p->p_state == PRS_NEW) {
                        PROC_UNLOCK(p);
                        return (NULL);
                }
                return (td);
        }
}
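
/*
 * Illustrative (hypothetical) use of tdfind(); the real consumers are
 * code paths such as the thr*(2) system calls:
 *
 *	td = tdfind(tid, p->p_pid);
 *	if (td == NULL)
 *		return (ESRCH);
 *	... operate on td with its process locked ...
 *	PROC_UNLOCK(td->td_proc);
 */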

void
tidhash_add(struct thread *td)
{
        rw_wlock(TIDHASHLOCK(td->td_tid));
        LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
        rw_wunlock(TIDHASHLOCK(td->td_tid));
}

void
tidhash_remove(struct thread *td)
{

        rw_wlock(TIDHASHLOCK(td->td_tid));
        LIST_REMOVE(td, td_hash);
        rw_wunlock(TIDHASHLOCK(td->td_tid));
}
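
/*
 * Note on locking: the hash chains are protected by the rwlocks
 * returned by TIDHASHLOCK(tid), so the additions and removals above
 * serialize against tdfind() lookups through the lock covering the
 * tid's chain rather than through a single global tidhash lock.
 */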