/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/dtrace_bsd.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/taskqueue.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtxvar.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_phys.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift; changes to the structures must be accompanied by updates
 * to the asserts.
 *
 * On the stable branches after KBI freeze, the conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
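 *
 * For illustration: a hypothetical out-of-tree module compiled against
 * the frozen layout hard-codes these offsets, so a load of td->td_flags
 * becomes a fixed-offset access (0x108 on amd64) in the module binary.
 * The asserts below catch any accidental layout shift at build time
 * rather than at module load or run time.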
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0x108,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x114,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4b0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xc4,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e4,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x9c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x30c,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x350,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x6c,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x78,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x270,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x288,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x314,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread related storage.
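 *
 * Threads come from a dedicated UMA zone; once freed they are parked
 * on per-domain zombie lists (struct thread_domain_data below) until
 * a reaper returns them to the zone.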
 */
static uma_zone_t thread_zone;

struct thread_domain_data {
	struct thread	*tdd_zombies;
	int		tdd_reapticks;
} __aligned(CACHE_LINE_SIZE);

static struct thread_domain_data thread_domain_data[MAXMEMDOM];

static struct task	thread_reap_task;
static struct callout	thread_reap_callout;

static void thread_zombie(struct thread *);
static void thread_reap(void);
static void thread_reap_all(void);
static void thread_reap_task_cb(void *, int);
static void thread_reap_callout_cb(void *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);
static void thread_free_batched(struct thread *td);

static __exclusive_cache_line struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static __exclusive_cache_line int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long	tidhash;
static u_long	tidhashlock;
static struct	rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static bool
thread_count_inc_try(void)
{
	int nthreads_new;

	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
	if (nthreads_new >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads_new >= maxthread) {
			atomic_subtract_int(&nthreads, 1);
			return (false);
		}
	}
	return (true);
}

static bool
thread_count_inc(void)
{
	static struct timeval lastfail;
	static int curfail;

	thread_reap();
	if (thread_count_inc_try()) {
		return (true);
	}

	thread_reap_all();
	if (thread_count_inc_try()) {
		return (true);
	}

	if (ppsratecheck(&lastfail, &curfail, 1)) {
		printf("maxthread limit exceeded by uid %u "
		    "(pid %d); consider increasing kern.maxthread\n",
		    curthread->td_ucred->cr_ruid, curproc->p_pid);
	}
	return (false);
}

static void
thread_count_sub(int n)
{

	atomic_subtract_int(&nthreads, n);
}

static void
thread_count_dec(void)
{

	thread_count_sub(1);
}

static lwpid_t
tid_alloc(void)
{
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs.  If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}

static void
tid_free_locked(lwpid_t rtid)
{
	lwpid_t tid;

	mtx_assert(&tid_lock, MA_OWNED);
	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
}

static void
tid_free(lwpid_t rtid)
{

	mtx_lock(&tid_lock);
	tid_free_locked(rtid);
	mtx_unlock(&tid_lock);
}

static void
tid_free_batch(lwpid_t *batch, int n)
{
	int i;

	mtx_lock(&tid_lock);
	for (i = 0; i < n; i++) {
		tid_free_locked(batch[i]);
	}
	mtx_unlock(&tid_lock);
}

/*
 * Batching for thread reaping.
 */
struct tidbatch {
	lwpid_t tab[16];
	int n;
};

static void
tidbatch_prep(struct tidbatch *tb)
{

	tb->n = 0;
}

static void
tidbatch_add(struct tidbatch *tb, struct thread *td)
{

	KASSERT(tb->n < nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	tb->tab[tb->n] = td->td_tid;
	tb->n++;
}

static void
tidbatch_process(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n == nitems(tb->tab)) {
		tid_free_batch(tb->tab, tb->n);
		tb->n = 0;
	}
}

static void
tidbatch_final(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n != 0) {
		tid_free_batch(tb->tab, tb->n);
	}
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_ctor(td);
#endif
	umtx_thread_alloc(td);
	MPASS(td->td_sel == NULL);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (TD_GET_STATE(td)) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_dtor(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);
	ast_kclear(td);
	seltdfini(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
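 *
 * UMA invokes this once when an item is first backed by memory;
 * recurring per-allocation setup belongs in thread_ctor() above.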
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_allocdomain = vm_phys_domain(vtophys(td));
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	MPASS(td->td_sel == NULL);
}

/*
 * For a newly created process, link up all the structures and its
 * initial thread etc.
 * Called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(M_WAITOK);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

static void
ast_suspend(struct thread *td, int tda __unused)
{
	struct proc *p;

	p = td->td_proc;
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	PROC_LOCK(p);
	thread_suspend_check(0);
	PROC_UNLOCK(p);
}

extern int max_threads_per_proc;

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;
	uint32_t flags;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	/*
	 * Handle thread0.
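	 * thread0 consumes one thread count and the very first ID, so
	 * that tid_alloc() below hands out exactly THREAD0_TID.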
	 */
	thread_count_inc();
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	flags = UMA_ZONE_NOFREE;
#ifdef __aarch64__
	/*
	 * Force thread structures to be allocated from the direct map.
	 * Otherwise, superpage promotions and demotions may temporarily
	 * invalidate thread structure mappings.  For most dynamically
	 * allocated structures this is not a problem, but translation
	 * faults cannot be handled without accessing curthread.
	 */
	flags |= UMA_ZONE_CONTIG;
#endif
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, flags);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");

	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
	callout_init(&thread_reap_callout, 1);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
	ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
}

/*
 * Place an unused thread on the zombie list.
 */
void
thread_zombie(struct thread *td)
{
	struct thread_domain_data *tdd;
	struct thread *ztd;

	tdd = &thread_domain_data[td->td_allocdomain];
	ztd = atomic_load_ptr(&tdd->tdd_zombies);
	/* Lock-free LIFO push; fcmpset reloads ztd on failure. */
	for (;;) {
		td->td_zombie = ztd;
		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
		    (uintptr_t *)&ztd, (uintptr_t)td))
			break;
		continue;
	}
}

/*
 * Release a thread that has exited after cpu_throw().
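 *
 * thread_exit() parks the dying thread in the PCPU deadthread slot;
 * once another thread collects it, thread_stash() moves it onto the
 * zombie list and drops the process's p_exitthreads count.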
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombies from the passed domain.
 */
static void
thread_reap_domain(struct thread_domain_data *tdd)
{
	struct thread *itd, *ntd;
	struct tidbatch tidbatch;
	struct credbatch credbatch;
	int tdcount;
	struct plimit *lim;
	int limcount;

	/*
	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
	 * but most of the time the list is empty.
	 */
	if (tdd->tdd_zombies == NULL)
		return;

	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
	    (uintptr_t)NULL);
	if (itd == NULL)
		return;

	/*
	 * Multiple CPUs can get here, the race is fine as ticks is only
	 * advisory.
	 */
	tdd->tdd_reapticks = ticks;

	tidbatch_prep(&tidbatch);
	credbatch_prep(&credbatch);
	tdcount = 0;
	lim = NULL;
	limcount = 0;

	while (itd != NULL) {
		ntd = itd->td_zombie;
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
		tidbatch_add(&tidbatch, itd);
		credbatch_add(&credbatch, itd);
		MPASS(itd->td_limit != NULL);
		if (lim != itd->td_limit) {
			if (limcount != 0) {
				lim_freen(lim, limcount);
				limcount = 0;
			}
		}
		lim = itd->td_limit;
		limcount++;
		thread_free_batched(itd);
		tidbatch_process(&tidbatch);
		credbatch_process(&credbatch);
		tdcount++;
		if (tdcount == 32) {
			thread_count_sub(tdcount);
			tdcount = 0;
		}
		itd = ntd;
	}

	tidbatch_final(&tidbatch);
	credbatch_final(&credbatch);
	if (tdcount != 0) {
		thread_count_sub(tdcount);
	}
	MPASS(limcount != 0);
	lim_freen(lim, limcount);
}

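/*
 * Note on the batching above: tidbatch, credbatch, and the tdcount
 * accumulator exist to amortize lock and atomic traffic, so tid_lock
 * and the global thread count are touched once per batch instead of
 * once per reaped thread.
 */
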
/*
 * Reap zombies from all domains.
 */
static void
thread_reap_all(void)
{
	struct thread_domain_data *tdd;
	int i, domain;

	domain = PCPU_GET(domain);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
		thread_reap_domain(tdd);
	}
}

/*
 * Reap zombies from the local domain.
 */
static void
thread_reap(void)
{
	struct thread_domain_data *tdd;
	int domain;

	domain = PCPU_GET(domain);
	tdd = &thread_domain_data[domain];

	thread_reap_domain(tdd);
}

static void
thread_reap_task_cb(void *arg __unused, int pending __unused)
{

	thread_reap_all();
}

static void
thread_reap_callout_cb(void *arg __unused)
{
	struct thread_domain_data *tdd;
	int i, cticks, lticks;
	bool wantreap;

	wantreap = false;
	cticks = atomic_load_int(&ticks);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[i];
		lticks = tdd->tdd_reapticks;
		if (tdd->tdd_zombies != NULL &&
		    (u_int)(cticks - lticks) > 5 * hz) {
			wantreap = true;
			break;
		}
	}

	if (wantreap)
		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}

/*
 * Calling this function guarantees that any thread that exited before
 * the call is reaped when the function returns.  By 'exited' we mean
 * a thread removed from the process linkage with thread_unlink().
 * Practically this means that the caller must lock/unlock the
 * corresponding process lock before the call, to synchronize with
 * thread_exit().
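 *
 * A minimal usage sketch (hypothetical caller that is about to tear
 * down state which recently exited threads may still reference):
 *
 *	PROC_LOCK(p);
 *	PROC_UNLOCK(p);
 *	thread_reap_barrier();
 *
 * On return, every thread that was unlinked before the lock/unlock
 * pair above has been fully reaped.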
 */
void
thread_reap_barrier(void)
{
	struct task *t;

	/*
	 * First do context switches to each CPU to ensure that all
	 * PCPU pc_deadthreads are moved to the zombie list.
	 */
	quiesce_all_cpus("", PDROP);

	/*
	 * Second, fire the task in the same thread as normal
	 * thread_reap() is done, to serialize reaping.
	 */
	t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
	TASK_INIT(t, 0, thread_reap_task_cb, t);
	taskqueue_enqueue(taskqueue_thread, t);
	taskqueue_drain(taskqueue_thread, t);
	free(t, M_TEMP);
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}

	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	bzero(&td->td_sa.args, sizeof(td->td_sa.args));
	kmsan_thread_alloc(td);
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
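 *
 * thread_free_batched() leaves returning the TID and the thread count
 * to the caller, which lets reapers batch those operations;
 * thread_free() below is the standalone variant that does both itself.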
 */
static void
thread_free_batched(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	/*
	 * Freeing of the TID and the thread count is handled by the caller.
	 */
	td->td_tid = -1;
	kmsan_thread_free(td);
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid = td->td_tid;
	thread_free_batched(td);
	tid_free(tid);
	thread_count_dec();
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcowsync();
	oldlimit = lim_cowsync();
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

void
thread_cow_synced(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS(td->td_cowgen != p->p_cowgen);
	MPASS(td->td_ucred == p->p_ucred);
	MPASS(td->td_limit == p->p_limit);
	td->td_cowgen = p->p_cowgen;
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that would not be on a
	 * new untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting... but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	TD_SET_STATE(td, TDS_INACTIVE);
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanup that may be needed in wait();
 * called with Giant, the proc lock, and the sched lock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
	 */
restart:
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2)) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, EINTR);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_BOUNDARY) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not necessarily performed at the user
		 * boundary, TDF_ALLPROCSUSP is used to avoid immediate
		 * un-suspend.
11656ddcc233SKonstantin Belousov */ 1166dd883e9aSKonstantin Belousov if (TD_IS_SUSPENDED(td2) && (td2->td_flags & 1167dd883e9aSKonstantin Belousov TDF_ALLPROCSUSP) == 0) { 116884cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td2, p, false); 116961a74c5cSJeff Roberson thread_lock(td2); 117061a74c5cSJeff Roberson goto restart; 117161a74c5cSJeff Roberson } 117261a74c5cSJeff Roberson if (TD_CAN_ABORT(td2)) { 11736ddcc233SKonstantin Belousov td2->td_flags |= TDF_ALLPROCSUSP; 11746ddcc233SKonstantin Belousov wakeup_swapper |= sleepq_abort(td2, ERESTART); 117561a74c5cSJeff Roberson return (wakeup_swapper); 11766ddcc233SKonstantin Belousov } 117707a9368aSKonstantin Belousov break; 117861a74c5cSJeff Roberson default: 117961a74c5cSJeff Roberson break; 118007a9368aSKonstantin Belousov } 118161a74c5cSJeff Roberson thread_unlock(td2); 118207a9368aSKonstantin Belousov return (wakeup_swapper); 118307a9368aSKonstantin Belousov } 118407a9368aSKonstantin Belousov 11855215b187SJeff Roberson /* 118644990b8cSJulian Elischer * Enforce single-threading. 118744990b8cSJulian Elischer * 118844990b8cSJulian Elischer * Returns 1 if the caller must abort (another thread is waiting to 118944990b8cSJulian Elischer * exit the process or similar). Process is locked! 119044990b8cSJulian Elischer * Returns 0 when you are successfully the only thread running. 119144990b8cSJulian Elischer * A process has successfully single threaded in the suspend mode when 119244990b8cSJulian Elischer * There are no threads in user mode. Threads in the kernel must be 119344990b8cSJulian Elischer * allowed to continue until they get to the user boundary. They may even 119444990b8cSJulian Elischer * copy out their return values and data before suspending. They may however be 1195e2668f55SMaxim Konovalov * accelerated in reaching the user boundary as we will wake up 119644990b8cSJulian Elischer * any sleeping threads that are interruptable. (PCATCH). 119744990b8cSJulian Elischer */ 119844990b8cSJulian Elischer int 11996ddcc233SKonstantin Belousov thread_single(struct proc *p, int mode) 120044990b8cSJulian Elischer { 120144990b8cSJulian Elischer struct thread *td; 120244990b8cSJulian Elischer struct thread *td2; 1203da7bbd2cSJohn Baldwin int remaining, wakeup_swapper; 120444990b8cSJulian Elischer 120544990b8cSJulian Elischer td = curthread; 12066ddcc233SKonstantin Belousov KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || 12076ddcc233SKonstantin Belousov mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, 12086ddcc233SKonstantin Belousov ("invalid mode %d", mode)); 12096ddcc233SKonstantin Belousov /* 12106ddcc233SKonstantin Belousov * If allowing non-ALLPROC singlethreading for non-curproc 12116ddcc233SKonstantin Belousov * callers, calc_remaining() and remain_for_mode() should be 12126ddcc233SKonstantin Belousov * adjusted to also account for td->td_proc != p. For now 12136ddcc233SKonstantin Belousov * this is not implemented because it is not used. 12146ddcc233SKonstantin Belousov */ 12156ddcc233SKonstantin Belousov KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) || 12166ddcc233SKonstantin Belousov (mode != SINGLE_ALLPROC && td->td_proc == p), 12176ddcc233SKonstantin Belousov ("mode %d proc %p curproc %p", mode, p, td->td_proc)); 121837814395SPeter Wemm mtx_assert(&Giant, MA_NOTOWNED); 121944990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 122044990b8cSJulian Elischer 1221d7a9e6e7SKonstantin Belousov /* 1222d7a9e6e7SKonstantin Belousov * Is someone already single threading? 
1223d7a9e6e7SKonstantin Belousov * Or may be singlethreading is not needed at all. 1224d7a9e6e7SKonstantin Belousov */ 1225d7a9e6e7SKonstantin Belousov if (mode == SINGLE_ALLPROC) { 1226d7a9e6e7SKonstantin Belousov while ((p->p_flag & P_STOPPED_SINGLE) != 0) { 1227d7a9e6e7SKonstantin Belousov if ((p->p_flag2 & P2_WEXIT) != 0) 1228d7a9e6e7SKonstantin Belousov return (1); 1229d7a9e6e7SKonstantin Belousov msleep(&p->p_flag, &p->p_mtx, PCATCH, "thrsgl", 0); 1230d7a9e6e7SKonstantin Belousov } 1231d7a9e6e7SKonstantin Belousov } else if ((p->p_flag & P_HADTHREADS) == 0) 123244990b8cSJulian Elischer return (0); 1233906ac69dSDavid Xu if (p->p_singlethread != NULL && p->p_singlethread != td) 123444990b8cSJulian Elischer return (1); 123544990b8cSJulian Elischer 1236906ac69dSDavid Xu if (mode == SINGLE_EXIT) { 1237906ac69dSDavid Xu p->p_flag |= P_SINGLE_EXIT; 1238906ac69dSDavid Xu p->p_flag &= ~P_SINGLE_BOUNDARY; 1239906ac69dSDavid Xu } else { 1240906ac69dSDavid Xu p->p_flag &= ~P_SINGLE_EXIT; 1241906ac69dSDavid Xu if (mode == SINGLE_BOUNDARY) 1242906ac69dSDavid Xu p->p_flag |= P_SINGLE_BOUNDARY; 1243906ac69dSDavid Xu else 1244906ac69dSDavid Xu p->p_flag &= ~P_SINGLE_BOUNDARY; 1245906ac69dSDavid Xu } 12461b4701feSKonstantin Belousov if (mode == SINGLE_ALLPROC) { 12476ddcc233SKonstantin Belousov p->p_flag |= P_TOTAL_STOP; 12481b4701feSKonstantin Belousov thread_lock(td); 12491b4701feSKonstantin Belousov td->td_flags |= TDF_DOING_SA; 12501b4701feSKonstantin Belousov thread_unlock(td); 12511b4701feSKonstantin Belousov } 12521279572aSDavid Xu p->p_flag |= P_STOPPED_SINGLE; 12537b4a950aSDavid Xu PROC_SLOCK(p); 1254112afcb2SJohn Baldwin p->p_singlethread = td; 125579799053SKonstantin Belousov remaining = calc_remaining(p, mode); 125607a9368aSKonstantin Belousov while (remaining != remain_for_mode(mode)) { 1257bf1a3220SDavid Xu if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) 1258bf1a3220SDavid Xu goto stopme; 1259da7bbd2cSJohn Baldwin wakeup_swapper = 0; 126044990b8cSJulian Elischer FOREACH_THREAD_IN_PROC(p, td2) { 126144990b8cSJulian Elischer if (td2 == td) 126244990b8cSJulian Elischer continue; 1263a54e85fdSJeff Roberson thread_lock(td2); 1264c6d31b83SKonstantin Belousov ast_sched_locked(td2, TDA_SUSPEND); 12656ddcc233SKonstantin Belousov if (TD_IS_INHIBITED(td2)) { 126607a9368aSKonstantin Belousov wakeup_swapper |= weed_inhib(mode, td2, p); 1267d8267df7SDavid Xu #ifdef SMP 1268b9009b17SKonstantin Belousov } else if (TD_IS_RUNNING(td2)) { 1269d8267df7SDavid Xu forward_signal(td2); 127061a74c5cSJeff Roberson thread_unlock(td2); 1271d8267df7SDavid Xu #endif 127261a74c5cSJeff Roberson } else 1273a54e85fdSJeff Roberson thread_unlock(td2); 12749d102777SJulian Elischer } 1275da7bbd2cSJohn Baldwin if (wakeup_swapper) 1276da7bbd2cSJohn Baldwin kick_proc0(); 127779799053SKonstantin Belousov remaining = calc_remaining(p, mode); 1278ec008e96SDavid Xu 12799d102777SJulian Elischer /* 12809d102777SJulian Elischer * Maybe we suspended some threads.. was it enough? 12819d102777SJulian Elischer */ 128207a9368aSKonstantin Belousov if (remaining == remain_for_mode(mode)) 12839d102777SJulian Elischer break; 12849d102777SJulian Elischer 1285bf1a3220SDavid Xu stopme: 128644990b8cSJulian Elischer /* 128744990b8cSJulian Elischer * Wake us up when everyone else has suspended. 1288e3b9bf71SJulian Elischer * In the mean time we suspend as well. 
128944990b8cSJulian Elischer */ 12906ddcc233SKonstantin Belousov thread_suspend_switch(td, p); 129179799053SKonstantin Belousov remaining = calc_remaining(p, mode); 129244990b8cSJulian Elischer } 1293906ac69dSDavid Xu if (mode == SINGLE_EXIT) { 129491599697SJulian Elischer /* 12958626a0ddSKonstantin Belousov * Convert the process to an unthreaded process. The 12968626a0ddSKonstantin Belousov * SINGLE_EXIT is called by exit1() or execve(), in 12978626a0ddSKonstantin Belousov * both cases other threads must be retired. 129891599697SJulian Elischer */ 12998626a0ddSKonstantin Belousov KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads")); 1300ed062c8dSJulian Elischer p->p_singlethread = NULL; 13018626a0ddSKonstantin Belousov p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS); 1302fd229b5bSKonstantin Belousov 1303fd229b5bSKonstantin Belousov /* 1304fd229b5bSKonstantin Belousov * Wait for any remaining threads to exit cpu_throw(). 1305fd229b5bSKonstantin Belousov */ 1306fd229b5bSKonstantin Belousov while (p->p_exitthreads != 0) { 1307fd229b5bSKonstantin Belousov PROC_SUNLOCK(p); 1308fd229b5bSKonstantin Belousov PROC_UNLOCK(p); 1309fd229b5bSKonstantin Belousov sched_relinquish(td); 1310fd229b5bSKonstantin Belousov PROC_LOCK(p); 1311fd229b5bSKonstantin Belousov PROC_SLOCK(p); 1312fd229b5bSKonstantin Belousov } 1313ac437c07SKonstantin Belousov } else if (mode == SINGLE_BOUNDARY) { 1314ac437c07SKonstantin Belousov /* 1315ac437c07SKonstantin Belousov * Wait until all suspended threads are removed from 1316ac437c07SKonstantin Belousov * the processors. The thread_suspend_check() 1317ac437c07SKonstantin Belousov * increments p_boundary_count while it is still 1318ac437c07SKonstantin Belousov * running, which makes it possible for the execve() 1319ac437c07SKonstantin Belousov * to destroy vmspace while our other threads are 1320ac437c07SKonstantin Belousov * still using the address space. 1321ac437c07SKonstantin Belousov * 1322ac437c07SKonstantin Belousov * We lock the thread, which is only allowed to 1323ac437c07SKonstantin Belousov * succeed after context switch code finished using 1324ac437c07SKonstantin Belousov * the address space. 
1325ac437c07SKonstantin Belousov */ 1326ac437c07SKonstantin Belousov FOREACH_THREAD_IN_PROC(p, td2) { 1327ac437c07SKonstantin Belousov if (td2 == td) 1328ac437c07SKonstantin Belousov continue; 1329ac437c07SKonstantin Belousov thread_lock(td2); 1330ac437c07SKonstantin Belousov KASSERT((td2->td_flags & TDF_BOUNDARY) != 0, 1331ac437c07SKonstantin Belousov ("td %p not on boundary", td2)); 1332ac437c07SKonstantin Belousov KASSERT(TD_IS_SUSPENDED(td2), 1333ac437c07SKonstantin Belousov ("td %p is not suspended", td2)); 1334ac437c07SKonstantin Belousov thread_unlock(td2); 1335ac437c07SKonstantin Belousov } 133691599697SJulian Elischer } 13377b4a950aSDavid Xu PROC_SUNLOCK(p); 13381b4701feSKonstantin Belousov if (mode == SINGLE_ALLPROC) { 13391b4701feSKonstantin Belousov thread_lock(td); 13401b4701feSKonstantin Belousov td->td_flags &= ~TDF_DOING_SA; 13411b4701feSKonstantin Belousov thread_unlock(td); 13421b4701feSKonstantin Belousov } 134344990b8cSJulian Elischer return (0); 134444990b8cSJulian Elischer } 134544990b8cSJulian Elischer 13468638fe7bSKonstantin Belousov bool 13478638fe7bSKonstantin Belousov thread_suspend_check_needed(void) 13488638fe7bSKonstantin Belousov { 13498638fe7bSKonstantin Belousov struct proc *p; 13508638fe7bSKonstantin Belousov struct thread *td; 13518638fe7bSKonstantin Belousov 13528638fe7bSKonstantin Belousov td = curthread; 13538638fe7bSKonstantin Belousov p = td->td_proc; 13548638fe7bSKonstantin Belousov PROC_LOCK_ASSERT(p, MA_OWNED); 13558638fe7bSKonstantin Belousov return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 && 13568638fe7bSKonstantin Belousov (td->td_dbgflags & TDB_SUSPEND) != 0)); 13578638fe7bSKonstantin Belousov } 13588638fe7bSKonstantin Belousov 135944990b8cSJulian Elischer /* 136044990b8cSJulian Elischer * Called in from locations that can safely check to see 136144990b8cSJulian Elischer * whether we have to suspend or at least throttle for a 136244990b8cSJulian Elischer * single-thread event (e.g. fork). 136344990b8cSJulian Elischer * 136444990b8cSJulian Elischer * Such locations include userret(). 136544990b8cSJulian Elischer * If the "return_instead" argument is non zero, the thread must be able to 136644990b8cSJulian Elischer * accept 0 (caller may continue), or 1 (caller must abort) as a result. 136744990b8cSJulian Elischer * 136844990b8cSJulian Elischer * The 'return_instead' argument tells the function if it may do a 136944990b8cSJulian Elischer * thread_exit() or suspend, or whether the caller must abort and back 137044990b8cSJulian Elischer * out instead. 137144990b8cSJulian Elischer * 137244990b8cSJulian Elischer * If the thread that set the single_threading request has set the 137344990b8cSJulian Elischer * P_SINGLE_EXIT bit in the process flags then this call will never return 137444990b8cSJulian Elischer * if 'return_instead' is false, but will exit. 137544990b8cSJulian Elischer * 137644990b8cSJulian Elischer * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 137744990b8cSJulian Elischer *---------------+--------------------+--------------------- 137844990b8cSJulian Elischer * 0 | returns 0 | returns 0 or 1 1379353374b5SJohn Baldwin * | when ST ends | immediately 138044990b8cSJulian Elischer *---------------+--------------------+--------------------- 138144990b8cSJulian Elischer * 1 | thread exits | returns 1 1382353374b5SJohn Baldwin * | | immediately 138344990b8cSJulian Elischer * 0 = thread_exit() or suspension ok, 138444990b8cSJulian Elischer * other = return error instead of stopping the thread. 
138544990b8cSJulian Elischer * 138644990b8cSJulian Elischer * While a full suspension is under effect, even a single threading 138744990b8cSJulian Elischer * thread would be suspended if it made this call (but it shouldn't). 138844990b8cSJulian Elischer * This call should only be made from places where 138944990b8cSJulian Elischer * thread_exit() would be safe as that may be the outcome unless 139044990b8cSJulian Elischer * return_instead is set. 139144990b8cSJulian Elischer */ 139244990b8cSJulian Elischer int 139344990b8cSJulian Elischer thread_suspend_check(int return_instead) 139444990b8cSJulian Elischer { 1395ecafb24bSJuli Mallett struct thread *td; 1396ecafb24bSJuli Mallett struct proc *p; 139746e47c4fSKonstantin Belousov int wakeup_swapper; 139844990b8cSJulian Elischer 139944990b8cSJulian Elischer td = curthread; 140044990b8cSJulian Elischer p = td->td_proc; 140137814395SPeter Wemm mtx_assert(&Giant, MA_NOTOWNED); 140244990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 14038638fe7bSKonstantin Belousov while (thread_suspend_check_needed()) { 14041279572aSDavid Xu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 140544990b8cSJulian Elischer KASSERT(p->p_singlethread != NULL, 140644990b8cSJulian Elischer ("singlethread not set")); 140744990b8cSJulian Elischer /* 1408e3b9bf71SJulian Elischer * The only suspension in action is a 1409e3b9bf71SJulian Elischer * single-threading. Single threader need not stop. 1410bd07998eSKonstantin Belousov * It is safe to access p->p_singlethread unlocked 1411bd07998eSKonstantin Belousov * because it can only be set to our address by us. 141244990b8cSJulian Elischer */ 1413e3b9bf71SJulian Elischer if (p->p_singlethread == td) 141444990b8cSJulian Elischer return (0); /* Exempt from stopping. */ 141544990b8cSJulian Elischer } 141645a4bfa1SDavid Xu if ((p->p_flag & P_SINGLE_EXIT) && return_instead) 141794f0972bSDavid Xu return (EINTR); 141844990b8cSJulian Elischer 1419906ac69dSDavid Xu /* Should we goto user boundary if we didn't come from there? */ 1420906ac69dSDavid Xu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && 1421906ac69dSDavid Xu (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) 142294f0972bSDavid Xu return (ERESTART); 1423906ac69dSDavid Xu 142444990b8cSJulian Elischer /* 14253077f938SKonstantin Belousov * Ignore suspend requests if they are deferred. 1426d071a6faSJohn Baldwin */ 14273077f938SKonstantin Belousov if ((td->td_flags & TDF_SBDRY) != 0) { 1428d071a6faSJohn Baldwin KASSERT(return_instead, 1429d071a6faSJohn Baldwin ("TDF_SBDRY set for unsafe thread_suspend_check")); 143046e47c4fSKonstantin Belousov KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 143146e47c4fSKonstantin Belousov (TDF_SEINTR | TDF_SERESTART), 143246e47c4fSKonstantin Belousov ("both TDF_SEINTR and TDF_SERESTART")); 143346e47c4fSKonstantin Belousov return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0); 1434d071a6faSJohn Baldwin } 1435d071a6faSJohn Baldwin 1436d071a6faSJohn Baldwin /* 143744990b8cSJulian Elischer * If the process is waiting for us to exit, 143844990b8cSJulian Elischer * this thread should just suicide. 14391279572aSDavid Xu * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. 144044990b8cSJulian Elischer */ 1441cf7d9a8cSDavid Xu if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { 1442cf7d9a8cSDavid Xu PROC_UNLOCK(p); 144391d1786fSDmitry Chagin 144491d1786fSDmitry Chagin /* 144591d1786fSDmitry Chagin * Allow Linux emulation layer to do some work 144691d1786fSDmitry Chagin * before thread suicide. 
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND);
		PROC_LOCK(p);
	}
	return (0);
}

/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or whether it should return ERESTART so that the
 * request is processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to casueword(9) failure (rv == 1), we
 * should handle the stop requests there, with the exception of cases
 * when the thread owns a kernel resource, for instance has busied the
 * umtx key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, when retrying the whole lock
 * operation, it is better not to stop there but to delegate the
 * handling to ast.
 *
 * If the request is for thread termination P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
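 *
 * Purely as an illustration (a hypothetical caller, not code from this
 * file), the casueword(9) retry pattern described above would look
 * roughly like the following, handling a pending stop on each failed
 * compare-and-set attempt while no kernel resource is held:
 *
 *	for (;;) {
 *		rv = casueword32(uaddr, oldval, &oldval, newval);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}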
1502478ca4b0SKonstantin Belousov */ 1503478ca4b0SKonstantin Belousov int 1504478ca4b0SKonstantin Belousov thread_check_susp(struct thread *td, bool sleep) 1505478ca4b0SKonstantin Belousov { 1506478ca4b0SKonstantin Belousov struct proc *p; 1507478ca4b0SKonstantin Belousov int error; 1508478ca4b0SKonstantin Belousov 1509478ca4b0SKonstantin Belousov /* 1510c6d31b83SKonstantin Belousov * The check for TDA_SUSPEND is racy, but it is enough to 1511478ca4b0SKonstantin Belousov * eventually break the lockstep loop. 1512478ca4b0SKonstantin Belousov */ 1513c6d31b83SKonstantin Belousov if (!td_ast_pending(td, TDA_SUSPEND)) 1514478ca4b0SKonstantin Belousov return (0); 1515478ca4b0SKonstantin Belousov error = 0; 1516478ca4b0SKonstantin Belousov p = td->td_proc; 1517478ca4b0SKonstantin Belousov PROC_LOCK(p); 1518478ca4b0SKonstantin Belousov if (p->p_flag & P_SINGLE_EXIT) 1519478ca4b0SKonstantin Belousov error = EINTR; 1520478ca4b0SKonstantin Belousov else if (P_SHOULDSTOP(p) || 1521478ca4b0SKonstantin Belousov ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) 1522478ca4b0SKonstantin Belousov error = sleep ? thread_suspend_check(0) : ERESTART; 1523478ca4b0SKonstantin Belousov PROC_UNLOCK(p); 1524478ca4b0SKonstantin Belousov return (error); 1525478ca4b0SKonstantin Belousov } 1526478ca4b0SKonstantin Belousov 152735c32a76SDavid Xu void 15286ddcc233SKonstantin Belousov thread_suspend_switch(struct thread *td, struct proc *p) 1529a54e85fdSJeff Roberson { 1530a54e85fdSJeff Roberson 1531a54e85fdSJeff Roberson KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 1532a54e85fdSJeff Roberson PROC_LOCK_ASSERT(p, MA_OWNED); 15337b4a950aSDavid Xu PROC_SLOCK_ASSERT(p, MA_OWNED); 1534a54e85fdSJeff Roberson /* 1535a54e85fdSJeff Roberson * We implement thread_suspend_one in stages here to avoid 1536a54e85fdSJeff Roberson * dropping the proc lock while the thread lock is owned. 
1537a54e85fdSJeff Roberson */ 15386ddcc233SKonstantin Belousov if (p == td->td_proc) { 1539a54e85fdSJeff Roberson thread_stopped(p); 1540a54e85fdSJeff Roberson p->p_suspcount++; 15416ddcc233SKonstantin Belousov } 15423f9be10eSDavid Xu PROC_UNLOCK(p); 15437b4a950aSDavid Xu thread_lock(td); 1544c6d31b83SKonstantin Belousov ast_unsched_locked(td, TDA_SUSPEND); 1545a54e85fdSJeff Roberson TD_SET_SUSPENDED(td); 1546c5aa6b58SJeff Roberson sched_sleep(td, 0); 15477b4a950aSDavid Xu PROC_SUNLOCK(p); 1548a54e85fdSJeff Roberson DROP_GIANT(); 1549686bcb5cSJeff Roberson mi_switch(SW_VOL | SWT_SUSPEND); 1550a54e85fdSJeff Roberson PICKUP_GIANT(); 1551a54e85fdSJeff Roberson PROC_LOCK(p); 15527b4a950aSDavid Xu PROC_SLOCK(p); 1553a54e85fdSJeff Roberson } 1554a54e85fdSJeff Roberson 1555a54e85fdSJeff Roberson void 155635c32a76SDavid Xu thread_suspend_one(struct thread *td) 155735c32a76SDavid Xu { 15586ddcc233SKonstantin Belousov struct proc *p; 155935c32a76SDavid Xu 15606ddcc233SKonstantin Belousov p = td->td_proc; 15617b4a950aSDavid Xu PROC_SLOCK_ASSERT(p, MA_OWNED); 1562a54e85fdSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1563e574e444SDavid Xu KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 156435c32a76SDavid Xu p->p_suspcount++; 1565c6d31b83SKonstantin Belousov ast_unsched_locked(td, TDA_SUSPEND); 156671fad9fdSJulian Elischer TD_SET_SUSPENDED(td); 1567c5aa6b58SJeff Roberson sched_sleep(td, 0); 156835c32a76SDavid Xu } 156935c32a76SDavid Xu 157084cdea97SKonstantin Belousov static int 157184cdea97SKonstantin Belousov thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary) 157235c32a76SDavid Xu { 157335c32a76SDavid Xu 1574a54e85fdSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1575ad1e7d28SJulian Elischer KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); 157671fad9fdSJulian Elischer TD_CLR_SUSPENDED(td); 15776ddcc233SKonstantin Belousov td->td_flags &= ~TDF_ALLPROCSUSP; 15786ddcc233SKonstantin Belousov if (td->td_proc == p) { 15796ddcc233SKonstantin Belousov PROC_SLOCK_ASSERT(p, MA_OWNED); 158035c32a76SDavid Xu p->p_suspcount--; 158184cdea97SKonstantin Belousov if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) { 158284cdea97SKonstantin Belousov td->td_flags &= ~TDF_BOUNDARY; 158384cdea97SKonstantin Belousov p->p_boundary_count--; 158484cdea97SKonstantin Belousov } 15856ddcc233SKonstantin Belousov } 158661a74c5cSJeff Roberson return (setrunnable(td, 0)); 158735c32a76SDavid Xu } 158835c32a76SDavid Xu 1589af928fdeSKonstantin Belousov void 1590af928fdeSKonstantin Belousov thread_run_flash(struct thread *td) 1591af928fdeSKonstantin Belousov { 1592af928fdeSKonstantin Belousov struct proc *p; 1593af928fdeSKonstantin Belousov 1594af928fdeSKonstantin Belousov p = td->td_proc; 1595af928fdeSKonstantin Belousov PROC_LOCK_ASSERT(p, MA_OWNED); 1596af928fdeSKonstantin Belousov 1597af928fdeSKonstantin Belousov if (TD_ON_SLEEPQ(td)) 1598af928fdeSKonstantin Belousov sleepq_remove_nested(td); 1599af928fdeSKonstantin Belousov else 1600af928fdeSKonstantin Belousov thread_lock(td); 1601af928fdeSKonstantin Belousov 1602af928fdeSKonstantin Belousov THREAD_LOCK_ASSERT(td, MA_OWNED); 1603af928fdeSKonstantin Belousov KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); 1604af928fdeSKonstantin Belousov 1605af928fdeSKonstantin Belousov TD_CLR_SUSPENDED(td); 1606af928fdeSKonstantin Belousov PROC_SLOCK(p); 1607af928fdeSKonstantin Belousov MPASS(p->p_suspcount > 0); 1608af928fdeSKonstantin Belousov p->p_suspcount--; 1609af928fdeSKonstantin Belousov PROC_SUNLOCK(p); 1610af928fdeSKonstantin Belousov if 
(setrunnable(td, 0)) 1611af928fdeSKonstantin Belousov kick_proc0(); 1612af928fdeSKonstantin Belousov } 1613af928fdeSKonstantin Belousov 161444990b8cSJulian Elischer /* 161544990b8cSJulian Elischer * Allow all threads blocked by single threading to continue running. 161644990b8cSJulian Elischer */ 161744990b8cSJulian Elischer void 161844990b8cSJulian Elischer thread_unsuspend(struct proc *p) 161944990b8cSJulian Elischer { 162044990b8cSJulian Elischer struct thread *td; 16217847a9daSJohn Baldwin int wakeup_swapper; 162244990b8cSJulian Elischer 162344990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 16247b4a950aSDavid Xu PROC_SLOCK_ASSERT(p, MA_OWNED); 16257847a9daSJohn Baldwin wakeup_swapper = 0; 162644990b8cSJulian Elischer if (!P_SHOULDSTOP(p)) { 1627ad1e7d28SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1628a54e85fdSJeff Roberson thread_lock(td); 16291b4701feSKonstantin Belousov if (TD_IS_SUSPENDED(td) && (td->td_flags & 16301b4701feSKonstantin Belousov TDF_DOING_SA) == 0) { 163184cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td, p, 163284cdea97SKonstantin Belousov true); 163361a74c5cSJeff Roberson } else 1634a54e85fdSJeff Roberson thread_unlock(td); 1635ad1e7d28SJulian Elischer } 163684cdea97SKonstantin Belousov } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && 163784cdea97SKonstantin Belousov p->p_numthreads == p->p_suspcount) { 163844990b8cSJulian Elischer /* 163944990b8cSJulian Elischer * Stopping everything also did the job for the single 164044990b8cSJulian Elischer * threading request. Now we've downgraded to single-threaded, 164144990b8cSJulian Elischer * let it continue. 164244990b8cSJulian Elischer */ 16436ddcc233SKonstantin Belousov if (p->p_singlethread->td_proc == p) { 1644a54e85fdSJeff Roberson thread_lock(p->p_singlethread); 16456ddcc233SKonstantin Belousov wakeup_swapper = thread_unsuspend_one( 164684cdea97SKonstantin Belousov p->p_singlethread, p, false); 164744990b8cSJulian Elischer } 16486ddcc233SKonstantin Belousov } 16497847a9daSJohn Baldwin if (wakeup_swapper) 16507847a9daSJohn Baldwin kick_proc0(); 165144990b8cSJulian Elischer } 165244990b8cSJulian Elischer 1653ed062c8dSJulian Elischer /* 1654ed062c8dSJulian Elischer * End the single threading mode.. 
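 *
 * For illustration only (a hypothetical caller, not code from this file):
 * thread_single_end() is normally paired with thread_single(), bracketing
 * the work that requires a quiescent process and passing the same mode to
 * both calls, e.g.:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);
 *	}
 *	PROC_UNLOCK(p);
 *	... operate on the now single-threaded process ...
 *	PROC_LOCK(p);
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);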
1655ed062c8dSJulian Elischer */ 165644990b8cSJulian Elischer void 16576ddcc233SKonstantin Belousov thread_single_end(struct proc *p, int mode) 165844990b8cSJulian Elischer { 165944990b8cSJulian Elischer struct thread *td; 16607847a9daSJohn Baldwin int wakeup_swapper; 166144990b8cSJulian Elischer 16626ddcc233SKonstantin Belousov KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || 16636ddcc233SKonstantin Belousov mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, 16646ddcc233SKonstantin Belousov ("invalid mode %d", mode)); 166544990b8cSJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 16666ddcc233SKonstantin Belousov KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) || 16676ddcc233SKonstantin Belousov (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0), 16686ddcc233SKonstantin Belousov ("mode %d does not match P_TOTAL_STOP", mode)); 166984cdea97SKonstantin Belousov KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread, 167084cdea97SKonstantin Belousov ("thread_single_end from other thread %p %p", 167184cdea97SKonstantin Belousov curthread, p->p_singlethread)); 167284cdea97SKonstantin Belousov KASSERT(mode != SINGLE_BOUNDARY || 167384cdea97SKonstantin Belousov (p->p_flag & P_SINGLE_BOUNDARY) != 0, 167484cdea97SKonstantin Belousov ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag)); 16756ddcc233SKonstantin Belousov p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY | 16766ddcc233SKonstantin Belousov P_TOTAL_STOP); 16777b4a950aSDavid Xu PROC_SLOCK(p); 167844990b8cSJulian Elischer p->p_singlethread = NULL; 16797847a9daSJohn Baldwin wakeup_swapper = 0; 168049539972SJulian Elischer /* 16817847a9daSJohn Baldwin * If there are other threads they may now run, 168249539972SJulian Elischer * unless of course there is a blanket 'stop order' 168349539972SJulian Elischer * on the process. The single threader must be allowed 168449539972SJulian Elischer * to continue however as this is a bad place to stop. 168549539972SJulian Elischer */ 16866ddcc233SKonstantin Belousov if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) { 1687ad1e7d28SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1688a54e85fdSJeff Roberson thread_lock(td); 1689ad1e7d28SJulian Elischer if (TD_IS_SUSPENDED(td)) { 169084cdea97SKonstantin Belousov wakeup_swapper |= thread_unsuspend_one(td, p, 16912d5ef216SMark Johnston true); 169261a74c5cSJeff Roberson } else 1693a54e85fdSJeff Roberson thread_unlock(td); 169449539972SJulian Elischer } 1695ad1e7d28SJulian Elischer } 169684cdea97SKonstantin Belousov KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0, 169784cdea97SKonstantin Belousov ("inconsistent boundary count %d", p->p_boundary_count)); 16987b4a950aSDavid Xu PROC_SUNLOCK(p); 16997847a9daSJohn Baldwin if (wakeup_swapper) 17007847a9daSJohn Baldwin kick_proc0(); 1701d7a9e6e7SKonstantin Belousov wakeup(&p->p_flag); 170249539972SJulian Elischer } 17034fc21c09SDaniel Eischen 1704aae3547bSMateusz Guzik /* 1705aae3547bSMateusz Guzik * Locate a thread by number and return with proc lock held. 1706aae3547bSMateusz Guzik * 1707aae3547bSMateusz Guzik * thread exit establishes proc -> tidhash lock ordering, but lookup 1708aae3547bSMateusz Guzik * takes tidhash first and needs to return locked proc. 
1709aae3547bSMateusz Guzik * 1710aae3547bSMateusz Guzik * The problem is worked around by relying on type-safety of both 1711aae3547bSMateusz Guzik * structures and doing the work in 2 steps: 1712aae3547bSMateusz Guzik * - tidhash-locked lookup which saves both thread and proc pointers 1713aae3547bSMateusz Guzik * - proc-locked verification that the found thread still matches 1714aae3547bSMateusz Guzik */ 1715aae3547bSMateusz Guzik static bool 1716aae3547bSMateusz Guzik tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp) 1717cf7d9a8cSDavid Xu { 1718cf7d9a8cSDavid Xu #define RUN_THRESH 16 1719aae3547bSMateusz Guzik struct proc *p; 1720cf7d9a8cSDavid Xu struct thread *td; 1721aae3547bSMateusz Guzik int run; 1722aae3547bSMateusz Guzik bool locked; 1723cf7d9a8cSDavid Xu 1724aae3547bSMateusz Guzik run = 0; 172526007fe3SMateusz Guzik rw_rlock(TIDHASHLOCK(tid)); 1726aae3547bSMateusz Guzik locked = true; 1727cf7d9a8cSDavid Xu LIST_FOREACH(td, TIDHASH(tid), td_hash) { 1728aae3547bSMateusz Guzik if (td->td_tid != tid) { 1729aae3547bSMateusz Guzik run++; 1730aae3547bSMateusz Guzik continue; 1731cf7d9a8cSDavid Xu } 1732aae3547bSMateusz Guzik p = td->td_proc; 1733aae3547bSMateusz Guzik if (pid != -1 && p->p_pid != pid) { 1734cf7d9a8cSDavid Xu td = NULL; 1735cf7d9a8cSDavid Xu break; 1736cf7d9a8cSDavid Xu } 1737cf7d9a8cSDavid Xu if (run > RUN_THRESH) { 173826007fe3SMateusz Guzik if (rw_try_upgrade(TIDHASHLOCK(tid))) { 1739cf7d9a8cSDavid Xu LIST_REMOVE(td, td_hash); 1740cf7d9a8cSDavid Xu LIST_INSERT_HEAD(TIDHASH(td->td_tid), 1741cf7d9a8cSDavid Xu td, td_hash); 174226007fe3SMateusz Guzik rw_wunlock(TIDHASHLOCK(tid)); 1743aae3547bSMateusz Guzik locked = false; 1744aae3547bSMateusz Guzik break; 1745cf7d9a8cSDavid Xu } 1746cf7d9a8cSDavid Xu } 1747cf7d9a8cSDavid Xu break; 1748cf7d9a8cSDavid Xu } 1749aae3547bSMateusz Guzik if (locked) 175026007fe3SMateusz Guzik rw_runlock(TIDHASHLOCK(tid)); 1751aae3547bSMateusz Guzik if (td == NULL) 1752aae3547bSMateusz Guzik return (false); 1753aae3547bSMateusz Guzik *pp = p; 1754aae3547bSMateusz Guzik *tdp = td; 1755aae3547bSMateusz Guzik return (true); 1756aae3547bSMateusz Guzik } 1757aae3547bSMateusz Guzik 1758aae3547bSMateusz Guzik struct thread * 1759aae3547bSMateusz Guzik tdfind(lwpid_t tid, pid_t pid) 1760aae3547bSMateusz Guzik { 1761aae3547bSMateusz Guzik struct proc *p; 1762aae3547bSMateusz Guzik struct thread *td; 1763aae3547bSMateusz Guzik 1764aae3547bSMateusz Guzik td = curthread; 1765aae3547bSMateusz Guzik if (td->td_tid == tid) { 1766aae3547bSMateusz Guzik if (pid != -1 && td->td_proc->p_pid != pid) 1767aae3547bSMateusz Guzik return (NULL); 1768aae3547bSMateusz Guzik PROC_LOCK(td->td_proc); 1769cf7d9a8cSDavid Xu return (td); 1770cf7d9a8cSDavid Xu } 1771cf7d9a8cSDavid Xu 1772aae3547bSMateusz Guzik for (;;) { 1773aae3547bSMateusz Guzik if (!tdfind_hash(tid, pid, &p, &td)) 1774aae3547bSMateusz Guzik return (NULL); 1775aae3547bSMateusz Guzik PROC_LOCK(p); 1776aae3547bSMateusz Guzik if (td->td_tid != tid) { 1777aae3547bSMateusz Guzik PROC_UNLOCK(p); 1778aae3547bSMateusz Guzik continue; 1779aae3547bSMateusz Guzik } 1780aae3547bSMateusz Guzik if (td->td_proc != p) { 1781aae3547bSMateusz Guzik PROC_UNLOCK(p); 1782aae3547bSMateusz Guzik continue; 1783aae3547bSMateusz Guzik } 1784aae3547bSMateusz Guzik if (p->p_state == PRS_NEW) { 1785aae3547bSMateusz Guzik PROC_UNLOCK(p); 1786aae3547bSMateusz Guzik return (NULL); 1787aae3547bSMateusz Guzik } 1788aae3547bSMateusz Guzik return (td); 1789aae3547bSMateusz Guzik } 1790aae3547bSMateusz Guzik } 
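/*
 * Illustrative sketch only (a hypothetical caller, not code from this
 * file): tdfind() returns with the owning process locked, so a typical
 * lookup of a thread id resolves the thread, acts on it, and then drops
 * the proc lock:
 *
 *	struct thread *ttd;
 *
 *	ttd = tdfind(tid, pid);		(a pid of -1 matches any process)
 *	if (ttd == NULL)
 *		return (ESRCH);
 *	... use ttd while ttd->td_proc remains locked ...
 *	PROC_UNLOCK(ttd->td_proc);
 */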
1791aae3547bSMateusz Guzik 1792cf7d9a8cSDavid Xu void 1793cf7d9a8cSDavid Xu tidhash_add(struct thread *td) 1794cf7d9a8cSDavid Xu { 179526007fe3SMateusz Guzik rw_wlock(TIDHASHLOCK(td->td_tid)); 1796cf7d9a8cSDavid Xu LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); 179726007fe3SMateusz Guzik rw_wunlock(TIDHASHLOCK(td->td_tid)); 1798cf7d9a8cSDavid Xu } 1799cf7d9a8cSDavid Xu 1800cf7d9a8cSDavid Xu void 1801cf7d9a8cSDavid Xu tidhash_remove(struct thread *td) 1802cf7d9a8cSDavid Xu { 180326007fe3SMateusz Guzik 180426007fe3SMateusz Guzik rw_wlock(TIDHASHLOCK(td->td_tid)); 1805cf7d9a8cSDavid Xu LIST_REMOVE(td, td_hash); 180626007fe3SMateusz Guzik rw_wunlock(TIDHASHLOCK(td->td_tid)); 1807cf7d9a8cSDavid Xu } 1808