xref: /freebsd/sys/kern/kern_thread.c (revision 2008043f386721d58158e37e0d7e50df8095942d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
5  *  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30 
31 #include "opt_witness.h"
32 #include "opt_hwpmc_hooks.h"
33 
34 #include <sys/cdefs.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/msan.h>
40 #include <sys/mutex.h>
41 #include <sys/proc.h>
42 #include <sys/bitstring.h>
43 #include <sys/epoch.h>
44 #include <sys/rangelock.h>
45 #include <sys/resourcevar.h>
46 #include <sys/sdt.h>
47 #include <sys/smp.h>
48 #include <sys/sched.h>
49 #include <sys/sleepqueue.h>
50 #include <sys/selinfo.h>
51 #include <sys/syscallsubr.h>
52 #include <sys/dtrace_bsd.h>
53 #include <sys/sysent.h>
54 #include <sys/turnstile.h>
55 #include <sys/taskqueue.h>
56 #include <sys/ktr.h>
57 #include <sys/rwlock.h>
58 #include <sys/umtxvar.h>
59 #include <sys/vmmeter.h>
60 #include <sys/cpuset.h>
61 #ifdef	HWPMC_HOOKS
62 #include <sys/pmckern.h>
63 #endif
64 #include <sys/priv.h>
65 
66 #include <security/audit/audit.h>
67 
68 #include <vm/pmap.h>
69 #include <vm/vm.h>
70 #include <vm/vm_extern.h>
71 #include <vm/uma.h>
72 #include <vm/vm_phys.h>
73 #include <sys/eventhandler.h>
74 
75 /*
76  * Asserts below verify the stability of struct thread and struct proc
77  * layout, as exposed by KBI to modules.  On head, the KBI is allowed
78  * to drift; changes to the structures must be accompanied by
79  * corresponding updates to the asserts.
80  *
81  * On the stable branches after KBI freeze, conditions must not be
82  * violated.  Typically new fields are moved to the end of the
83  * structures.
84  */
85 #ifdef __amd64__
86 _Static_assert(offsetof(struct thread, td_flags) == 0x108,
87     "struct thread KBI td_flags");
88 _Static_assert(offsetof(struct thread, td_pflags) == 0x114,
89     "struct thread KBI td_pflags");
90 _Static_assert(offsetof(struct thread, td_frame) == 0x4b8,
91     "struct thread KBI td_frame");
92 _Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
93     "struct thread KBI td_emuldata");
94 _Static_assert(offsetof(struct proc, p_flag) == 0xb8,
95     "struct proc KBI p_flag");
96 _Static_assert(offsetof(struct proc, p_pid) == 0xc4,
97     "struct proc KBI p_pid");
98 _Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
99     "struct proc KBI p_filemon");
100 _Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
101     "struct proc KBI p_comm");
102 _Static_assert(offsetof(struct proc, p_emuldata) == 0x4d0,
103     "struct proc KBI p_emuldata");
104 #endif
105 #ifdef __i386__
106 _Static_assert(offsetof(struct thread, td_flags) == 0x9c,
107     "struct thread KBI td_flags");
108 _Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
109     "struct thread KBI td_pflags");
110 _Static_assert(offsetof(struct thread, td_frame) == 0x318,
111     "struct thread KBI td_frame");
112 _Static_assert(offsetof(struct thread, td_emuldata) == 0x35c,
113     "struct thread KBI td_emuldata");
114 _Static_assert(offsetof(struct proc, p_flag) == 0x6c,
115     "struct proc KBI p_flag");
116 _Static_assert(offsetof(struct proc, p_pid) == 0x78,
117     "struct proc KBI p_pid");
118 _Static_assert(offsetof(struct proc, p_filemon) == 0x270,
119     "struct proc KBI p_filemon");
120 _Static_assert(offsetof(struct proc, p_comm) == 0x284,
121     "struct proc KBI p_comm");
122 _Static_assert(offsetof(struct proc, p_emuldata) == 0x318,
123     "struct proc KBI p_emuldata");
124 #endif
125 
126 SDT_PROVIDER_DECLARE(proc);
127 SDT_PROBE_DEFINE(proc, , , lwp__exit);
128 
129 /*
130  * Thread-related storage.
131  */
132 static uma_zone_t thread_zone;
133 
134 struct thread_domain_data {
135 	struct thread	*tdd_zombies;
136 	int		tdd_reapticks;
137 } __aligned(CACHE_LINE_SIZE);
138 
139 static struct thread_domain_data thread_domain_data[MAXMEMDOM];
140 
141 static struct task	thread_reap_task;
142 static struct callout  	thread_reap_callout;
143 
144 static void thread_zombie(struct thread *);
145 static void thread_reap(void);
146 static void thread_reap_all(void);
147 static void thread_reap_task_cb(void *, int);
148 static void thread_reap_callout_cb(void *);
149 static int thread_unsuspend_one(struct thread *td, struct proc *p,
150     bool boundary);
151 static void thread_free_batched(struct thread *td);
152 
153 static __exclusive_cache_line struct mtx tid_lock;
154 static bitstr_t *tid_bitmap;
155 
156 static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
157 
158 static int maxthread;
159 SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
160     &maxthread, 0, "Maximum number of threads");
161 
162 static __exclusive_cache_line int nthreads;
163 
164 static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
165 static u_long	tidhash;
166 static u_long	tidhashlock;
167 static struct	rwlock *tidhashtbl_lock;
168 #define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
169 #define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])
170 
171 EVENTHANDLER_LIST_DEFINE(thread_ctor);
172 EVENTHANDLER_LIST_DEFINE(thread_dtor);
173 EVENTHANDLER_LIST_DEFINE(thread_init);
174 EVENTHANDLER_LIST_DEFINE(thread_fini);
175 
176 static bool
177 thread_count_inc_try(void)
178 {
179 	int nthreads_new;
180 
181 	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
182 	if (nthreads_new >= maxthread - 100) {
183 		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
184 		    nthreads_new >= maxthread) {
185 			atomic_subtract_int(&nthreads, 1);
186 			return (false);
187 		}
188 	}
189 	return (true);
190 }
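
/*
 * Illustrative note (not part of this file): the check above keeps a
 * headroom of 100 thread slots for privileged credentials.  For
 * example, with maxthread = 1000 an unprivileged allocation starts
 * failing once the new count reaches 900, while a credential that
 * passes priv_check_cred(..., PRIV_MAXPROC) may keep allocating up to
 * the full limit of 1000.
 */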
191 
192 static bool
193 thread_count_inc(void)
194 {
195 	static struct timeval lastfail;
196 	static int curfail;
197 
198 	thread_reap();
199 	if (thread_count_inc_try()) {
200 		return (true);
201 	}
202 
203 	thread_reap_all();
204 	if (thread_count_inc_try()) {
205 		return (true);
206 	}
207 
208 	if (ppsratecheck(&lastfail, &curfail, 1)) {
209 		printf("maxthread limit exceeded by uid %u "
210 		    "(pid %d); consider increasing kern.maxthread\n",
211 		    curthread->td_ucred->cr_ruid, curproc->p_pid);
212 	}
213 	return (false);
214 }
215 
216 static void
217 thread_count_sub(int n)
218 {
219 
220 	atomic_subtract_int(&nthreads, n);
221 }
222 
223 static void
224 thread_count_dec(void)
225 {
226 
227 	thread_count_sub(1);
228 }
229 
230 static lwpid_t
231 tid_alloc(void)
232 {
233 	static lwpid_t trytid;
234 	lwpid_t tid;
235 
236 	mtx_lock(&tid_lock);
237 	/*
238 	 * It is an invariant that the bitmap is big enough to hold maxthread
239 	 * IDs. If we got to this point there has to be at least one free.
240 	 */
241 	if (trytid >= maxthread)
242 		trytid = 0;
243 	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
244 	if (tid == -1) {
245 		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
246 		trytid = 0;
247 		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
248 		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
249 	}
250 	bit_set(tid_bitmap, tid);
251 	trytid = tid + 1;
252 	mtx_unlock(&tid_lock);
253 	return (tid + NO_PID);
254 }
255 
256 static void
257 tid_free_locked(lwpid_t rtid)
258 {
259 	lwpid_t tid;
260 
261 	mtx_assert(&tid_lock, MA_OWNED);
262 	KASSERT(rtid >= NO_PID,
263 	    ("%s: invalid tid %d\n", __func__, rtid));
264 	tid = rtid - NO_PID;
265 	KASSERT(bit_test(tid_bitmap, tid) != 0,
266 	    ("thread ID %d not allocated\n", rtid));
267 	bit_clear(tid_bitmap, tid);
268 }
269 
270 static void
271 tid_free(lwpid_t rtid)
272 {
273 
274 	mtx_lock(&tid_lock);
275 	tid_free_locked(rtid);
276 	mtx_unlock(&tid_lock);
277 }
278 
279 static void
280 tid_free_batch(lwpid_t *batch, int n)
281 {
282 	int i;
283 
284 	mtx_lock(&tid_lock);
285 	for (i = 0; i < n; i++) {
286 		tid_free_locked(batch[i]);
287 	}
288 	mtx_unlock(&tid_lock);
289 }
290 
291 /*
292  * Batching for thread reaping.
293  */
294 struct tidbatch {
295 	lwpid_t tab[16];
296 	int n;
297 };
298 
299 static void
300 tidbatch_prep(struct tidbatch *tb)
301 {
302 
303 	tb->n = 0;
304 }
305 
306 static void
307 tidbatch_add(struct tidbatch *tb, struct thread *td)
308 {
309 
310 	KASSERT(tb->n < nitems(tb->tab),
311 	    ("%s: count too high %d", __func__, tb->n));
312 	tb->tab[tb->n] = td->td_tid;
313 	tb->n++;
314 }
315 
316 static void
317 tidbatch_process(struct tidbatch *tb)
318 {
319 
320 	KASSERT(tb->n <= nitems(tb->tab),
321 	    ("%s: count too high %d", __func__, tb->n));
322 	if (tb->n == nitems(tb->tab)) {
323 		tid_free_batch(tb->tab, tb->n);
324 		tb->n = 0;
325 	}
326 }
327 
328 static void
329 tidbatch_final(struct tidbatch *tb)
330 {
331 
332 	KASSERT(tb->n <= nitems(tb->tab),
333 	    ("%s: count too high %d", __func__, tb->n));
334 	if (tb->n != 0) {
335 		tid_free_batch(tb->tab, tb->n);
336 	}
337 }
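
/*
 * Illustrative sketch (not part of this file): the *batch helpers in
 * this file follow the same prep/add/process/final lifecycle; the real
 * consumer is thread_reap_domain() below.  Roughly:
 *
 *	struct tidbatch tb;
 *
 *	tidbatch_prep(&tb);
 *	for (each thread being reaped) {
 *		tidbatch_add(&tb, td);
 *		tidbatch_process(&tb);	(flushes when the batch fills)
 *	}
 *	tidbatch_final(&tb);		(flushes any remainder)
 */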
338 
339 /*
340  * Batching of thread count decrements, for consistency with the other batches.
341  */
342 struct tdcountbatch {
343 	int n;
344 };
345 
346 static void
347 tdcountbatch_prep(struct tdcountbatch *tb)
348 {
349 
350 	tb->n = 0;
351 }
352 
353 static void
354 tdcountbatch_add(struct tdcountbatch *tb, struct thread *td __unused)
355 {
356 
357 	tb->n++;
358 }
359 
360 static void
361 tdcountbatch_process(struct tdcountbatch *tb)
362 {
363 
364 	if (tb->n == 32) {
365 		thread_count_sub(tb->n);
366 		tb->n = 0;
367 	}
368 }
369 
370 static void
371 tdcountbatch_final(struct tdcountbatch *tb)
372 {
373 
374 	if (tb->n != 0) {
375 		thread_count_sub(tb->n);
376 	}
377 }
378 
379 /*
380  * Prepare a thread for use.
381  */
382 static int
383 thread_ctor(void *mem, int size, void *arg, int flags)
384 {
385 	struct thread	*td;
386 
387 	td = (struct thread *)mem;
388 	TD_SET_STATE(td, TDS_INACTIVE);
389 	td->td_lastcpu = td->td_oncpu = NOCPU;
390 
391 	/*
392 	 * Note that td_critnest begins life as 1 because the thread is not
393 	 * running and is thereby implicitly waiting to be on the receiving
394 	 * end of a context switch.
395 	 */
396 	td->td_critnest = 1;
397 	td->td_lend_user_pri = PRI_MAX;
398 #ifdef AUDIT
399 	audit_thread_alloc(td);
400 #endif
401 #ifdef KDTRACE_HOOKS
402 	kdtrace_thread_ctor(td);
403 #endif
404 	umtx_thread_alloc(td);
405 	MPASS(td->td_sel == NULL);
406 	return (0);
407 }
408 
409 /*
410  * Reclaim a thread after use.
411  */
412 static void
413 thread_dtor(void *mem, int size, void *arg)
414 {
415 	struct thread *td;
416 
417 	td = (struct thread *)mem;
418 
419 #ifdef INVARIANTS
420 	/* Verify that this thread is in a safe state to free. */
421 	switch (TD_GET_STATE(td)) {
422 	case TDS_INHIBITED:
423 	case TDS_RUNNING:
424 	case TDS_CAN_RUN:
425 	case TDS_RUNQ:
426 		/*
427 		 * We must never unlink a thread that is in one of
428 		 * these states, because it is currently active.
429 		 */
430 		panic("bad state for thread unlinking");
431 		/* NOTREACHED */
432 	case TDS_INACTIVE:
433 		break;
434 	default:
435 		panic("bad thread state");
436 		/* NOTREACHED */
437 	}
438 #endif
439 #ifdef AUDIT
440 	audit_thread_free(td);
441 #endif
442 #ifdef KDTRACE_HOOKS
443 	kdtrace_thread_dtor(td);
444 #endif
445 	/* Free all OSD associated with this thread. */
446 	osd_thread_exit(td);
447 	ast_kclear(td);
448 	seltdfini(td);
449 }
450 
451 /*
452  * Initialize type-stable parts of a thread (when newly created).
453  */
454 static int
455 thread_init(void *mem, int size, int flags)
456 {
457 	struct thread *td;
458 
459 	td = (struct thread *)mem;
460 
461 	td->td_allocdomain = vm_phys_domain(vtophys(td));
462 	td->td_sleepqueue = sleepq_alloc();
463 	td->td_turnstile = turnstile_alloc();
464 	td->td_rlqe = NULL;
465 	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
466 	umtx_thread_init(td);
467 	td->td_kstack = 0;
468 	td->td_sel = NULL;
469 	return (0);
470 }
471 
472 /*
473  * Tear down type-stable parts of a thread (just before being discarded).
474  */
475 static void
476 thread_fini(void *mem, int size)
477 {
478 	struct thread *td;
479 
480 	td = (struct thread *)mem;
481 	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
482 	rlqentry_free(td->td_rlqe);
483 	turnstile_free(td->td_turnstile);
484 	sleepq_free(td->td_sleepqueue);
485 	umtx_thread_fini(td);
486 	MPASS(td->td_sel == NULL);
487 }
488 
489 /*
490  * For a newly created process,
491  * link up all the structures and its initial thread, etc.
492  * Called from:
493  * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
494  * proc_dtor() (should go away)
495  * proc_init()
496  */
497 void
498 proc_linkup0(struct proc *p, struct thread *td)
499 {
500 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
501 	proc_linkup(p, td);
502 }
503 
504 void
505 proc_linkup(struct proc *p, struct thread *td)
506 {
507 
508 	sigqueue_init(&p->p_sigqueue, p);
509 	p->p_ksi = ksiginfo_alloc(M_WAITOK);
510 	if (p->p_ksi != NULL) {
511 		/* XXX p_ksi may be null if ksiginfo zone is not ready */
512 		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
513 	}
514 	LIST_INIT(&p->p_mqnotifier);
515 	p->p_numthreads = 0;
516 	thread_link(td, p);
517 }
518 
519 static void
520 ast_suspend(struct thread *td, int tda __unused)
521 {
522 	struct proc *p;
523 
524 	p = td->td_proc;
525 	/*
526 	 * We need to check to see if we have to exit or wait due to a
527 	 * single threading requirement or some other STOP condition.
528 	 */
529 	PROC_LOCK(p);
530 	thread_suspend_check(0);
531 	PROC_UNLOCK(p);
532 }
533 
534 extern int max_threads_per_proc;
535 
536 /*
537  * Initialize global thread allocation resources.
538  */
539 void
540 threadinit(void)
541 {
542 	u_long i;
543 	lwpid_t tid0;
544 
545 	/*
546 	 * Place an upper limit on threads which can be allocated.
547 	 *
548 	 * Note that other factors may make the de facto limit much lower.
549 	 *
550 	 * Platform limits are somewhat arbitrary but deemed "more than good
551  * enough" for the foreseeable future.
552 	 */
553 	if (maxthread == 0) {
554 #ifdef _LP64
555 		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
556 #else
557 		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
558 #endif
559 	}
560 
561 	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
562 	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
563 	/*
564 	 * Handle thread0.
565 	 */
566 	thread_count_inc();
567 	tid0 = tid_alloc();
568 	if (tid0 != THREAD0_TID)
569 		panic("tid0 %d != %d\n", tid0, THREAD0_TID);
570 
571 	/*
572 	 * Thread structures are specially aligned so that (at least) the
573  * 5 lower bits of a pointer to 'struct thread' must be 0.  These bits
574 	 * are used by synchronization primitives to store flags in pointers to
575 	 * such structures.
576 	 */
577 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
578 	    thread_ctor, thread_dtor, thread_init, thread_fini,
579 	    UMA_ALIGN_CACHE_AND_MASK(32 - 1), UMA_ZONE_NOFREE);
580 	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
581 	tidhashlock = (tidhash + 1) / 64;
582 	if (tidhashlock > 0)
583 		tidhashlock--;
584 	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
585 	    M_TIDHASH, M_WAITOK | M_ZERO);
586 	for (i = 0; i < tidhashlock + 1; i++)
587 		rw_init(&tidhashtbl_lock[i], "tidhash");
588 
589 	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
590 	callout_init(&thread_reap_callout, 1);
591 	callout_reset(&thread_reap_callout, 5 * hz,
592 	    thread_reap_callout_cb, NULL);
593 	ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
594 }
595 
596 /*
597  * Place an unused thread on the zombie list.
598  */
599 void
600 thread_zombie(struct thread *td)
601 {
602 	struct thread_domain_data *tdd;
603 	struct thread *ztd;
604 
605 	tdd = &thread_domain_data[td->td_allocdomain];
606 	ztd = atomic_load_ptr(&tdd->tdd_zombies);
607 	for (;;) {
608 		td->td_zombie = ztd;
609 		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
610 		    (uintptr_t *)&ztd, (uintptr_t)td))
611 			break;
612 		continue;
613 	}
614 }
615 
616 /*
617  * Release a thread that has exited after cpu_throw().
618  */
619 void
620 thread_stash(struct thread *td)
621 {
622 	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
623 	thread_zombie(td);
624 }
625 
626 /*
627  * Reap zombies from the passed domain.
628  */
629 static void
630 thread_reap_domain(struct thread_domain_data *tdd)
631 {
632 	struct thread *itd, *ntd;
633 	struct tidbatch tidbatch;
634 	struct credbatch credbatch;
635 	struct limbatch limbatch;
636 	struct tdcountbatch tdcountbatch;
637 
638 	/*
639 	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
640 	 * but most of the time the list is empty.
641 	 */
642 	if (tdd->tdd_zombies == NULL)
643 		return;
644 
645 	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
646 	    (uintptr_t)NULL);
647 	if (itd == NULL)
648 		return;
649 
650 	/*
651 	 * Multiple CPUs can get here, the race is fine as ticks is only
652 	 * advisory.
653 	 */
654 	tdd->tdd_reapticks = ticks;
655 
656 	tidbatch_prep(&tidbatch);
657 	credbatch_prep(&credbatch);
658 	limbatch_prep(&limbatch);
659 	tdcountbatch_prep(&tdcountbatch);
660 
661 	while (itd != NULL) {
662 		ntd = itd->td_zombie;
663 		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
664 
665 		tidbatch_add(&tidbatch, itd);
666 		credbatch_add(&credbatch, itd);
667 		limbatch_add(&limbatch, itd);
668 		tdcountbatch_add(&tdcountbatch, itd);
669 
670 		thread_free_batched(itd);
671 
672 		tidbatch_process(&tidbatch);
673 		credbatch_process(&credbatch);
674 		limbatch_process(&limbatch);
675 		tdcountbatch_process(&tdcountbatch);
676 
677 		itd = ntd;
678 	}
679 
680 	tidbatch_final(&tidbatch);
681 	credbatch_final(&credbatch);
682 	limbatch_final(&limbatch);
683 	tdcountbatch_final(&tdcountbatch);
684 }
685 
686 /*
687  * Reap zombies from all domains.
688  */
689 static void
690 thread_reap_all(void)
691 {
692 	struct thread_domain_data *tdd;
693 	int i, domain;
694 
695 	domain = PCPU_GET(domain);
696 	for (i = 0; i < vm_ndomains; i++) {
697 		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
698 		thread_reap_domain(tdd);
699 	}
700 }
701 
702 /*
703  * Reap zombies from local domain.
704  */
705 static void
706 thread_reap(void)
707 {
708 	struct thread_domain_data *tdd;
709 	int domain;
710 
711 	domain = PCPU_GET(domain);
712 	tdd = &thread_domain_data[domain];
713 
714 	thread_reap_domain(tdd);
715 }
716 
717 static void
718 thread_reap_task_cb(void *arg __unused, int pending __unused)
719 {
720 
721 	thread_reap_all();
722 }
723 
724 static void
725 thread_reap_callout_cb(void *arg __unused)
726 {
727 	struct thread_domain_data *tdd;
728 	int i, cticks, lticks;
729 	bool wantreap;
730 
731 	wantreap = false;
732 	cticks = atomic_load_int(&ticks);
733 	for (i = 0; i < vm_ndomains; i++) {
734 		tdd = &thread_domain_data[i];
735 		lticks = tdd->tdd_reapticks;
736 		if (tdd->tdd_zombies != NULL &&
737 		    (u_int)(cticks - lticks) > 5 * hz) {
738 			wantreap = true;
739 			break;
740 		}
741 	}
742 
743 	if (wantreap)
744 		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
745 	callout_reset(&thread_reap_callout, 5 * hz,
746 	    thread_reap_callout_cb, NULL);
747 }
748 
749 /*
750  * Calling this function guarantees that any thread that exited before
751  * the call is reaped when the function returns.  By 'exited' we mean
752  * a thread removed from the process linkage with thread_unlink().
753  * Practically this means that the caller must lock/unlock the corresponding
754  * process lock before the call, to synchronize with thread_exit().
755  */
756 void
757 thread_reap_barrier(void)
758 {
759 	struct task *t;
760 
761 	/*
762 	 * First do context switches to each CPU to ensure that all
763 	 * PCPU pc_deadthreads are moved to zombie list.
764 	 */
765 	quiesce_all_cpus("", PDROP);
766 
767 	/*
768 	 * Second, fire the task in the same thread as normal
769 	 * thread_reap() is done, to serialize reaping.
770 	 */
771 	t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
772 	TASK_INIT(t, 0, thread_reap_task_cb, t);
773 	taskqueue_enqueue(taskqueue_thread, t);
774 	taskqueue_drain(taskqueue_thread, t);
775 	free(t, M_TEMP);
776 }
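
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): per the comment above, the caller synchronizes with
 * thread_exit() through the process lock before issuing the barrier.
 *
 *	PROC_LOCK(p);
 *	(observe that the thread of interest was unlinked)
 *	PROC_UNLOCK(p);
 *	thread_reap_barrier();
 *	(every thread that exited before this point is now reaped)
 */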
777 
778 /*
779  * Allocate a thread.
780  */
781 struct thread *
782 thread_alloc(int pages)
783 {
784 	struct thread *td;
785 	lwpid_t tid;
786 
787 	if (!thread_count_inc()) {
788 		return (NULL);
789 	}
790 
791 	tid = tid_alloc();
792 	td = uma_zalloc(thread_zone, M_WAITOK);
793 	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
794 	if (!vm_thread_new(td, pages)) {
795 		uma_zfree(thread_zone, td);
796 		tid_free(tid);
797 		thread_count_dec();
798 		return (NULL);
799 	}
800 	td->td_tid = tid;
801 	bzero(&td->td_sa.args, sizeof(td->td_sa.args));
802 	kmsan_thread_alloc(td);
803 	cpu_thread_alloc(td);
804 	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
805 	return (td);
806 }
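
/*
 * Illustrative pairing sketch (hypothetical caller, not part of this
 * file): thread_alloc() returns NULL when the maxthread limit is hit,
 * and a thread that never gets linked to a process should be returned
 * with thread_free().
 *
 *	td = thread_alloc(pages);
 *	if (td == NULL)
 *		return (EPROCLIM);	(hypothetical error choice)
 *	...
 *	if (setup_failed)
 *		thread_free(td);
 */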
807 
808 int
809 thread_alloc_stack(struct thread *td, int pages)
810 {
811 
812 	KASSERT(td->td_kstack == 0,
813 	    ("thread_alloc_stack called on a thread with kstack"));
814 	if (!vm_thread_new(td, pages))
815 		return (0);
816 	cpu_thread_alloc(td);
817 	return (1);
818 }
819 
820 /*
821  * Deallocate a thread.
822  */
823 static void
824 thread_free_batched(struct thread *td)
825 {
826 
827 	lock_profile_thread_exit(td);
828 	if (td->td_cpuset)
829 		cpuset_rel(td->td_cpuset);
830 	td->td_cpuset = NULL;
831 	cpu_thread_free(td);
832 	if (td->td_kstack != 0)
833 		vm_thread_dispose(td);
834 	callout_drain(&td->td_slpcallout);
835 	/*
836 	 * Freeing handled by the caller.
837 	 */
838 	td->td_tid = -1;
839 	kmsan_thread_free(td);
840 	uma_zfree(thread_zone, td);
841 }
842 
843 void
844 thread_free(struct thread *td)
845 {
846 	lwpid_t tid;
847 
848 	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
849 	tid = td->td_tid;
850 	thread_free_batched(td);
851 	tid_free(tid);
852 	thread_count_dec();
853 }
854 
855 void
856 thread_cow_get_proc(struct thread *newtd, struct proc *p)
857 {
858 
859 	PROC_LOCK_ASSERT(p, MA_OWNED);
860 	newtd->td_realucred = crcowget(p->p_ucred);
861 	newtd->td_ucred = newtd->td_realucred;
862 	newtd->td_limit = lim_hold(p->p_limit);
863 	newtd->td_cowgen = p->p_cowgen;
864 }
865 
866 void
867 thread_cow_get(struct thread *newtd, struct thread *td)
868 {
869 
870 	MPASS(td->td_realucred == td->td_ucred);
871 	newtd->td_realucred = crcowget(td->td_realucred);
872 	newtd->td_ucred = newtd->td_realucred;
873 	newtd->td_limit = lim_hold(td->td_limit);
874 	newtd->td_cowgen = td->td_cowgen;
875 }
876 
877 void
878 thread_cow_free(struct thread *td)
879 {
880 
881 	if (td->td_realucred != NULL)
882 		crcowfree(td);
883 	if (td->td_limit != NULL)
884 		lim_free(td->td_limit);
885 }
886 
887 void
888 thread_cow_update(struct thread *td)
889 {
890 	struct proc *p;
891 	struct ucred *oldcred;
892 	struct plimit *oldlimit;
893 
894 	p = td->td_proc;
895 	PROC_LOCK(p);
896 	oldcred = crcowsync();
897 	oldlimit = lim_cowsync();
898 	td->td_cowgen = p->p_cowgen;
899 	PROC_UNLOCK(p);
900 	if (oldcred != NULL)
901 		crfree(oldcred);
902 	if (oldlimit != NULL)
903 		lim_free(oldlimit);
904 }
905 
906 void
907 thread_cow_synced(struct thread *td)
908 {
909 	struct proc *p;
910 
911 	p = td->td_proc;
912 	PROC_LOCK_ASSERT(p, MA_OWNED);
913 	MPASS(td->td_cowgen != p->p_cowgen);
914 	MPASS(td->td_ucred == p->p_ucred);
915 	MPASS(td->td_limit == p->p_limit);
916 	td->td_cowgen = p->p_cowgen;
917 }
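
/*
 * Illustrative sketch (not part of this file): consumers compare the
 * per-thread and per-process copy-on-write generation counts to decide
 * whether the cached td_ucred/td_limit are stale, roughly:
 *
 *	if (td->td_cowgen != p->p_cowgen)
 *		thread_cow_update(td);
 *
 * thread_cow_synced() above covers the case where the process copies
 * were already distributed to the thread by other means.
 */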
918 
919 /*
920  * Discard the current thread and exit from its context.
921  * Always called with scheduler locked.
922  *
923  * Because we can't free a thread while we're operating under its context,
924  * push the current thread into our CPU's deadthread holder. This means
925  * we needn't worry about someone else grabbing our context before we
926  * do a cpu_throw().
927  */
928 void
929 thread_exit(void)
930 {
931 	uint64_t runtime, new_switchtime;
932 	struct thread *td;
933 	struct thread *td2;
934 	struct proc *p;
935 	int wakeup_swapper;
936 
937 	td = curthread;
938 	p = td->td_proc;
939 
940 	PROC_SLOCK_ASSERT(p, MA_OWNED);
941 	mtx_assert(&Giant, MA_NOTOWNED);
942 
943 	PROC_LOCK_ASSERT(p, MA_OWNED);
944 	KASSERT(p != NULL, ("thread exiting without a process"));
945 	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
946 	    (long)p->p_pid, td->td_name);
947 	SDT_PROBE0(proc, , , lwp__exit);
948 	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
949 	MPASS(td->td_realucred == td->td_ucred);
950 
951 	/*
952 	 * Drop FPU & debug register state storage, or any other
953 	 * architecture-specific resources that
954 	 * would not be present in a new, untouched process.
955 	 */
956 	cpu_thread_exit(td);
957 
958 	/*
959 	 * The last thread is left attached to the process
960 	 * so that the whole bundle gets recycled. Skip
961 	 * all this stuff if we never had threads.
962 	 * EXIT clears all sign of other threads when
963 	 * it goes to single threading, so the last thread always
964 	 * takes the short path.
965 	 */
966 	if (p->p_flag & P_HADTHREADS) {
967 		if (p->p_numthreads > 1) {
968 			atomic_add_int(&td->td_proc->p_exitthreads, 1);
969 			thread_unlink(td);
970 			td2 = FIRST_THREAD_IN_PROC(p);
971 			sched_exit_thread(td2, td);
972 
973 			/*
974 			 * The test below is NOT true if we are the
975 			 * sole exiting thread. P_STOPPED_SINGLE is unset
976 			 * in exit1() after it is the only survivor.
977 			 */
978 			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
979 				if (p->p_numthreads == p->p_suspcount) {
980 					thread_lock(p->p_singlethread);
981 					wakeup_swapper = thread_unsuspend_one(
982 						p->p_singlethread, p, false);
983 					if (wakeup_swapper)
984 						kick_proc0();
985 				}
986 			}
987 
988 			PCPU_SET(deadthread, td);
989 		} else {
990 			/*
991 			 * The last thread is exiting, but not through exit().
992 			 */
993 			panic ("thread_exit: Last thread exiting on its own");
994 		}
995 	}
996 #ifdef	HWPMC_HOOKS
997 	/*
998 	 * If this thread is part of a process that is being tracked by hwpmc(4),
999 	 * inform the module of the thread's impending exit.
1000 	 */
1001 	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
1002 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1003 		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
1004 	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
1005 		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
1006 #endif
1007 	PROC_UNLOCK(p);
1008 	PROC_STATLOCK(p);
1009 	thread_lock(td);
1010 	PROC_SUNLOCK(p);
1011 
1012 	/* Do the same timestamp bookkeeping that mi_switch() would do. */
1013 	new_switchtime = cpu_ticks();
1014 	runtime = new_switchtime - PCPU_GET(switchtime);
1015 	td->td_runtime += runtime;
1016 	td->td_incruntime += runtime;
1017 	PCPU_SET(switchtime, new_switchtime);
1018 	PCPU_SET(switchticks, ticks);
1019 	VM_CNT_INC(v_swtch);
1020 
1021 	/* Save our resource usage in our process. */
1022 	td->td_ru.ru_nvcsw++;
1023 	ruxagg_locked(p, td);
1024 	rucollect(&p->p_ru, &td->td_ru);
1025 	PROC_STATUNLOCK(p);
1026 
1027 	TD_SET_STATE(td, TDS_INACTIVE);
1028 #ifdef WITNESS
1029 	witness_thread_exit(td);
1030 #endif
1031 	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
1032 	sched_throw(td);
1033 	panic("I'm a teapot!");
1034 	/* NOTREACHED */
1035 }
1036 
1037 /*
1038  * Do any thread-specific cleanups that may be needed in wait().
1039  * Called with Giant, proc and schedlock not held.
1040  */
1041 void
1042 thread_wait(struct proc *p)
1043 {
1044 	struct thread *td;
1045 
1046 	mtx_assert(&Giant, MA_NOTOWNED);
1047 	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
1048 	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
1049 	td = FIRST_THREAD_IN_PROC(p);
1050 	/* Lock the last thread so we spin until it exits cpu_throw(). */
1051 	thread_lock(td);
1052 	thread_unlock(td);
1053 	lock_profile_thread_exit(td);
1054 	cpuset_rel(td->td_cpuset);
1055 	td->td_cpuset = NULL;
1056 	cpu_thread_clean(td);
1057 	thread_cow_free(td);
1058 	callout_drain(&td->td_slpcallout);
1059 	thread_reap();	/* check for zombie threads etc. */
1060 }
1061 
1062 /*
1063  * Link a thread to a process.
1064  * Set up anything that needs to be initialized for it to
1065  * be used by the process.
1066  */
1067 void
1068 thread_link(struct thread *td, struct proc *p)
1069 {
1070 
1071 	/*
1072 	 * XXX This can't be enabled because it's called for proc0 before
1073 	 * its lock has been created.
1074 	 * PROC_LOCK_ASSERT(p, MA_OWNED);
1075 	 */
1076 	TD_SET_STATE(td, TDS_INACTIVE);
1077 	td->td_proc     = p;
1078 	td->td_flags    = TDF_INMEM;
1079 
1080 	LIST_INIT(&td->td_contested);
1081 	LIST_INIT(&td->td_lprof[0]);
1082 	LIST_INIT(&td->td_lprof[1]);
1083 #ifdef EPOCH_TRACE
1084 	SLIST_INIT(&td->td_epochs);
1085 #endif
1086 	sigqueue_init(&td->td_sigqueue, p);
1087 	callout_init(&td->td_slpcallout, 1);
1088 	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
1089 	p->p_numthreads++;
1090 }
1091 
1092 /*
1093  * Called from:
1094  *  thread_exit()
1095  */
1096 void
1097 thread_unlink(struct thread *td)
1098 {
1099 	struct proc *p = td->td_proc;
1100 
1101 	PROC_LOCK_ASSERT(p, MA_OWNED);
1102 #ifdef EPOCH_TRACE
1103 	MPASS(SLIST_EMPTY(&td->td_epochs));
1104 #endif
1105 
1106 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
1107 	p->p_numthreads--;
1108 	/* could clear a few other things here */
1109 	/* Must  NOT clear links to proc! */
1110 }
1111 
1112 static int
1113 calc_remaining(struct proc *p, int mode)
1114 {
1115 	int remaining;
1116 
1117 	PROC_LOCK_ASSERT(p, MA_OWNED);
1118 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1119 	if (mode == SINGLE_EXIT)
1120 		remaining = p->p_numthreads;
1121 	else if (mode == SINGLE_BOUNDARY)
1122 		remaining = p->p_numthreads - p->p_boundary_count;
1123 	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
1124 		remaining = p->p_numthreads - p->p_suspcount;
1125 	else
1126 		panic("calc_remaining: wrong mode %d", mode);
1127 	return (remaining);
1128 }
1129 
1130 static int
1131 remain_for_mode(int mode)
1132 {
1133 
1134 	return (mode == SINGLE_ALLPROC ? 0 : 1);
1135 }
1136 
1137 static int
1138 weed_inhib(int mode, struct thread *td2, struct proc *p)
1139 {
1140 	int wakeup_swapper;
1141 
1142 	PROC_LOCK_ASSERT(p, MA_OWNED);
1143 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1144 	THREAD_LOCK_ASSERT(td2, MA_OWNED);
1145 
1146 	wakeup_swapper = 0;
1147 
1148 	/*
1149 	 * Since the thread lock is dropped by the scheduler we have
1150 	 * to retry to check for races.
1151 	 */
1152 restart:
1153 	switch (mode) {
1154 	case SINGLE_EXIT:
1155 		if (TD_IS_SUSPENDED(td2)) {
1156 			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
1157 			thread_lock(td2);
1158 			goto restart;
1159 		}
1160 		if (TD_CAN_ABORT(td2)) {
1161 			wakeup_swapper |= sleepq_abort(td2, EINTR);
1162 			return (wakeup_swapper);
1163 		}
1164 		break;
1165 	case SINGLE_BOUNDARY:
1166 	case SINGLE_NO_EXIT:
1167 		if (TD_IS_SUSPENDED(td2) &&
1168 		    (td2->td_flags & TDF_BOUNDARY) == 0) {
1169 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
1170 			thread_lock(td2);
1171 			goto restart;
1172 		}
1173 		if (TD_CAN_ABORT(td2)) {
1174 			wakeup_swapper |= sleepq_abort(td2, ERESTART);
1175 			return (wakeup_swapper);
1176 		}
1177 		break;
1178 	case SINGLE_ALLPROC:
1179 		/*
1180 		 * ALLPROC suspend tries to avoid spurious EINTR for
1181 		 * threads sleeping interruptibly, by suspending the
1182 		 * thread directly, similarly to sig_suspend_threads().
1183 		 * Since such a sleep is not necessarily performed at the user
1184 		 * boundary, TDF_ALLPROCSUSP is used to avoid immediate
1185 		 * un-suspend.
1186 		 */
1187 		if (TD_IS_SUSPENDED(td2) &&
1188 		    (td2->td_flags & TDF_ALLPROCSUSP) == 0) {
1189 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
1190 			thread_lock(td2);
1191 			goto restart;
1192 		}
1193 		if (TD_CAN_ABORT(td2)) {
1194 			td2->td_flags |= TDF_ALLPROCSUSP;
1195 			wakeup_swapper |= sleepq_abort(td2, ERESTART);
1196 			return (wakeup_swapper);
1197 		}
1198 		break;
1199 	default:
1200 		break;
1201 	}
1202 	thread_unlock(td2);
1203 	return (wakeup_swapper);
1204 }
1205 
1206 /*
1207  * Enforce single-threading.
1208  *
1209  * Returns 1 if the caller must abort (another thread is waiting to
1210  * exit the process or similar). Process is locked!
1211  * Returns 0 when you are successfully the only thread running.
1212  * A process has successfully single threaded in the suspend mode when
1213  * there are no threads in user mode. Threads in the kernel must be
1214  * allowed to continue until they get to the user boundary. They may even
1215  * copy out their return values and data before suspending. They may however
1216  * be accelerated in reaching the user boundary as we will wake up
1217  * any sleeping threads that are interruptible (PCATCH).
1218  */
1219 int
1220 thread_single(struct proc *p, int mode)
1221 {
1222 	struct thread *td;
1223 	struct thread *td2;
1224 	int remaining, wakeup_swapper;
1225 
1226 	td = curthread;
1227 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
1228 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
1229 	    ("invalid mode %d", mode));
1230 	/*
1231 	 * If allowing non-ALLPROC singlethreading for non-curproc
1232 	 * callers, calc_remaining() and remain_for_mode() should be
1233 	 * adjusted to also account for td->td_proc != p.  For now
1234 	 * this is not implemented because it is not used.
1235 	 */
1236 	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
1237 	    (mode != SINGLE_ALLPROC && td->td_proc == p),
1238 	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
1239 	mtx_assert(&Giant, MA_NOTOWNED);
1240 	PROC_LOCK_ASSERT(p, MA_OWNED);
1241 
1242 	/*
1243 	 * Is someone already single threading?
1244 	 * Or maybe singlethreading is not needed at all.
1245 	 */
1246 	if (mode == SINGLE_ALLPROC) {
1247 		while ((p->p_flag & P_STOPPED_SINGLE) != 0) {
1248 			if ((p->p_flag2 & P2_WEXIT) != 0)
1249 				return (1);
1250 			msleep(&p->p_flag, &p->p_mtx, PCATCH, "thrsgl", 0);
1251 		}
1252 	} else if ((p->p_flag & P_HADTHREADS) == 0)
1253 		return (0);
1254 	if (p->p_singlethread != NULL && p->p_singlethread != td)
1255 		return (1);
1256 
1257 	if (mode == SINGLE_EXIT) {
1258 		p->p_flag |= P_SINGLE_EXIT;
1259 		p->p_flag &= ~P_SINGLE_BOUNDARY;
1260 	} else {
1261 		p->p_flag &= ~P_SINGLE_EXIT;
1262 		if (mode == SINGLE_BOUNDARY)
1263 			p->p_flag |= P_SINGLE_BOUNDARY;
1264 		else
1265 			p->p_flag &= ~P_SINGLE_BOUNDARY;
1266 	}
1267 	if (mode == SINGLE_ALLPROC)
1268 		p->p_flag |= P_TOTAL_STOP;
1269 	p->p_flag |= P_STOPPED_SINGLE;
1270 	PROC_SLOCK(p);
1271 	p->p_singlethread = td;
1272 	remaining = calc_remaining(p, mode);
1273 	while (remaining != remain_for_mode(mode)) {
1274 		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
1275 			goto stopme;
1276 		wakeup_swapper = 0;
1277 		FOREACH_THREAD_IN_PROC(p, td2) {
1278 			if (td2 == td)
1279 				continue;
1280 			thread_lock(td2);
1281 			ast_sched_locked(td2, TDA_SUSPEND);
1282 			if (TD_IS_INHIBITED(td2)) {
1283 				wakeup_swapper |= weed_inhib(mode, td2, p);
1284 #ifdef SMP
1285 			} else if (TD_IS_RUNNING(td2)) {
1286 				forward_signal(td2);
1287 				thread_unlock(td2);
1288 #endif
1289 			} else
1290 				thread_unlock(td2);
1291 		}
1292 		if (wakeup_swapper)
1293 			kick_proc0();
1294 		remaining = calc_remaining(p, mode);
1295 
1296 		/*
1297 		 * Maybe we suspended some threads; was it enough?
1298 		 */
1299 		if (remaining == remain_for_mode(mode))
1300 			break;
1301 
1302 stopme:
1303 		/*
1304 		 * Wake us up when everyone else has suspended.
1305 		 * In the meantime we suspend as well.
1306 		 */
1307 		thread_suspend_switch(td, p);
1308 		remaining = calc_remaining(p, mode);
1309 	}
1310 	if (mode == SINGLE_EXIT) {
1311 		/*
1312 		 * Convert the process to an unthreaded process.
1313 		 * SINGLE_EXIT is requested by exit1() or execve(); in
1314 		 * both cases the other threads must be retired.
1315 		 */
1316 		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
1317 		p->p_singlethread = NULL;
1318 		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);
1319 
1320 		/*
1321 		 * Wait for any remaining threads to exit cpu_throw().
1322 		 */
1323 		while (p->p_exitthreads != 0) {
1324 			PROC_SUNLOCK(p);
1325 			PROC_UNLOCK(p);
1326 			sched_relinquish(td);
1327 			PROC_LOCK(p);
1328 			PROC_SLOCK(p);
1329 		}
1330 	} else if (mode == SINGLE_BOUNDARY) {
1331 		/*
1332 		 * Wait until all suspended threads are removed from
1333 		 * the processors.  The thread_suspend_check()
1334 		 * increments p_boundary_count while it is still
1335 		 * running, which makes it possible for the execve()
1336 		 * to destroy vmspace while our other threads are
1337 		 * still using the address space.
1338 		 *
1339 		 * We lock the thread, which is only allowed to
1340 		 * succeed after context switch code finished using
1341 		 * the address space.
1342 		 */
1343 		FOREACH_THREAD_IN_PROC(p, td2) {
1344 			if (td2 == td)
1345 				continue;
1346 			thread_lock(td2);
1347 			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
1348 			    ("td %p not on boundary", td2));
1349 			KASSERT(TD_IS_SUSPENDED(td2),
1350 			    ("td %p is not suspended", td2));
1351 			thread_unlock(td2);
1352 		}
1353 	}
1354 	PROC_SUNLOCK(p);
1355 	return (0);
1356 }
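
/*
 * Illustrative bracketing sketch (hypothetical caller, not part of
 * this file): single-threading is requested and released with the
 * process lock held, in the style of the execve()/exit1() callers:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY) != 0) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	(hypothetical error choice)
 *	}
 *	(act while the other threads are parked at the user boundary)
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */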
1357 
1358 bool
1359 thread_suspend_check_needed(void)
1360 {
1361 	struct proc *p;
1362 	struct thread *td;
1363 
1364 	td = curthread;
1365 	p = td->td_proc;
1366 	PROC_LOCK_ASSERT(p, MA_OWNED);
1367 	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
1368 	    (td->td_dbgflags & TDB_SUSPEND) != 0));
1369 }
1370 
1371 /*
1372  * Called from locations that can safely check to see
1373  * whether we have to suspend or at least throttle for a
1374  * single-thread event (e.g. fork).
1375  *
1376  * Such locations include userret().
1377  * If the "return_instead" argument is non-zero, the thread must be able to
1378  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1379  *
1380  * The 'return_instead' argument tells the function if it may do a
1381  * thread_exit() or suspend, or whether the caller must abort and back
1382  * out instead.
1383  *
1384  * If the thread that set the single_threading request has set the
1385  * P_SINGLE_EXIT bit in the process flags then this call will never return
1386  * if 'return_instead' is false, but will exit.
1387  *
1388  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1389  *---------------+--------------------+---------------------
1390  *       0       | returns 0          |   returns 0 or 1
1391  *               | when ST ends       |   immediately
1392  *---------------+--------------------+---------------------
1393  *       1       | thread exits       |   returns 1
1394  *               |                    |  immediately
1395  * 0 = thread_exit() or suspension ok,
1396  * other = return error instead of stopping the thread.
1397  *
1398  * While a full suspension is under effect, even a single threading
1399  * thread would be suspended if it made this call (but it shouldn't).
1400  * This call should only be made from places where
1401  * thread_exit() would be safe as that may be the outcome unless
1402  * return_instead is set.
1403  */
1404 int
1405 thread_suspend_check(int return_instead)
1406 {
1407 	struct thread *td;
1408 	struct proc *p;
1409 	int wakeup_swapper;
1410 
1411 	td = curthread;
1412 	p = td->td_proc;
1413 	mtx_assert(&Giant, MA_NOTOWNED);
1414 	PROC_LOCK_ASSERT(p, MA_OWNED);
1415 	while (thread_suspend_check_needed()) {
1416 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1417 			KASSERT(p->p_singlethread != NULL,
1418 			    ("singlethread not set"));
1419 			/*
1420 			 * The only suspension in action is a
1421 			 * single-threading. The single threader need not stop.
1422 			 * It is safe to access p->p_singlethread unlocked
1423 			 * because it can only be set to our address by us.
1424 			 */
1425 			if (p->p_singlethread == td)
1426 				return (0);	/* Exempt from stopping. */
1427 		}
1428 		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
1429 			return (EINTR);
1430 
1431 		/* Should we go to the user boundary if we didn't come from there? */
1432 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
1433 		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
1434 			return (ERESTART);
1435 
1436 		/*
1437 		 * Ignore suspend requests if they are deferred.
1438 		 */
1439 		if ((td->td_flags & TDF_SBDRY) != 0) {
1440 			KASSERT(return_instead,
1441 			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
1442 			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
1443 			    (TDF_SEINTR | TDF_SERESTART),
1444 			    ("both TDF_SEINTR and TDF_SERESTART"));
1445 			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
1446 		}
1447 
1448 		/*
1449 		 * If the process is waiting for us to exit,
1450 		 * this thread should just suicide.
1451 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1452 		 */
1453 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1454 			PROC_UNLOCK(p);
1455 
1456 			/*
1457 			 * Allow Linux emulation layer to do some work
1458 			 * before thread suicide.
1459 			 */
1460 			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
1461 				(p->p_sysent->sv_thread_detach)(td);
1462 			umtx_thread_exit(td);
1463 			kern_thr_exit(td);
1464 			panic("stopped thread did not exit");
1465 		}
1466 
1467 		PROC_SLOCK(p);
1468 		thread_stopped(p);
1469 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1470 			if (p->p_numthreads == p->p_suspcount + 1) {
1471 				thread_lock(p->p_singlethread);
1472 				wakeup_swapper = thread_unsuspend_one(
1473 				    p->p_singlethread, p, false);
1474 				if (wakeup_swapper)
1475 					kick_proc0();
1476 			}
1477 		}
1478 		PROC_UNLOCK(p);
1479 		thread_lock(td);
1480 		/*
1481 		 * When a thread suspends, it just
1482 		 * gets taken off all queues.
1483 		 */
1484 		thread_suspend_one(td);
1485 		if (return_instead == 0) {
1486 			p->p_boundary_count++;
1487 			td->td_flags |= TDF_BOUNDARY;
1488 		}
1489 		PROC_SUNLOCK(p);
1490 		mi_switch(SW_INVOL | SWT_SUSPEND);
1491 		PROC_LOCK(p);
1492 	}
1493 	return (0);
1494 }
1495 
1496 /*
1497  * Check for possible stops and suspensions while executing a
1498  * casueword or similar transiently failing operation.
1499  *
1500  * The sleep argument controls whether the function can handle a stop
1501  * request itself or it should return ERESTART and have the request
1502  * processed at the kernel/user boundary in ast.
1503  *
1504  * Typically, when retrying due to casueword(9) failure (rv == 1), we
1505  * should handle the stop requests there, with exception of cases when
1506  * the thread owns a kernel resource, for instance busied the umtx
1507  * key, or when functions return immediately if thread_check_susp()
1508  * returned non-zero.  On the other hand, when retrying the whole lock
1509  * operation, we had better not stop there but delegate the handling to
1510  * ast.
1511  *
1512  * If the request is for thread termination P_SINGLE_EXIT, we cannot
1513  * handle it at all, and simply return EINTR.
1514  */
1515 int
1516 thread_check_susp(struct thread *td, bool sleep)
1517 {
1518 	struct proc *p;
1519 	int error;
1520 
1521 	/*
1522 	 * The check for TDA_SUSPEND is racy, but it is enough to
1523 	 * eventually break the lockstep loop.
1524 	 */
1525 	if (!td_ast_pending(td, TDA_SUSPEND))
1526 		return (0);
1527 	error = 0;
1528 	p = td->td_proc;
1529 	PROC_LOCK(p);
1530 	if (p->p_flag & P_SINGLE_EXIT)
1531 		error = EINTR;
1532 	else if (P_SHOULDSTOP(p) ||
1533 	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
1534 		error = sleep ? thread_suspend_check(0) : ERESTART;
1535 	PROC_UNLOCK(p);
1536 	return (error);
1537 }
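
/*
 * Illustrative retry-loop sketch (not part of this file): per the
 * comment above, a transiently failing casueword(9)-style operation
 * polls thread_check_susp() between attempts so that a lockstep loop
 * still notices stop and suspend requests.
 *
 *	for (;;) {
 *		rv = (casueword-style attempt);
 *		if (rv != 1)
 *			break;		(done, or hard failure)
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}
 */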
1538 
1539 void
1540 thread_suspend_switch(struct thread *td, struct proc *p)
1541 {
1542 
1543 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1544 	PROC_LOCK_ASSERT(p, MA_OWNED);
1545 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1546 	/*
1547 	 * We implement thread_suspend_one in stages here to avoid
1548 	 * dropping the proc lock while the thread lock is owned.
1549 	 */
1550 	if (p == td->td_proc) {
1551 		thread_stopped(p);
1552 		p->p_suspcount++;
1553 	}
1554 	PROC_UNLOCK(p);
1555 	thread_lock(td);
1556 	ast_unsched_locked(td, TDA_SUSPEND);
1557 	TD_SET_SUSPENDED(td);
1558 	sched_sleep(td, 0);
1559 	PROC_SUNLOCK(p);
1560 	DROP_GIANT();
1561 	mi_switch(SW_VOL | SWT_SUSPEND);
1562 	PICKUP_GIANT();
1563 	PROC_LOCK(p);
1564 	PROC_SLOCK(p);
1565 }
1566 
1567 void
1568 thread_suspend_one(struct thread *td)
1569 {
1570 	struct proc *p;
1571 
1572 	p = td->td_proc;
1573 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1574 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1575 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1576 	p->p_suspcount++;
1577 	ast_unsched_locked(td, TDA_SUSPEND);
1578 	TD_SET_SUSPENDED(td);
1579 	sched_sleep(td, 0);
1580 }
1581 
1582 static int
1583 thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
1584 {
1585 
1586 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1587 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1588 	TD_CLR_SUSPENDED(td);
1589 	td->td_flags &= ~TDF_ALLPROCSUSP;
1590 	if (td->td_proc == p) {
1591 		PROC_SLOCK_ASSERT(p, MA_OWNED);
1592 		p->p_suspcount--;
1593 		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
1594 			td->td_flags &= ~TDF_BOUNDARY;
1595 			p->p_boundary_count--;
1596 		}
1597 	}
1598 	return (setrunnable(td, 0));
1599 }
1600 
1601 void
1602 thread_run_flash(struct thread *td)
1603 {
1604 	struct proc *p;
1605 
1606 	p = td->td_proc;
1607 	PROC_LOCK_ASSERT(p, MA_OWNED);
1608 
1609 	if (TD_ON_SLEEPQ(td))
1610 		sleepq_remove_nested(td);
1611 	else
1612 		thread_lock(td);
1613 
1614 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1615 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1616 
1617 	TD_CLR_SUSPENDED(td);
1618 	PROC_SLOCK(p);
1619 	MPASS(p->p_suspcount > 0);
1620 	p->p_suspcount--;
1621 	PROC_SUNLOCK(p);
1622 	if (setrunnable(td, 0))
1623 		kick_proc0();
1624 }
1625 
1626 /*
1627  * Allow all threads blocked by single threading to continue running.
1628  */
1629 void
1630 thread_unsuspend(struct proc *p)
1631 {
1632 	struct thread *td;
1633 	int wakeup_swapper;
1634 
1635 	PROC_LOCK_ASSERT(p, MA_OWNED);
1636 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1637 	wakeup_swapper = 0;
1638 	if (!P_SHOULDSTOP(p)) {
1639                 FOREACH_THREAD_IN_PROC(p, td) {
1640 			thread_lock(td);
1641 			if (TD_IS_SUSPENDED(td))
1642 				wakeup_swapper |= thread_unsuspend_one(td, p,
1643 				    true);
1644 			else
1645 				thread_unlock(td);
1646 		}
1647 	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
1648 	    p->p_numthreads == p->p_suspcount) {
1649 		/*
1650 		 * Stopping everything also did the job for the single
1651 		 * threading request. Now that we've downgraded to single-threaded,
1652 		 * let it continue.
1653 		 */
1654 		if (p->p_singlethread->td_proc == p) {
1655 			thread_lock(p->p_singlethread);
1656 			wakeup_swapper = thread_unsuspend_one(
1657 			    p->p_singlethread, p, false);
1658 		}
1659 	}
1660 	if (wakeup_swapper)
1661 		kick_proc0();
1662 }
1663 
1664 /*
1665  * End the single threading mode.
1666  */
1667 void
1668 thread_single_end(struct proc *p, int mode)
1669 {
1670 	struct thread *td;
1671 	int wakeup_swapper;
1672 
1673 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
1674 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
1675 	    ("invalid mode %d", mode));
1676 	PROC_LOCK_ASSERT(p, MA_OWNED);
1677 	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
1678 	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
1679 	    ("mode %d does not match P_TOTAL_STOP", mode));
1680 	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
1681 	    ("thread_single_end from other thread %p %p",
1682 	    curthread, p->p_singlethread));
1683 	KASSERT(mode != SINGLE_BOUNDARY ||
1684 	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
1685 	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
1686 	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
1687 	    P_TOTAL_STOP);
1688 	PROC_SLOCK(p);
1689 	p->p_singlethread = NULL;
1690 	wakeup_swapper = 0;
1691 	/*
1692 	 * If there are other threads they may now run,
1693 	 * unless of course there is a blanket 'stop order'
1694 	 * on the process. The single threader must be allowed
1695 	 * to continue however as this is a bad place to stop.
1696 	 */
1697 	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
1698                 FOREACH_THREAD_IN_PROC(p, td) {
1699 			thread_lock(td);
1700 			if (TD_IS_SUSPENDED(td)) {
1701 				wakeup_swapper |= thread_unsuspend_one(td, p,
1702 				    true);
1703 			} else
1704 				thread_unlock(td);
1705 		}
1706 	}
1707 	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
1708 	    ("inconsistent boundary count %d", p->p_boundary_count));
1709 	PROC_SUNLOCK(p);
1710 	if (wakeup_swapper)
1711 		kick_proc0();
1712 	wakeup(&p->p_flag);
1713 }
1714 
1715 /*
1716  * Locate a thread by number and return with proc lock held.
1717  *
1718  * thread exit establishes proc -> tidhash lock ordering, but lookup
1719  * takes tidhash first and needs to return locked proc.
1720  *
1721  * The problem is worked around by relying on type-safety of both
1722  * structures and doing the work in 2 steps:
1723  * - tidhash-locked lookup which saves both thread and proc pointers
1724  * - proc-locked verification that the found thread still matches
1725  */
1726 static bool
1727 tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
1728 {
1729 #define RUN_THRESH	16
1730 	struct proc *p;
1731 	struct thread *td;
1732 	int run;
1733 	bool locked;
1734 
1735 	run = 0;
1736 	rw_rlock(TIDHASHLOCK(tid));
1737 	locked = true;
1738 	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1739 		if (td->td_tid != tid) {
1740 			run++;
1741 			continue;
1742 		}
1743 		p = td->td_proc;
1744 		if (pid != -1 && p->p_pid != pid) {
1745 			td = NULL;
1746 			break;
1747 		}
1748 		if (run > RUN_THRESH) {
1749 			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
1750 				LIST_REMOVE(td, td_hash);
1751 				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1752 					td, td_hash);
1753 				rw_wunlock(TIDHASHLOCK(tid));
1754 				locked = false;
1755 				break;
1756 			}
1757 		}
1758 		break;
1759 	}
1760 	if (locked)
1761 		rw_runlock(TIDHASHLOCK(tid));
1762 	if (td == NULL)
1763 		return (false);
1764 	*pp = p;
1765 	*tdp = td;
1766 	return (true);
1767 }
1768 
1769 struct thread *
1770 tdfind(lwpid_t tid, pid_t pid)
1771 {
1772 	struct proc *p;
1773 	struct thread *td;
1774 
1775 	td = curthread;
1776 	if (td->td_tid == tid) {
1777 		if (pid != -1 && td->td_proc->p_pid != pid)
1778 			return (NULL);
1779 		PROC_LOCK(td->td_proc);
1780 		return (td);
1781 	}
1782 
1783 	for (;;) {
1784 		if (!tdfind_hash(tid, pid, &p, &td))
1785 			return (NULL);
1786 		PROC_LOCK(p);
1787 		if (td->td_tid != tid) {
1788 			PROC_UNLOCK(p);
1789 			continue;
1790 		}
1791 		if (td->td_proc != p) {
1792 			PROC_UNLOCK(p);
1793 			continue;
1794 		}
1795 		if (p->p_state == PRS_NEW) {
1796 			PROC_UNLOCK(p);
1797 			return (NULL);
1798 		}
1799 		return (td);
1800 	}
1801 }
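
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): tdfind() returns with the containing process locked, so the
 * caller is responsible for dropping the lock.
 *
 *	td = tdfind(tid, pid);		(pid may be -1 for "any process")
 *	if (td == NULL)
 *		return (ESRCH);		(hypothetical error choice)
 *	(use td; td->td_proc is locked and stable here)
 *	PROC_UNLOCK(td->td_proc);
 */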
1802 
1803 void
1804 tidhash_add(struct thread *td)
1805 {
1806 	rw_wlock(TIDHASHLOCK(td->td_tid));
1807 	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
1808 	rw_wunlock(TIDHASHLOCK(td->td_tid));
1809 }
1810 
1811 void
1812 tidhash_remove(struct thread *td)
1813 {
1814 
1815 	rw_wlock(TIDHASHLOCK(td->td_tid));
1816 	LIST_REMOVE(td, td_hash);
1817 	rw_wunlock(TIDHASHLOCK(td->td_tid));
1818 }
1819