/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/dtrace_bsd.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/taskqueue.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtxvar.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_phys.h>
#include <sys/eventhandler.h>

/*
 * The asserts below verify the stability of the struct thread and
 * struct proc layouts, as exposed by the KBI to modules.  On head,
 * the KBI is allowed to drift; changes to the structures must be
 * accompanied by updates to the asserts.
 *
 * On the stable branches after a KBI freeze, the conditions must not
 * be violated.  Typically new fields are moved to the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0x108,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x114,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4b8,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xc4,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e4,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4d0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x9c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x314,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x358,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x6c,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x78,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x270,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x288,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x31c,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

struct thread_domain_data {
	struct thread	*tdd_zombies;
	int		tdd_reapticks;
} __aligned(CACHE_LINE_SIZE);

static struct thread_domain_data thread_domain_data[MAXMEMDOM];

static struct task	thread_reap_task;
static struct callout	thread_reap_callout;

static void thread_zombie(struct thread *);
static void thread_reap(void);
static void thread_reap_all(void);
static void thread_reap_task_cb(void *, int);
static void thread_reap_callout_cb(void *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);
static void thread_free_batched(struct thread *td);

static __exclusive_cache_line struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static __exclusive_cache_line int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long	tidhash;
static u_long	tidhashlock;
static struct	rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static bool
thread_count_inc_try(void)
{
	int nthreads_new;

	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
	if (nthreads_new >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads_new >= maxthread) {
			atomic_subtract_int(&nthreads, 1);
			return (false);
		}
	}
	return (true);
}
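
/*
 * thread_count_inc_try() optimistically reserves a slot with an
 * atomic increment and rolls the increment back on failure, keeping
 * the common path lock-free.  A minimal userspace sketch of the same
 * pattern (illustrative only; "slots_inuse", "SLOTS_MAX" and
 * "slot_reserve" are made-up names, not kernel interfaces):
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static atomic_int slots_inuse;
 *	#define	SLOTS_MAX	128
 *
 *	static bool
 *	slot_reserve(void)
 *	{
 *		if (atomic_fetch_add(&slots_inuse, 1) + 1 > SLOTS_MAX) {
 *			atomic_fetch_sub(&slots_inuse, 1);  // roll back
 *			return (false);
 *		}
 *		return (true);
 *	}
 */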

static bool
thread_count_inc(void)
{
	static struct timeval lastfail;
	static int curfail;

	thread_reap();
	if (thread_count_inc_try()) {
		return (true);
	}

	thread_reap_all();
	if (thread_count_inc_try()) {
		return (true);
	}

	if (ppsratecheck(&lastfail, &curfail, 1)) {
		printf("maxthread limit exceeded by uid %u "
		    "(pid %d); consider increasing kern.maxthread\n",
		    curthread->td_ucred->cr_ruid, curproc->p_pid);
	}
	return (false);
}

static void
thread_count_sub(int n)
{

	atomic_subtract_int(&nthreads, n);
}

static void
thread_count_dec(void)
{

	thread_count_sub(1);
}

static lwpid_t
tid_alloc(void)
{
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs. If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}
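
/*
 * tid_alloc() above is a rotor-style bitmap allocator: the scan for a
 * clear bit starts where the previous allocation left off and wraps
 * around at most once.  A hedged sketch of the core scan using
 * bitstring(3), outside the kernel locking rules (illustrative only;
 * "map", "nids" and "start" are made-up names):
 *
 *	bitstr_t *map = bit_alloc(nids, M_TEMP, M_WAITOK);
 *	int id;
 *
 *	bit_ffc_at(map, start, nids, &id);	// first clear bit >= start
 *	if (id == -1)
 *		bit_ffc_at(map, 0, nids, &id);	// wrap to the beginning
 *	if (id != -1) {
 *		bit_set(map, id);		// claim the ID
 *		start = id + 1;			// remember the rotor position
 *	}
 */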

static void
tid_free_locked(lwpid_t rtid)
{
	lwpid_t tid;

	mtx_assert(&tid_lock, MA_OWNED);
	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
}

static void
tid_free(lwpid_t rtid)
{

	mtx_lock(&tid_lock);
	tid_free_locked(rtid);
	mtx_unlock(&tid_lock);
}

static void
tid_free_batch(lwpid_t *batch, int n)
{
	int i;

	mtx_lock(&tid_lock);
	for (i = 0; i < n; i++) {
		tid_free_locked(batch[i]);
	}
	mtx_unlock(&tid_lock);
}

/*
 * Batching for thread reaping.
 */
struct tidbatch {
	lwpid_t tab[16];
	int n;
};

static void
tidbatch_prep(struct tidbatch *tb)
{

	tb->n = 0;
}

static void
tidbatch_add(struct tidbatch *tb, struct thread *td)
{

	KASSERT(tb->n < nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	tb->tab[tb->n] = td->td_tid;
	tb->n++;
}

static void
tidbatch_process(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n == nitems(tb->tab)) {
		tid_free_batch(tb->tab, tb->n);
		tb->n = 0;
	}
}

static void
tidbatch_final(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n != 0) {
		tid_free_batch(tb->tab, tb->n);
	}
}
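
/*
 * The tid batch above follows a prep/add/process/final lifecycle so
 * that tid_lock is taken once per nitems(tab) frees instead of once
 * per thread.  A hedged sketch of the intended call sequence
 * (illustrative only; "next_zombie" is a made-up helper, the real
 * consumer is thread_reap_domain() below):
 *
 *	struct tidbatch tb;
 *	struct thread *td;
 *
 *	tidbatch_prep(&tb);
 *	while ((td = next_zombie()) != NULL) {
 *		tidbatch_add(&tb, td);
 *		tidbatch_process(&tb);	// flushes only when the batch fills
 *	}
 *	tidbatch_final(&tb);		// flushes the remainder
 */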

/*
 * Batching of thread count decrements, for consistency with the tid
 * batching above.
 */
struct tdcountbatch {
	int n;
};

static void
tdcountbatch_prep(struct tdcountbatch *tb)
{

	tb->n = 0;
}

static void
tdcountbatch_add(struct tdcountbatch *tb, struct thread *td __unused)
{

	tb->n++;
}

static void
tdcountbatch_process(struct tdcountbatch *tb)
{

	if (tb->n == 32) {
		thread_count_sub(tb->n);
		tb->n = 0;
	}
}

static void
tdcountbatch_final(struct tdcountbatch *tb)
{

	if (tb->n != 0) {
		thread_count_sub(tb->n);
	}
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_ctor(td);
#endif
	umtx_thread_alloc(td);
	MPASS(td->td_sel == NULL);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (TD_GET_STATE(td)) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
#ifdef KDTRACE_HOOKS
	kdtrace_thread_dtor(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);
	ast_kclear(td);
	seltdfini(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_allocdomain = vm_phys_domain(vtophys(td));
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	MPASS(td->td_sel == NULL);
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads, etc.
 * Called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(M_WAITOK);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

static void
ast_suspend(struct thread *td, int tda __unused)
{
	struct proc *p;

	p = td->td_proc;
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	PROC_LOCK(p);
	thread_suspend_check(0);
	PROC_UNLOCK(p);
}

extern int max_threads_per_proc;

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	/*
	 * Handle thread0.
	 */
	thread_count_inc();
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");

	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
	callout_init(&thread_reap_callout, 1);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
	ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
}

/*
 * Place an unused thread on the zombie list.
 */
void
thread_zombie(struct thread *td)
{
	struct thread_domain_data *tdd;
	struct thread *ztd;

	tdd = &thread_domain_data[td->td_allocdomain];
	ztd = atomic_load_ptr(&tdd->tdd_zombies);
	for (;;) {
		td->td_zombie = ztd;
		if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
		    (uintptr_t *)&ztd, (uintptr_t)td))
			break;
	}
}
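
/*
 * The loop in thread_zombie() is a lock-free LIFO push (a Treiber
 * stack insert): read the current head, point the new element at it
 * and compare-and-swap the head; on failure atomic_fcmpset_rel_ptr()
 * refreshes ztd and the loop retries.  A generic sketch of the same
 * pattern (illustrative only):
 *
 *	struct node { struct node *next; };
 *
 *	static void
 *	push(struct node **head, struct node *n)
 *	{
 *		struct node *h;
 *
 *		h = atomic_load_ptr(head);
 *		for (;;) {
 *			n->next = h;	// fcmpset reloads h on failure
 *			if (atomic_fcmpset_rel_ptr((uintptr_t *)head,
 *			    (uintptr_t *)&h, (uintptr_t)n))
 *				break;
 *		}
 *	}
 */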

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombies from the passed domain.
 */
static void
thread_reap_domain(struct thread_domain_data *tdd)
{
	struct thread *itd, *ntd;
	struct tidbatch tidbatch;
	struct credbatch credbatch;
	struct limbatch limbatch;
	struct tdcountbatch tdcountbatch;

	/*
	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
	 * but most of the time the list is empty.
	 */
	if (tdd->tdd_zombies == NULL)
		return;

	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
	    (uintptr_t)NULL);
	if (itd == NULL)
		return;

	/*
	 * Multiple CPUs can get here, the race is fine as ticks is only
	 * advisory.
	 */
	tdd->tdd_reapticks = ticks;

	tidbatch_prep(&tidbatch);
	credbatch_prep(&credbatch);
	limbatch_prep(&limbatch);
	tdcountbatch_prep(&tdcountbatch);

	while (itd != NULL) {
		ntd = itd->td_zombie;
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);

		tidbatch_add(&tidbatch, itd);
		credbatch_add(&credbatch, itd);
		limbatch_add(&limbatch, itd);
		tdcountbatch_add(&tdcountbatch, itd);

		thread_free_batched(itd);

		tidbatch_process(&tidbatch);
		credbatch_process(&credbatch);
		limbatch_process(&limbatch);
		tdcountbatch_process(&tdcountbatch);

		itd = ntd;
	}

	tidbatch_final(&tidbatch);
	credbatch_final(&credbatch);
	limbatch_final(&limbatch);
	tdcountbatch_final(&tdcountbatch);
}

/*
 * Reap zombies from all domains.
 */
static void
thread_reap_all(void)
{
	struct thread_domain_data *tdd;
	int i, domain;

	domain = PCPU_GET(domain);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[(i + domain) % vm_ndomains];
		thread_reap_domain(tdd);
	}
}

/*
 * Reap zombies from the local domain.
 */
static void
thread_reap(void)
{
	struct thread_domain_data *tdd;
	int domain;

	domain = PCPU_GET(domain);
	tdd = &thread_domain_data[domain];

	thread_reap_domain(tdd);
}

static void
thread_reap_task_cb(void *arg __unused, int pending __unused)
{

	thread_reap_all();
}

static void
thread_reap_callout_cb(void *arg __unused)
{
	struct thread_domain_data *tdd;
	int i, cticks, lticks;
	bool wantreap;

	wantreap = false;
	cticks = atomic_load_int(&ticks);
	for (i = 0; i < vm_ndomains; i++) {
		tdd = &thread_domain_data[i];
		lticks = tdd->tdd_reapticks;
		if (tdd->tdd_zombies != NULL &&
		    (u_int)(cticks - lticks) > 5 * hz) {
			wantreap = true;
			break;
		}
	}

	if (wantreap)
		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}

/*
 * Calling this function guarantees that any thread that exited before
 * the call is reaped when the function returns.  By 'exited' we mean
 * a thread removed from the process linkage with thread_unlink().
 * Practically, this means that the caller must lock/unlock the
 * corresponding process lock before the call, to synchronize with
 * thread_exit().
 */
void
thread_reap_barrier(void)
{
	struct task *t;

	/*
	 * First do context switches to each CPU to ensure that all
	 * PCPU pc_deadthreads are moved to the zombie list.
	 */
	quiesce_all_cpus("", PDROP);

	/*
	 * Second, fire the task in the same thread as normal
	 * thread_reap() is done, to serialize reaping.
	 */
	t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
	TASK_INIT(t, 0, thread_reap_task_cb, t);
	taskqueue_enqueue(taskqueue_thread, t);
	taskqueue_drain(taskqueue_thread, t);
	free(t, M_TEMP);
}
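
/*
 * A hedged usage sketch for the barrier (illustrative only): a caller
 * that observed a thread being unlinked under the process lock and
 * must be sure the thread is fully reaped before tearing down shared
 * state might do:
 *
 *	PROC_LOCK(p);
 *	// ... observe the exiting thread ...
 *	PROC_UNLOCK(p);
 *	thread_reap_barrier();
 *	// every thread that exited before this point is now reaped
 */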

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}

	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	bzero(&td->td_sa.args, sizeof(td->td_sa.args));
	kmsan_thread_alloc(td);
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}
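
/*
 * A hedged usage sketch for the allocation path (illustrative only;
 * the error value is the caller's choice, and a real caller such as
 * thr_new() must still link the thread into a process with
 * thread_link() and tidhash_add() before it can run):
 *
 *	struct thread *newtd;
 *
 *	newtd = thread_alloc(0);	// 0 selects the default kstack size
 *	if (newtd == NULL)
 *		return (ENOMEM);	// thread limit hit or no stack
 *	// ... set up the new thread, then link it into a process ...
 */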

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
static void
thread_free_batched(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	/*
	 * Freeing handled by the caller.
	 */
	td->td_tid = -1;
	kmsan_thread_free(td);
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid = td->td_tid;
	thread_free_batched(td);
	tid_free(tid);
	thread_count_dec();
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcowsync();
	oldlimit = lim_cowsync();
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

void
thread_cow_synced(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS(td->td_cowgen != p->p_cowgen);
	MPASS(td->td_ucred == p->p_ucred);
	MPASS(td->td_limit == p->p_limit);
	td->td_cowgen = p->p_cowgen;
}
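
/*
 * td_cowgen/p_cowgen implement copy-on-write snapshots of per-process
 * data (ucred, plimit): an updater installs new structures and bumps
 * p_cowgen, and each thread notices the mismatch on its way back to
 * user mode and calls thread_cow_update() to drop its old references.
 * A hedged sketch of the consumer-side check (illustrative only; the
 * real checks live on the syscall return and ast(9) paths):
 *
 *	if (td->td_cowgen != td->td_proc->p_cowgen)
 *		thread_cow_update(td);
 */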

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that would not be present
	 * on a new, untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  EXIT clears all signs of other threads when it
	 * goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	TD_SET_STATE(td, TDS_INACTIVE);
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	TD_SET_STATE(td, TDS_INACTIVE);
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
	 */
restart:
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2)) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, EINTR);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_BOUNDARY) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not necessarily performed at the
		 * user boundary, TDF_ALLPROCSUSP is used to avoid an
		 * immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_ALLPROCSUSP) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			td2->td_flags |= TDF_ALLPROCSUSP;
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	default:
		break;
	}
	thread_unlock(td2);
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Is someone already single threading?
	 * Or maybe single threading is not needed at all.
	 */
	if (mode == SINGLE_ALLPROC) {
		while ((p->p_flag & P_STOPPED_SINGLE) != 0) {
			if ((p->p_flag2 & P2_WEXIT) != 0)
				return (1);
			msleep(&p->p_flag, &p->p_mtx, PCATCH, "thrsgl", 0);
		}
	} else if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			ast_sched_locked(td2, TDA_SUSPEND);
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2)) {
				forward_signal(td2);
				thread_unlock(td2);
#endif
			} else
				thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT mode is requested by exit1() or execve();
		 * in both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for execve() to
		 * destroy the vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after the context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
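
/*
 * A hedged usage sketch, modeled on the exit1()/execve() callers
 * (illustrative only):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY) != 0) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	// another thread won the race
 *	}
 *	// ... p is now effectively single-threaded ...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */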

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  The single threader need not
			 * stop.  It is safe to access p->p_singlethread
			 * unlocked because it can only be set to our
			 * address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow the Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND);
		PROC_LOCK(p);
	}
	return (0);
}

/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or whether it should return ERESTART and have the
 * request processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to a casueword(9) failure (rv == 1), we
 * should handle the stop requests there, with the exception of cases
 * when the thread owns a kernel resource, for instance busied the umtx
 * key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, when retrying the whole lock
 * operation, we had better not stop there but delegate the handling to
 * ast.
 *
 * If the request is for thread termination, P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
 */
int
thread_check_susp(struct thread *td, bool sleep)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDA_SUSPEND is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if (!td_ast_pending(td, TDA_SUSPEND))
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (p->p_flag & P_SINGLE_EXIT)
		error = EINTR;
	else if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
		error = sleep ? thread_suspend_check(0) : ERESTART;
	PROC_UNLOCK(p);
	return (error);
}
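
/*
 * A hedged sketch of the casueword(9) retry loop described above,
 * loosely modeled on the umtx code (illustrative only; "uaddr",
 * "expect", "old" and "new" are made-up locals):
 *
 *	for (;;) {
 *		rv = casueword32(uaddr, expect, &old, new);
 *		if (rv == -1)
 *			return (EFAULT);	// fault on user access
 *		if (rv == 0)
 *			break;			// CAS succeeded
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);		// stop/exit was requested
 *		expect = old;			// refresh and retry
 *	}
 */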

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	ast_unsched_locked(td, TDA_SUSPEND);
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	ast_unsched_locked(td, TDA_SUSPEND);
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td, 0));
}

void
thread_run_flash(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (TD_ON_SLEEPQ(td))
		sleepq_remove_nested(td);
	else
		thread_lock(td);

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));

	TD_CLR_SUSPENDED(td);
	PROC_SLOCK(p);
	MPASS(p->p_suspcount > 0);
	p->p_suspcount--;
	PROC_SUNLOCK(p);
	if (setrunnable(td, 0))
		kick_proc0();
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td))
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			else
				thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads, they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue, however, as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			} else
				thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
	wakeup(&p->p_flag);
}

/*
 * Locate a thread by number and return with the proc lock held.
 *
 * Thread exit establishes proc -> tidhash lock ordering, but lookup
 * takes tidhash first and needs to return a locked proc.
 *
 * The problem is worked around by relying on type-safety of both
 * structures and doing the work in 2 steps:
 * - tidhash-locked lookup which saves both thread and proc pointers
 * - proc-locked verification that the found thread still matches
 */
static bool
tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
{
#define RUN_THRESH	16
	struct proc *p;
	struct thread *td;
	int run;
	bool locked;

	run = 0;
	rw_rlock(TIDHASHLOCK(tid));
	locked = true;
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid != tid) {
			run++;
			continue;
		}
		p = td->td_proc;
		if (pid != -1 && p->p_pid != pid) {
			td = NULL;
			break;
		}
		if (run > RUN_THRESH) {
			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
				LIST_REMOVE(td, td_hash);
				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					td, td_hash);
				rw_wunlock(TIDHASHLOCK(tid));
				locked = false;
				break;
			}
		}
		break;
	}
	if (locked)
		rw_runlock(TIDHASHLOCK(tid));
	if (td == NULL)
		return (false);
	*pp = p;
	*tdp = td;
	return (true);
}

struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	if (td->td_tid == tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	}

	for (;;) {
		if (!tdfind_hash(tid, pid, &p, &td))
			return (NULL);
		PROC_LOCK(p);
		if (td->td_tid != tid) {
			PROC_UNLOCK(p);
			continue;
		}
		if (td->td_proc != p) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			return (NULL);
		}
		return (td);
	}
}
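
/*
 * A hedged usage sketch (illustrative only): callers receive the
 * thread with its process locked and must drop that lock themselves.
 *
 *	td = tdfind(tid, -1);		// -1 matches any pid
 *	if (td == NULL)
 *		return (ESRCH);
 *	p = td->td_proc;
 *	// ... use td while holding PROC_LOCK(p) ...
 *	PROC_UNLOCK(p);
 */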

void
tidhash_add(struct thread *td)
{
	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}

void
tidhash_remove(struct thread *td)
{

	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_REMOVE(td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}
1815