xref: /freebsd/sys/kern/kern_thread.c (revision 1e4896b176ff664dc9c2fce5426bf2fdf8017a7d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/bitstring.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <sys/priv.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * The asserts below verify the stability of the struct thread and
 * struct proc layouts, as exposed by the KBI to modules.  On head the
 * KBI is allowed to drift; any change to the structures must be
 * accompanied by a matching update of the asserts.
 *
 * On the stable branches, after the KBI freeze, the conditions must
 * not be violated.  Typically new fields are added at the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xfc,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x4a0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xc4,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c0,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3d8,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x98,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x300,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x344,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x6c,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x78,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x26c,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x280,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x30c,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

static __exclusive_cache_line struct thread *thread_zombies;

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);
static void thread_free_batched(struct thread *td);

static __exclusive_cache_line struct mtx tid_lock;
static bitstr_t *tid_bitmap;

static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

static int maxthread;
SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
    &maxthread, 0, "Maximum number of threads");

static __exclusive_cache_line int nthreads;

static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
static u_long	tidhash;
static u_long	tidhashlock;
static struct	rwlock *tidhashtbl_lock;
#define	TIDHASH(tid)		(&tidhashtbl[(tid) & tidhash])
#define	TIDHASHLOCK(tid)	(&tidhashtbl_lock[(tid) & tidhashlock])
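
/*
 * Both masks are powers of two minus one (hashinit() for tidhash, the
 * computation in threadinit() for tidhashlock), so a thread ID picks
 * its bucket and the rwlock covering it with a plain bitwise AND.
 * Illustrative example (actual sizes depend on maxproc):
 *
 *	tidhash = 127 (128 buckets) => tidhashlock = 128/64 - 1 = 1
 *	tid 100003: bucket 100003 & 127 = 35, lock 100003 & 1 = 1
 */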

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static bool
thread_count_inc(void)
{
	static struct timeval lastfail;
	static int curfail;
	int nthreads_new;

	thread_reap();

	nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
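	/*
	 * The last 100 slots are kept in reserve: once the count gets
	 * within 100 of maxthread, only credentials holding the
	 * PRIV_MAXPROC privilege may consume them.
	 */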
	if (nthreads_new >= maxthread - 100) {
		if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
		    nthreads_new >= maxthread) {
			atomic_subtract_int(&nthreads, 1);
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				printf("maxthread limit exceeded by uid %u "
				    "(pid %d); consider increasing kern.maxthread\n",
				    curthread->td_ucred->cr_ruid, curproc->p_pid);
			}
			return (false);
		}
	}
	return (true);
}

static void
thread_count_sub(int n)
{

	atomic_subtract_int(&nthreads, n);
}

static void
thread_count_dec(void)
{

	thread_count_sub(1);
}

static lwpid_t
tid_alloc(void)
{
	static lwpid_t trytid;
	lwpid_t tid;

	mtx_lock(&tid_lock);
	/*
	 * It is an invariant that the bitmap is big enough to hold maxthread
	 * IDs. If we got to this point there has to be at least one free.
	 */
	if (trytid >= maxthread)
		trytid = 0;
	bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
	if (tid == -1) {
		KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
		trytid = 0;
		bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
		KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
	}
	bit_set(tid_bitmap, tid);
	trytid = tid + 1;
	mtx_unlock(&tid_lock);
	return (tid + NO_PID);
}

static void
tid_free_locked(lwpid_t rtid)
{
	lwpid_t tid;

	mtx_assert(&tid_lock, MA_OWNED);
	KASSERT(rtid >= NO_PID,
	    ("%s: invalid tid %d\n", __func__, rtid));
	tid = rtid - NO_PID;
	KASSERT(bit_test(tid_bitmap, tid) != 0,
	    ("thread ID %d not allocated\n", rtid));
	bit_clear(tid_bitmap, tid);
}

static void
tid_free(lwpid_t rtid)
{

	mtx_lock(&tid_lock);
	tid_free_locked(rtid);
	mtx_unlock(&tid_lock);
}

static void
tid_free_batch(lwpid_t *batch, int n)
{
	int i;

	mtx_lock(&tid_lock);
	for (i = 0; i < n; i++) {
		tid_free_locked(batch[i]);
	}
	mtx_unlock(&tid_lock);
}

/*
 * Batching for thread reaping.
 */
struct tidbatch {
	lwpid_t tab[16];
	int n;
};

static void
tidbatch_prep(struct tidbatch *tb)
{

	tb->n = 0;
}

static void
tidbatch_add(struct tidbatch *tb, struct thread *td)
{

	KASSERT(tb->n < nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	tb->tab[tb->n] = td->td_tid;
	tb->n++;
}

static void
tidbatch_process(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n == nitems(tb->tab)) {
		tid_free_batch(tb->tab, tb->n);
		tb->n = 0;
	}
}

static void
tidbatch_final(struct tidbatch *tb)
{

	KASSERT(tb->n <= nitems(tb->tab),
	    ("%s: count too high %d", __func__, tb->n));
	if (tb->n != 0) {
		tid_free_batch(tb->tab, tb->n);
	}
}
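
/*
 * The intended calling pattern, as used by thread_reap() below, is:
 *
 *	struct tidbatch tb;
 *
 *	tidbatch_prep(&tb);
 *	for each exiting thread td:
 *		tidbatch_add(&tb, td);
 *		tidbatch_process(&tb);	(flushes when the batch fills)
 *	tidbatch_final(&tb);		(flushes any remainder)
 *
 * This amortizes the tid_lock acquisition over up to nitems(tb->tab)
 * thread IDs.
 */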

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_lastcpu = td->td_oncpu = NOCPU;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	MPASS(td->td_sel == NULL);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);
	seltdfini(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	MPASS(td->td_sel == NULL);
}
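
/*
 * Note on UMA semantics: thread_ctor/thread_dtor run on every
 * allocation from and free to the zone, while thread_init/thread_fini
 * run only when memory enters or leaves the zone's backing store.
 * That, together with UMA_ZONE_NOFREE below, is what makes the
 * sleepqueue and turnstile pointers type-stable.
 */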

/*
 * For a newly created process, link up all the structures and the
 * initial thread.
 * Called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

extern int max_threads_per_proc;

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	u_long i;
	lwpid_t tid0;
	uint32_t flags;

	/*
	 * Place an upper limit on threads which can be allocated.
	 *
	 * Note that other factors may make the de facto limit much lower.
	 *
	 * Platform limits are somewhat arbitrary but deemed "more than good
	 * enough" for the foreseeable future.
	 */
	if (maxthread == 0) {
#ifdef _LP64
		maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
#else
		maxthread = MIN(maxproc * max_threads_per_proc, 100000);
#endif
	}

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
	/*
	 * Handle thread0.
	 */
	thread_count_inc();
	tid0 = tid_alloc();
	if (tid0 != THREAD0_TID)
		panic("tid0 %d != %d\n", tid0, THREAD0_TID);

	flags = UMA_ZONE_NOFREE;
#ifdef __aarch64__
	/*
	 * Force thread structures to be allocated from the direct map.
	 * Otherwise, superpage promotions and demotions may temporarily
	 * invalidate thread structure mappings.  For most dynamically allocated
	 * structures this is not a problem, but translation faults cannot be
	 * handled without accessing curthread.
	 */
	flags |= UMA_ZONE_CONTIG;
#endif
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, flags);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	tidhashlock = (tidhash + 1) / 64;
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");
}

504 
505 /*
506  * Place an unused thread on the zombie list.
507  */
508 void
509 thread_zombie(struct thread *td)
510 {
511 	struct thread *ztd;
512 
513 	ztd = atomic_load_ptr(&thread_zombies);
514 	for (;;) {
515 		td->td_zombie = ztd;
516 		if (atomic_fcmpset_rel_ptr((uintptr_t *)&thread_zombies,
517 		    (uintptr_t *)&ztd, (uintptr_t)td))
518 			break;
519 		continue;
520 	}
521 }
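
/*
 * The loop above is a lock-free LIFO push: atomic_fcmpset reloads the
 * current head into ztd on failure, so each retry re-links td in front
 * of whatever another CPU pushed concurrently.  thread_reap() drains
 * the whole list at once with atomic_swap_ptr().
 */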

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie threads.
 */
void
thread_reap(void)
{
	struct thread *itd, *ntd;
	struct tidbatch tidbatch;
	struct credbatch credbatch;
	int tdcount;
	struct plimit *lim;
	int limcount;

	/*
	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
	 * but most of the time the list is empty.
	 */
	if (thread_zombies == NULL)
		return;

	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&thread_zombies,
	    (uintptr_t)NULL);
	if (itd == NULL)
		return;

	tidbatch_prep(&tidbatch);
	credbatch_prep(&credbatch);
	tdcount = 0;
	lim = NULL;
	limcount = 0;
	while (itd != NULL) {
		ntd = itd->td_zombie;
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
		tidbatch_add(&tidbatch, itd);
		credbatch_add(&credbatch, itd);
		MPASS(itd->td_limit != NULL);
		if (lim != itd->td_limit) {
			if (limcount != 0) {
				lim_freen(lim, limcount);
				limcount = 0;
			}
		}
		lim = itd->td_limit;
		limcount++;
		thread_free_batched(itd);
		tidbatch_process(&tidbatch);
		credbatch_process(&credbatch);
		tdcount++;
		if (tdcount == 32) {
			thread_count_sub(tdcount);
			tdcount = 0;
		}
		itd = ntd;
	}

	tidbatch_final(&tidbatch);
	credbatch_final(&credbatch);
	if (tdcount != 0) {
		thread_count_sub(tdcount);
	}
	MPASS(limcount != 0);
	lim_freen(lim, limcount);
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}

	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
static void
thread_free_batched(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	/*
	 * Freeing of the tid is handled by the caller.
	 */
	td->td_tid = -1;
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid = td->td_tid;
	thread_free_batched(td);
	tid_free(tid);
	thread_count_dec();
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_realucred = crcowget(p->p_ucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	MPASS(td->td_realucred == td->td_ucred);
	newtd->td_realucred = crcowget(td->td_realucred);
	newtd->td_ucred = newtd->td_realucred;
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_realucred != NULL)
		crcowfree(td);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldlimit = NULL;
	PROC_LOCK(p);
	oldcred = crcowsync();
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	MPASS(td->td_realucred == td->td_ucred);

	/*
	 * Drop FPU and debug register state storage, and any other
	 * architecture-specific resources that would not be present in
	 * a newly created process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all of this if we never had
	 * threads.  SINGLE_EXIT single-threading clears all traces of
	 * the other threads, so the last thread always takes the short
	 * path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() once it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread, p, false);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg_locked(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanup that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;

	/*
	 * Since the thread lock is dropped by the scheduler we have
	 * to retry to check for races.
	 */
restart:
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2)) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, EINTR);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) &&
		    (td2->td_flags & TDF_BOUNDARY) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
			return (wakeup_swapper);
		}
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0) {
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
			thread_lock(td2);
			goto restart;
		}
		if (TD_CAN_ABORT(td2)) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
				return (wakeup_swapper);
			}
		}
		break;
	default:
		break;
	}
	thread_unlock(td2);
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 *
 * A process has successfully single-threaded in suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
				thread_unlock(td2);
#endif
			} else
				thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.
		 * SINGLE_EXIT is requested by exit1() and execve(); in
		 * both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, so without this wait it would be possible
		 * for execve() to destroy the vmspace while our other
		 * threads are still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
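
/*
 * Illustrative calling sketch (hypothetical caller; exit1() and
 * execve() follow this shape with SINGLE_EXIT and SINGLE_BOUNDARY
 * respectively):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY) != 0) {
 *		// Another thread won the race to single-thread;
 *		// back out and let it proceed.
 *		PROC_UNLOCK(p);
 *		return (ERESTART);
 *	}
 *	// ... we are now the sole runnable thread ...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */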

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in effect is
			 * single-threading.  The single-threading thread
			 * need not stop.  It is safe to access
			 * p->p_singlethread unlocked because it can only
			 * be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just exit.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND);
		PROC_LOCK(p);
	}
	return (0);
}
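
/*
 * A typical consumer is ast(): when TDF_NEEDSUSPCHK is set it does,
 * roughly (a sketch, not a verbatim quote of subr_trap.c):
 *
 *	PROC_LOCK(p);
 *	thread_suspend_check(0);	// may suspend, or never return
 *	PROC_UNLOCK(p);
 */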

/*
 * Check for possible stops and suspensions while executing a
 * casueword or similar transiently failing operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or it should return ERESTART and the request will be
 * processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to casueword(9) failure (rv == 1), we
 * should handle the stop requests there, with exception of cases when
 * the thread owns a kernel resource, for instance busied the umtx
 * key, or when functions return immediately if thread_check_susp()
 * returned non-zero.  On the other hand, retrying the whole lock
 * operation, we better not stop there but delegate the handling to
 * ast.
 *
 * If the request is for thread termination P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
 */
int
thread_check_susp(struct thread *td, bool sleep)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (p->p_flag & P_SINGLE_EXIT)
		error = EINTR;
	else if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
		error = sleep ? thread_suspend_check(0) : ERESTART;
	PROC_UNLOCK(p);
	return (error);
}
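
/*
 * Sketch of the retry loop this is meant for (modeled on the
 * casueword(9) consumers in kern_umtx.c; variable names are
 * illustrative):
 *
 *	for (;;) {
 *		rv = casueword32(uaddr, expected, &actual, new);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;		// store performed
 *		// rv == 1: comparison failed, possibly spuriously;
 *		// check for suspension before retrying so the retry
 *		// does not turn into a lockstep loop, then re-examine
 *		// 'actual'.
 *		error = thread_check_susp(td, true);
 *		if (error != 0)
 *			return (error);
 *	}
 */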

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td, 0));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			} else
				thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now that we have downgraded to
		 * single-threaded, let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single-threading thread must be
	 * allowed to continue however, as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			} else
				thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Locate a thread by number and return with proc lock held.
 *
 * Thread exit establishes proc -> tidhash lock ordering, but lookup
 * takes tidhash first and needs to return a locked proc.
 *
 * The problem is worked around by relying on type-safety of both
 * structures and doing the work in two steps:
 * - tidhash-locked lookup which saves both thread and proc pointers
 * - proc-locked verification that the found thread still matches
 */
static bool
tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
{
#define RUN_THRESH	16
	struct proc *p;
	struct thread *td;
	int run;
	bool locked;

	run = 0;
	rw_rlock(TIDHASHLOCK(tid));
	locked = true;
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid != tid) {
			run++;
			continue;
		}
		p = td->td_proc;
		if (pid != -1 && p->p_pid != pid) {
			td = NULL;
			break;
		}
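		/*
		 * Move-to-front heuristic: if the walk to this entry was
		 * long, relocate it to the head of its bucket so that
		 * repeated lookups of a hot tid stay cheap.
		 */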
		if (run > RUN_THRESH) {
			if (rw_try_upgrade(TIDHASHLOCK(tid))) {
				LIST_REMOVE(td, td_hash);
				LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					td, td_hash);
				rw_wunlock(TIDHASHLOCK(tid));
				locked = false;
				break;
			}
		}
		break;
	}
	if (locked)
		rw_runlock(TIDHASHLOCK(tid));
	if (td == NULL)
		return (false);
	*pp = p;
	*tdp = td;
	return (true);
}

struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	if (td->td_tid == tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	}

	for (;;) {
		if (!tdfind_hash(tid, pid, &p, &td))
			return (NULL);
		PROC_LOCK(p);
		if (td->td_tid != tid) {
			PROC_UNLOCK(p);
			continue;
		}
		if (td->td_proc != p) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			return (NULL);
		}
		return (td);
	}
}
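
/*
 * Example use (a sketch; thr_kill-style consumers follow this shape,
 * names are illustrative):
 *
 *	td = tdfind(tid, p->p_pid);	// or pid == -1 to match any process
 *	if (td == NULL)
 *		return (ESRCH);
 *	// td->td_proc is now locked; use td, then:
 *	PROC_UNLOCK(td->td_proc);
 */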

void
tidhash_add(struct thread *td)
{
	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}

void
tidhash_remove(struct thread *td)
{

	rw_wlock(TIDHASHLOCK(td->td_tid));
	LIST_REMOVE(td, td_hash);
	rw_wunlock(TIDHASHLOCK(td->td_tid));
}
1596