xref: /freebsd/sys/kern/kern_thread.c (revision a3422d96bd4c08d07bb6c1984c86578b67ee6a41)
1 /*-
2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3  *  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_witness.h"
30 #include "opt_hwpmc_hooks.h"
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/proc.h>
41 #include <sys/rangelock.h>
42 #include <sys/resourcevar.h>
43 #include <sys/sdt.h>
44 #include <sys/smp.h>
45 #include <sys/sched.h>
46 #include <sys/sleepqueue.h>
47 #include <sys/selinfo.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/sysent.h>
50 #include <sys/turnstile.h>
51 #include <sys/ktr.h>
52 #include <sys/rwlock.h>
53 #include <sys/umtx.h>
54 #include <sys/vmmeter.h>
55 #include <sys/cpuset.h>
56 #ifdef	HWPMC_HOOKS
57 #include <sys/pmckern.h>
58 #endif
59 
60 #include <security/audit/audit.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 #include <vm/uma.h>
65 #include <vm/vm_domain.h>
66 #include <sys/eventhandler.h>
67 
68 SDT_PROVIDER_DECLARE(proc);
69 SDT_PROBE_DEFINE(proc, , , lwp__exit);
70 
71 /*
72  * Thread-related storage.
73  */
74 static uma_zone_t thread_zone;
75 
76 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
77 static struct mtx zombie_lock;
78 MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
79 
80 static void thread_zombie(struct thread *);
81 static int thread_unsuspend_one(struct thread *td, struct proc *p,
82     bool boundary);
83 
84 #define TID_BUFFER_SIZE	1024
85 
86 struct mtx tid_lock;
87 static struct unrhdr *tid_unrhdr;
88 static lwpid_t tid_buffer[TID_BUFFER_SIZE];
89 static int tid_head, tid_tail;
90 static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
91 
92 struct	tidhashhead *tidhashtbl;
93 u_long	tidhash;
94 struct	rwlock tidhash_lock;
95 
96 static lwpid_t
97 tid_alloc(void)
98 {
99 	lwpid_t	tid;
100 
101 	tid = alloc_unr(tid_unrhdr);
102 	if (tid != -1)
103 		return (tid);
104 	mtx_lock(&tid_lock);
105 	if (tid_head == tid_tail) {
106 		mtx_unlock(&tid_lock);
107 		return (-1);
108 	}
109 	tid = tid_buffer[tid_head];
110 	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
111 	mtx_unlock(&tid_lock);
112 	return (tid);
113 }
114 
115 static void
116 tid_free(lwpid_t tid)
117 {
118 	lwpid_t tmp_tid = -1;
119 
120 	mtx_lock(&tid_lock);
121 	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
122 		tmp_tid = tid_buffer[tid_head];
123 		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
124 	}
125 	tid_buffer[tid_tail] = tid;
126 	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
127 	mtx_unlock(&tid_lock);
128 	if (tmp_tid != -1)
129 		free_unr(tid_unrhdr, tmp_tid);
130 }
131 
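/*
 * Together, tid_alloc() and tid_free() delay the reuse of freed TIDs:
 * tid_free() parks a TID in tid_buffer and evicts the oldest parked
 * entry back into the unr(9) allocator only once the ring fills up,
 * while tid_alloc() falls back to the ring head only when unr(9) has
 * no fresh numbers left.  A rough sketch of the effect, with a
 * hypothetical TID value:
 *
 *	lwpid_t tid = tid_alloc();	(say, 100001 from tid_unrhdr)
 *	tid_free(tid);			(parked in tid_buffer)
 *
 * 100001 is normally not handed out again until later frees push it
 * out of the ring, which reduces the chance that a quickly recycled
 * TID is mistaken for the old thread.
 */
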
132 /*
133  * Prepare a thread for use.
134  */
135 static int
136 thread_ctor(void *mem, int size, void *arg, int flags)
137 {
138 	struct thread	*td;
139 
140 	td = (struct thread *)mem;
141 	td->td_state = TDS_INACTIVE;
142 	td->td_oncpu = NOCPU;
143 
144 	td->td_tid = tid_alloc();
145 
146 	/*
147 	 * Note that td_critnest begins life as 1 because the thread is not
148 	 * running and is thereby implicitly waiting to be on the receiving
149 	 * end of a context switch.
150 	 */
151 	td->td_critnest = 1;
152 	td->td_lend_user_pri = PRI_MAX;
153 	EVENTHANDLER_INVOKE(thread_ctor, td);
154 #ifdef AUDIT
155 	audit_thread_alloc(td);
156 #endif
157 	umtx_thread_alloc(td);
158 	return (0);
159 }
160 
161 /*
162  * Reclaim a thread after use.
163  */
164 static void
165 thread_dtor(void *mem, int size, void *arg)
166 {
167 	struct thread *td;
168 
169 	td = (struct thread *)mem;
170 
171 #ifdef INVARIANTS
172 	/* Verify that this thread is in a safe state to free. */
173 	switch (td->td_state) {
174 	case TDS_INHIBITED:
175 	case TDS_RUNNING:
176 	case TDS_CAN_RUN:
177 	case TDS_RUNQ:
178 		/*
179 		 * We must never unlink a thread that is in one of
180 		 * these states, because it is currently active.
181 		 */
182 		panic("bad state for thread unlinking");
183 		/* NOTREACHED */
184 	case TDS_INACTIVE:
185 		break;
186 	default:
187 		panic("bad thread state");
188 		/* NOTREACHED */
189 	}
190 #endif
191 #ifdef AUDIT
192 	audit_thread_free(td);
193 #endif
194 	/* Free all OSD associated to this thread. */
195 	osd_thread_exit(td);
196 	td_softdep_cleanup(td);
197 	MPASS(td->td_su == NULL);
198 
199 	EVENTHANDLER_INVOKE(thread_dtor, td);
200 	tid_free(td->td_tid);
201 }
202 
203 /*
204  * Initialize type-stable parts of a thread (when newly created).
205  */
206 static int
207 thread_init(void *mem, int size, int flags)
208 {
209 	struct thread *td;
210 
211 	td = (struct thread *)mem;
212 
213 	td->td_sleepqueue = sleepq_alloc();
214 	td->td_turnstile = turnstile_alloc();
215 	td->td_rlqe = NULL;
216 	EVENTHANDLER_INVOKE(thread_init, td);
217 	umtx_thread_init(td);
218 	td->td_kstack = 0;
219 	td->td_sel = NULL;
220 	return (0);
221 }
222 
223 /*
224  * Tear down type-stable parts of a thread (just before being discarded).
225  */
226 static void
227 thread_fini(void *mem, int size)
228 {
229 	struct thread *td;
230 
231 	td = (struct thread *)mem;
232 	EVENTHANDLER_INVOKE(thread_fini, td);
233 	rlqentry_free(td->td_rlqe);
234 	turnstile_free(td->td_turnstile);
235 	sleepq_free(td->td_sleepqueue);
236 	umtx_thread_fini(td);
237 	seltdfini(td);
238 }
239 
240 /*
241  * For a newly created process,
242  * link up all the structures and its initial thread, etc.
243  * Called from:
244  * {arch}/{arch}/machdep.c   {arch}_init(), init386(), etc.
245  * proc_dtor() (should go away)
246  * proc_init()
247  */
248 void
249 proc_linkup0(struct proc *p, struct thread *td)
250 {
251 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
252 	proc_linkup(p, td);
253 }
254 
255 void
256 proc_linkup(struct proc *p, struct thread *td)
257 {
258 
259 	sigqueue_init(&p->p_sigqueue, p);
260 	p->p_ksi = ksiginfo_alloc(1);
261 	if (p->p_ksi != NULL) {
262 		/* XXX p_ksi may be null if ksiginfo zone is not ready */
263 		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
264 	}
265 	LIST_INIT(&p->p_mqnotifier);
266 	p->p_numthreads = 0;
267 	thread_link(td, p);
268 }
269 
270 /*
271  * Initialize global thread allocation resources.
272  */
273 void
274 threadinit(void)
275 {
276 
277 	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
278 
279 	/*
280 	 * pid_max cannot be greater than PID_MAX.
281 	 * leave one number for thread0.
282 	 */
283 	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);
284 
285 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
286 	    thread_ctor, thread_dtor, thread_init, thread_fini,
287 	    32 - 1, UMA_ZONE_NOFREE);
288 	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
289 	rw_init(&tidhash_lock, "tidhash");
290 }
291 
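/*
 * UMA invokes the callbacks registered above in a fixed order:
 * thread_init() once when an item is first created, thread_ctor() on
 * every allocation, thread_dtor() on every free, and thread_fini()
 * when an item would be released back to the VM.  Because the zone is
 * created with UMA_ZONE_NOFREE, that release effectively never
 * happens, so the state set up in thread_init() (sleepqueue,
 * turnstile) stays type-stable for the lifetime of the system.
 */
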
292 /*
293  * Place an unused thread on the zombie list.
294  * Use the slpq as that must be unused by now.
295  */
296 void
297 thread_zombie(struct thread *td)
298 {
299 	mtx_lock_spin(&zombie_lock);
300 	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
301 	mtx_unlock_spin(&zombie_lock);
302 }
303 
304 /*
305  * Release a thread that has exited after cpu_throw().
306  */
307 void
308 thread_stash(struct thread *td)
309 {
310 	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
311 	thread_zombie(td);
312 }
313 
314 /*
315  * Reap zombie resources.
316  */
317 void
318 thread_reap(void)
319 {
320 	struct thread *td_first, *td_next;
321 
322 	/*
323 	 * Don't even bother to lock if none at this instant;
324 	 * we really don't care about the next instant.
325 	 */
326 	if (!TAILQ_EMPTY(&zombie_threads)) {
327 		mtx_lock_spin(&zombie_lock);
328 		td_first = TAILQ_FIRST(&zombie_threads);
329 		if (td_first)
330 			TAILQ_INIT(&zombie_threads);
331 		mtx_unlock_spin(&zombie_lock);
332 		while (td_first) {
333 			td_next = TAILQ_NEXT(td_first, td_slpq);
334 			thread_cow_free(td_first);
335 			thread_free(td_first);
336 			td_first = td_next;
337 		}
338 	}
339 }
340 
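/*
 * The unlocked TAILQ_EMPTY() check above is a racy optimization: a
 * zombie queued just after the check is merely picked up by a later
 * caller.  The list is detached only under zombie_lock, so no thread
 * can be reaped twice or lost.
 */
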
341 /*
342  * Allocate a thread.
343  */
344 struct thread *
345 thread_alloc(int pages)
346 {
347 	struct thread *td;
348 
349 	thread_reap(); /* check if any zombies to get */
350 
351 	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
352 	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
353 	if (!vm_thread_new(td, pages)) {
354 		uma_zfree(thread_zone, td);
355 		return (NULL);
356 	}
357 	cpu_thread_alloc(td);
358 	vm_domain_policy_init(&td->td_vm_dom_policy);
359 	return (td);
360 }
361 
362 int
363 thread_alloc_stack(struct thread *td, int pages)
364 {
365 
366 	KASSERT(td->td_kstack == 0,
367 	    ("thread_alloc_stack called on a thread with kstack"));
368 	if (!vm_thread_new(td, pages))
369 		return (0);
370 	cpu_thread_alloc(td);
371 	return (1);
372 }
373 
374 /*
375  * Deallocate a thread.
376  */
377 void
378 thread_free(struct thread *td)
379 {
380 
381 	lock_profile_thread_exit(td);
382 	if (td->td_cpuset)
383 		cpuset_rel(td->td_cpuset);
384 	td->td_cpuset = NULL;
385 	cpu_thread_free(td);
386 	if (td->td_kstack != 0)
387 		vm_thread_dispose(td);
388 	vm_domain_policy_cleanup(&td->td_vm_dom_policy);
389 	callout_drain(&td->td_slpcallout);
390 	uma_zfree(thread_zone, td);
391 }
392 
393 void
394 thread_cow_get_proc(struct thread *newtd, struct proc *p)
395 {
396 
397 	PROC_LOCK_ASSERT(p, MA_OWNED);
398 	newtd->td_ucred = crhold(p->p_ucred);
399 	newtd->td_limit = lim_hold(p->p_limit);
400 	newtd->td_cowgen = p->p_cowgen;
401 }
402 
403 void
404 thread_cow_get(struct thread *newtd, struct thread *td)
405 {
406 
407 	newtd->td_ucred = crhold(td->td_ucred);
408 	newtd->td_limit = lim_hold(td->td_limit);
409 	newtd->td_cowgen = td->td_cowgen;
410 }
411 
412 void
413 thread_cow_free(struct thread *td)
414 {
415 
416 	if (td->td_ucred != NULL)
417 		crfree(td->td_ucred);
418 	if (td->td_limit != NULL)
419 		lim_free(td->td_limit);
420 }
421 
422 void
423 thread_cow_update(struct thread *td)
424 {
425 	struct proc *p;
426 	struct ucred *oldcred;
427 	struct plimit *oldlimit;
428 
429 	p = td->td_proc;
430 	oldcred = NULL;
431 	oldlimit = NULL;
432 	PROC_LOCK(p);
433 	if (td->td_ucred != p->p_ucred) {
434 		oldcred = td->td_ucred;
435 		td->td_ucred = crhold(p->p_ucred);
436 	}
437 	if (td->td_limit != p->p_limit) {
438 		oldlimit = td->td_limit;
439 		td->td_limit = lim_hold(p->p_limit);
440 	}
441 	td->td_cowgen = p->p_cowgen;
442 	PROC_UNLOCK(p);
443 	if (oldcred != NULL)
444 		crfree(oldcred);
445 	if (oldlimit != NULL)
446 		lim_free(oldlimit);
447 }
448 
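/*
 * Consumers of the cached td_ucred/td_limit references refresh them
 * lazily by comparing generations; the canonical check, done on the
 * return-to-user path, looks roughly like:
 *
 *	if (td->td_cowgen != p->p_cowgen)
 *		thread_cow_update(td);
 */
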
449 /*
450  * Discard the current thread and exit from its context.
451  * Always called with scheduler locked.
452  *
453  * Because we can't free a thread while we're operating under its context,
454  * push the current thread into our CPU's deadthread holder. This means
455  * we needn't worry about someone else grabbing our context before we
456  * do a cpu_throw().
457  */
458 void
459 thread_exit(void)
460 {
461 	uint64_t runtime, new_switchtime;
462 	struct thread *td;
463 	struct thread *td2;
464 	struct proc *p;
465 	int wakeup_swapper;
466 
467 	td = curthread;
468 	p = td->td_proc;
469 
470 	PROC_SLOCK_ASSERT(p, MA_OWNED);
471 	mtx_assert(&Giant, MA_NOTOWNED);
472 
473 	PROC_LOCK_ASSERT(p, MA_OWNED);
474 	KASSERT(p != NULL, ("thread exiting without a process"));
475 	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
476 	    (long)p->p_pid, td->td_name);
477 	SDT_PROBE0(proc, , , lwp__exit);
478 	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
479 
480 #ifdef AUDIT
481 	AUDIT_SYSCALL_EXIT(0, td);
482 #endif
483 	/*
484 	 * Drop FPU & debug register state storage, or any other
485 	 * architecture-specific resources that
486 	 * would not be present in a new untouched process.
487 	 */
488 	cpu_thread_exit(td);
489 
490 	/*
491 	 * The last thread is left attached to the process
492 	 * so that the whole bundle gets recycled. Skip
493 	 * all this stuff if we never had threads.
494 	 * The exit code clears all signs of other threads when
495 	 * it goes to single threading, so the last thread always
496 	 * takes the short path.
497 	 */
498 	if (p->p_flag & P_HADTHREADS) {
499 		if (p->p_numthreads > 1) {
500 			atomic_add_int(&td->td_proc->p_exitthreads, 1);
501 			thread_unlink(td);
502 			td2 = FIRST_THREAD_IN_PROC(p);
503 			sched_exit_thread(td2, td);
504 
505 			/*
506 			 * The test below is NOT true if we are the
507 			 * sole exiting thread. P_STOPPED_SINGLE is unset
508 			 * in exit1() after it is the only survivor.
509 			 */
510 			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
511 				if (p->p_numthreads == p->p_suspcount) {
512 					thread_lock(p->p_singlethread);
513 					wakeup_swapper = thread_unsuspend_one(
514 						p->p_singlethread, p, false);
515 					thread_unlock(p->p_singlethread);
516 					if (wakeup_swapper)
517 						kick_proc0();
518 				}
519 			}
520 
521 			PCPU_SET(deadthread, td);
522 		} else {
523 			/*
524 			 * The last thread is exiting, but not through exit().
525 			 */
526 			panic("thread_exit: Last thread exiting on its own");
527 		}
528 	}
529 #ifdef	HWPMC_HOOKS
530 	/*
531 	 * If this thread is part of a process that is being tracked by hwpmc(4),
532 	 * inform the module of the thread's impending exit.
533 	 */
534 	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
535 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
536 #endif
537 	PROC_UNLOCK(p);
538 	PROC_STATLOCK(p);
539 	thread_lock(td);
540 	PROC_SUNLOCK(p);
541 
542 	/* Do the same timestamp bookkeeping that mi_switch() would do. */
543 	new_switchtime = cpu_ticks();
544 	runtime = new_switchtime - PCPU_GET(switchtime);
545 	td->td_runtime += runtime;
546 	td->td_incruntime += runtime;
547 	PCPU_SET(switchtime, new_switchtime);
548 	PCPU_SET(switchticks, ticks);
549 	VM_CNT_INC(v_swtch);
550 
551 	/* Save our resource usage in our process. */
552 	td->td_ru.ru_nvcsw++;
553 	ruxagg(p, td);
554 	rucollect(&p->p_ru, &td->td_ru);
555 	PROC_STATUNLOCK(p);
556 
557 	td->td_state = TDS_INACTIVE;
558 #ifdef WITNESS
559 	witness_thread_exit(td);
560 #endif
561 	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
562 	sched_throw(td);
563 	panic("I'm a teapot!");
564 	/* NOTREACHED */
565 }
566 
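/*
 * The thread parked in the deadthread slot above is collected by the
 * next thread to run on this CPU (see, e.g., fork_exit()), which
 * passes it to thread_stash() once the exiting context can no longer
 * be referenced.
 */
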
567 /*
568  * Do any thread-specific cleanups that may be needed in wait().
569  * Called with Giant, proc and schedlock not held.
570  */
571 void
572 thread_wait(struct proc *p)
573 {
574 	struct thread *td;
575 
576 	mtx_assert(&Giant, MA_NOTOWNED);
577 	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
578 	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
579 	td = FIRST_THREAD_IN_PROC(p);
580 	/* Lock the last thread so we spin until it exits cpu_throw(). */
581 	thread_lock(td);
582 	thread_unlock(td);
583 	lock_profile_thread_exit(td);
584 	cpuset_rel(td->td_cpuset);
585 	td->td_cpuset = NULL;
586 	cpu_thread_clean(td);
587 	thread_cow_free(td);
588 	callout_drain(&td->td_slpcallout);
589 	thread_reap();	/* check for zombie threads etc. */
590 }
591 
592 /*
593  * Link a thread to a process.
594  * Set up anything that needs to be initialized for it to
595  * be used by the process.
596  */
597 void
598 thread_link(struct thread *td, struct proc *p)
599 {
600 
601 	/*
602 	 * XXX This can't be enabled because it's called for proc0 before
603 	 * its lock has been created.
604 	 * PROC_LOCK_ASSERT(p, MA_OWNED);
605 	 */
606 	td->td_state    = TDS_INACTIVE;
607 	td->td_proc     = p;
608 	td->td_flags    = TDF_INMEM;
609 
610 	LIST_INIT(&td->td_contested);
611 	LIST_INIT(&td->td_lprof[0]);
612 	LIST_INIT(&td->td_lprof[1]);
613 	sigqueue_init(&td->td_sigqueue, p);
614 	callout_init(&td->td_slpcallout, 1);
615 	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
616 	p->p_numthreads++;
617 }
618 
619 /*
620  * Called from:
621  *  thread_exit()
622  */
623 void
624 thread_unlink(struct thread *td)
625 {
626 	struct proc *p = td->td_proc;
627 
628 	PROC_LOCK_ASSERT(p, MA_OWNED);
629 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
630 	p->p_numthreads--;
631 	/* Could clear a few other things here. */
632 	/* Must NOT clear links to proc! */
633 }
634 
635 static int
636 calc_remaining(struct proc *p, int mode)
637 {
638 	int remaining;
639 
640 	PROC_LOCK_ASSERT(p, MA_OWNED);
641 	PROC_SLOCK_ASSERT(p, MA_OWNED);
642 	if (mode == SINGLE_EXIT)
643 		remaining = p->p_numthreads;
644 	else if (mode == SINGLE_BOUNDARY)
645 		remaining = p->p_numthreads - p->p_boundary_count;
646 	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
647 		remaining = p->p_numthreads - p->p_suspcount;
648 	else
649 		panic("calc_remaining: wrong mode %d", mode);
650 	return (remaining);
651 }
652 
653 static int
654 remain_for_mode(int mode)
655 {
656 
657 	return (mode == SINGLE_ALLPROC ? 0 : 1);
658 }
659 
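/*
 * Wake up or unsuspend an inhibited thread td2 as appropriate for the
 * requested single-threading mode, so that it can make progress
 * toward the user boundary (or toward exiting).  Returns non-zero if
 * the swapper needs to be kicked to bring a swapped-out thread back in.
 */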
660 static int
661 weed_inhib(int mode, struct thread *td2, struct proc *p)
662 {
663 	int wakeup_swapper;
664 
665 	PROC_LOCK_ASSERT(p, MA_OWNED);
666 	PROC_SLOCK_ASSERT(p, MA_OWNED);
667 	THREAD_LOCK_ASSERT(td2, MA_OWNED);
668 
669 	wakeup_swapper = 0;
670 	switch (mode) {
671 	case SINGLE_EXIT:
672 		if (TD_IS_SUSPENDED(td2))
673 			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
674 		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
675 			wakeup_swapper |= sleepq_abort(td2, EINTR);
676 		break;
677 	case SINGLE_BOUNDARY:
678 	case SINGLE_NO_EXIT:
679 		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
680 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
681 		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
682 			wakeup_swapper |= sleepq_abort(td2, ERESTART);
683 		break;
684 	case SINGLE_ALLPROC:
685 		/*
686 		 * ALLPROC suspend tries to avoid spurious EINTR for
687 		 * threads sleeping interruptibly, by suspending the
688 		 * thread directly, similarly to sig_suspend_threads().
689 		 * Since such a sleep is not performed at the user
690 		 * boundary, the TDF_BOUNDARY flag is not set, and
691 		 * TDF_ALLPROCSUSP is used to avoid an immediate un-suspend.
692 		 */
693 		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
694 		    TDF_ALLPROCSUSP)) == 0)
695 			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
696 		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
697 			if ((td2->td_flags & TDF_SBDRY) == 0) {
698 				thread_suspend_one(td2);
699 				td2->td_flags |= TDF_ALLPROCSUSP;
700 			} else {
701 				wakeup_swapper |= sleepq_abort(td2, ERESTART);
702 			}
703 		}
704 		break;
705 	}
706 	return (wakeup_swapper);
707 }
708 
709 /*
710  * Enforce single-threading.
711  *
712  * Returns 1 if the caller must abort (another thread is waiting to
713  * exit the process or similar). The process must be locked!
714  * Returns 0 when the caller is successfully the only thread running.
715  * A process has successfully single-threaded in the suspend mode when
716  * there are no threads in user mode. Threads in the kernel must be
717  * allowed to continue until they get to the user boundary. They may even
718  * copy out their return values and data before suspending. They may,
719  * however, be accelerated in reaching the user boundary, as we will wake
720  * up any sleeping threads that are interruptible (PCATCH).
721  */
722 int
723 thread_single(struct proc *p, int mode)
724 {
725 	struct thread *td;
726 	struct thread *td2;
727 	int remaining, wakeup_swapper;
728 
729 	td = curthread;
730 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
731 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
732 	    ("invalid mode %d", mode));
733 	/*
734 	 * If allowing non-ALLPROC singlethreading for non-curproc
735 	 * callers, calc_remaining() and remain_for_mode() should be
736 	 * adjusted to also account for td->td_proc != p.  For now
737 	 * this is not implemented because it is not used.
738 	 */
739 	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
740 	    (mode != SINGLE_ALLPROC && td->td_proc == p),
741 	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
742 	mtx_assert(&Giant, MA_NOTOWNED);
743 	PROC_LOCK_ASSERT(p, MA_OWNED);
744 
745 	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
746 		return (0);
747 
748 	/* Is someone already single threading? */
749 	if (p->p_singlethread != NULL && p->p_singlethread != td)
750 		return (1);
751 
752 	if (mode == SINGLE_EXIT) {
753 		p->p_flag |= P_SINGLE_EXIT;
754 		p->p_flag &= ~P_SINGLE_BOUNDARY;
755 	} else {
756 		p->p_flag &= ~P_SINGLE_EXIT;
757 		if (mode == SINGLE_BOUNDARY)
758 			p->p_flag |= P_SINGLE_BOUNDARY;
759 		else
760 			p->p_flag &= ~P_SINGLE_BOUNDARY;
761 	}
762 	if (mode == SINGLE_ALLPROC)
763 		p->p_flag |= P_TOTAL_STOP;
764 	p->p_flag |= P_STOPPED_SINGLE;
765 	PROC_SLOCK(p);
766 	p->p_singlethread = td;
767 	remaining = calc_remaining(p, mode);
768 	while (remaining != remain_for_mode(mode)) {
769 		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
770 			goto stopme;
771 		wakeup_swapper = 0;
772 		FOREACH_THREAD_IN_PROC(p, td2) {
773 			if (td2 == td)
774 				continue;
775 			thread_lock(td2);
776 			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
777 			if (TD_IS_INHIBITED(td2)) {
778 				wakeup_swapper |= weed_inhib(mode, td2, p);
779 #ifdef SMP
780 			} else if (TD_IS_RUNNING(td2) && td != td2) {
781 				forward_signal(td2);
782 #endif
783 			}
784 			thread_unlock(td2);
785 		}
786 		if (wakeup_swapper)
787 			kick_proc0();
788 		remaining = calc_remaining(p, mode);
789 
790 		/*
791 		 * Maybe we suspended some threads; was it enough?
792 		 */
793 		if (remaining == remain_for_mode(mode))
794 			break;
795 
796 stopme:
797 		/*
798 		 * We will be woken up when everyone else has suspended.
799 		 * In the meantime, we suspend as well.
800 		 */
801 		thread_suspend_switch(td, p);
802 		remaining = calc_remaining(p, mode);
803 	}
804 	if (mode == SINGLE_EXIT) {
805 		/*
806 		 * Convert the process to an unthreaded process.
807 		 * SINGLE_EXIT is requested by exit1() or execve();
808 		 * in both cases other threads must be retired.
809 		 */
810 		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
811 		p->p_singlethread = NULL;
812 		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);
813 
814 		/*
815 		 * Wait for any remaining threads to exit cpu_throw().
816 		 */
817 		while (p->p_exitthreads != 0) {
818 			PROC_SUNLOCK(p);
819 			PROC_UNLOCK(p);
820 			sched_relinquish(td);
821 			PROC_LOCK(p);
822 			PROC_SLOCK(p);
823 		}
824 	} else if (mode == SINGLE_BOUNDARY) {
825 		/*
826 		 * Wait until all suspended threads are removed from
827 		 * the processors.  The thread_suspend_check()
828 		 * increments p_boundary_count while it is still
829 		 * running, which would otherwise make it possible for
830 		 * execve() to destroy the vmspace while our other
831 		 * threads are still using the address space.
832 		 *
833 		 * We lock each thread, which is only allowed to
834 		 * succeed after the context switch code has finished
835 		 * using the address space.
836 		 */
837 		FOREACH_THREAD_IN_PROC(p, td2) {
838 			if (td2 == td)
839 				continue;
840 			thread_lock(td2);
841 			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
842 			    ("td %p not on boundary", td2));
843 			KASSERT(TD_IS_SUSPENDED(td2),
844 			    ("td %p is not suspended", td2));
845 			thread_unlock(td2);
846 		}
847 	}
848 	PROC_SUNLOCK(p);
849 	return (0);
850 }
851 
852 bool
853 thread_suspend_check_needed(void)
854 {
855 	struct proc *p;
856 	struct thread *td;
857 
858 	td = curthread;
859 	p = td->td_proc;
860 	PROC_LOCK_ASSERT(p, MA_OWNED);
861 	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
862 	    (td->td_dbgflags & TDB_SUSPEND) != 0));
863 }
864 
865 /*
866  * Called from locations that can safely check to see
867  * whether we have to suspend or at least throttle for a
868  * single-thread event (e.g. fork).
869  *
870  * Such locations include userret().
871  * If the "return_instead" argument is non-zero, the thread must be able to
872  * accept 0 (caller may continue) or 1 (caller must abort) as a result.
873  *
874  * The 'return_instead' argument tells the function whether it may do a
875  * thread_exit() or suspend, or whether the caller must abort and back
876  * out instead.
877  *
878  * If the thread that set the single_threading request has set the
879  * P_SINGLE_EXIT bit in the process flags, then this call will never return
880  * if 'return_instead' is false, but will exit.
881  *
882  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
883  *---------------+--------------------+---------------------
884  *       0       | returns 0          |   returns 0 or 1
885  *               | when ST ends       |   immediately
886  *---------------+--------------------+---------------------
887  *       1       | thread exits       |   returns 1
888  *               |                    |  immediately
889  * 0 = thread_exit() or suspension ok,
890  * other = return error instead of stopping the thread.
891  *
892  * While a full suspension is in effect, even a single-threading
893  * thread would be suspended if it made this call (but it shouldn't).
894  * This call should only be made from places where
895  * thread_exit() would be safe, as that may be the outcome unless
896  * return_instead is set.
897  */
898 int
899 thread_suspend_check(int return_instead)
900 {
901 	struct thread *td;
902 	struct proc *p;
903 	int wakeup_swapper;
904 
905 	td = curthread;
906 	p = td->td_proc;
907 	mtx_assert(&Giant, MA_NOTOWNED);
908 	PROC_LOCK_ASSERT(p, MA_OWNED);
909 	while (thread_suspend_check_needed()) {
910 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
911 			KASSERT(p->p_singlethread != NULL,
912 			    ("singlethread not set"));
913 			/*
914 			 * The only suspension in effect is
915 			 * single-threading. The single threader need not stop.
916 			 * It is safe to access p->p_singlethread unlocked
917 			 * because it can only be set to our address by us.
918 			 */
919 			if (p->p_singlethread == td)
920 				return (0);	/* Exempt from stopping. */
921 		}
922 		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
923 			return (EINTR);
924 
925 		/* Should we goto user boundary if we didn't come from there? */
926 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
927 		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
928 			return (ERESTART);
929 
930 		/*
931 		 * Ignore suspend requests if they are deferred.
932 		 */
933 		if ((td->td_flags & TDF_SBDRY) != 0) {
934 			KASSERT(return_instead,
935 			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
936 			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
937 			    (TDF_SEINTR | TDF_SERESTART),
938 			    ("both TDF_SEINTR and TDF_SERESTART"));
939 			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
940 		}
941 
942 		/*
943 		 * If the process is waiting for us to exit,
944 		 * this thread should simply exit.
945 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
946 		 */
947 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
948 			PROC_UNLOCK(p);
949 
950 			/*
951 			 * Allow the Linux emulation layer to do some work
952 			 * before the thread exits.
953 			 */
954 			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
955 				(p->p_sysent->sv_thread_detach)(td);
956 			umtx_thread_exit(td);
957 			kern_thr_exit(td);
958 			panic("stopped thread did not exit");
959 		}
960 
961 		PROC_SLOCK(p);
962 		thread_stopped(p);
963 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
964 			if (p->p_numthreads == p->p_suspcount + 1) {
965 				thread_lock(p->p_singlethread);
966 				wakeup_swapper = thread_unsuspend_one(
967 				    p->p_singlethread, p, false);
968 				thread_unlock(p->p_singlethread);
969 				if (wakeup_swapper)
970 					kick_proc0();
971 			}
972 		}
973 		PROC_UNLOCK(p);
974 		thread_lock(td);
975 		/*
976 		 * When a thread suspends, it just
977 		 * gets taken off all queues.
978 		 */
979 		thread_suspend_one(td);
980 		if (return_instead == 0) {
981 			p->p_boundary_count++;
982 			td->td_flags |= TDF_BOUNDARY;
983 		}
984 		PROC_SUNLOCK(p);
985 		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
986 		thread_unlock(td);
987 		PROC_LOCK(p);
988 	}
989 	return (0);
990 }
991 
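/*
 * A typical caller that is allowed to back out passes return_instead
 * as 1 and unwinds on error; a rough sketch (actual call sites, such
 * as the sleepqueue code, differ in detail):
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);		(EINTR or ERESTART)
 */
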
992 void
993 thread_suspend_switch(struct thread *td, struct proc *p)
994 {
995 
996 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
997 	PROC_LOCK_ASSERT(p, MA_OWNED);
998 	PROC_SLOCK_ASSERT(p, MA_OWNED);
999 	/*
1000 	 * We implement thread_suspend_one in stages here to avoid
1001 	 * dropping the proc lock while the thread lock is owned.
1002 	 */
1003 	if (p == td->td_proc) {
1004 		thread_stopped(p);
1005 		p->p_suspcount++;
1006 	}
1007 	PROC_UNLOCK(p);
1008 	thread_lock(td);
1009 	td->td_flags &= ~TDF_NEEDSUSPCHK;
1010 	TD_SET_SUSPENDED(td);
1011 	sched_sleep(td, 0);
1012 	PROC_SUNLOCK(p);
1013 	DROP_GIANT();
1014 	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
1015 	thread_unlock(td);
1016 	PICKUP_GIANT();
1017 	PROC_LOCK(p);
1018 	PROC_SLOCK(p);
1019 }
1020 
1021 void
1022 thread_suspend_one(struct thread *td)
1023 {
1024 	struct proc *p;
1025 
1026 	p = td->td_proc;
1027 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1028 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1029 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1030 	p->p_suspcount++;
1031 	td->td_flags &= ~TDF_NEEDSUSPCHK;
1032 	TD_SET_SUSPENDED(td);
1033 	sched_sleep(td, 0);
1034 }
1035 
1036 static int
1037 thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
1038 {
1039 
1040 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1041 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1042 	TD_CLR_SUSPENDED(td);
1043 	td->td_flags &= ~TDF_ALLPROCSUSP;
1044 	if (td->td_proc == p) {
1045 		PROC_SLOCK_ASSERT(p, MA_OWNED);
1046 		p->p_suspcount--;
1047 		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
1048 			td->td_flags &= ~TDF_BOUNDARY;
1049 			p->p_boundary_count--;
1050 		}
1051 	}
1052 	return (setrunnable(td));
1053 }
1054 
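/*
 * Callers of thread_unsuspend_one() accumulate its return value and
 * invoke kick_proc0() once the relevant locks are dropped; the
 * swapper is what brings a thread whose stack was swapped out while
 * it was suspended back into memory.
 */
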
1055 /*
1056  * Allow all threads blocked by single threading to continue running.
1057  */
1058 void
1059 thread_unsuspend(struct proc *p)
1060 {
1061 	struct thread *td;
1062 	int wakeup_swapper;
1063 
1064 	PROC_LOCK_ASSERT(p, MA_OWNED);
1065 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1066 	wakeup_swapper = 0;
1067 	if (!P_SHOULDSTOP(p)) {
1068 		FOREACH_THREAD_IN_PROC(p, td) {
1069 			thread_lock(td);
1070 			if (TD_IS_SUSPENDED(td)) {
1071 				wakeup_swapper |= thread_unsuspend_one(td, p,
1072 				    true);
1073 			}
1074 			thread_unlock(td);
1075 		}
1076 	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
1077 	    p->p_numthreads == p->p_suspcount) {
1078 		/*
1079 		 * Stopping everything also did the job for the single
1080 		 * threading request. Now that we've downgraded to
1081 		 * single-threaded, let it continue.
1082 		 */
1083 		if (p->p_singlethread->td_proc == p) {
1084 			thread_lock(p->p_singlethread);
1085 			wakeup_swapper = thread_unsuspend_one(
1086 			    p->p_singlethread, p, false);
1087 			thread_unlock(p->p_singlethread);
1088 		}
1089 	}
1090 	if (wakeup_swapper)
1091 		kick_proc0();
1092 }
1093 
1094 /*
1095  * End the single-threading mode.
1096  */
1097 void
1098 thread_single_end(struct proc *p, int mode)
1099 {
1100 	struct thread *td;
1101 	int wakeup_swapper;
1102 
1103 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
1104 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
1105 	    ("invalid mode %d", mode));
1106 	PROC_LOCK_ASSERT(p, MA_OWNED);
1107 	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
1108 	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
1109 	    ("mode %d does not match P_TOTAL_STOP", mode));
1110 	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
1111 	    ("thread_single_end from other thread %p %p",
1112 	    curthread, p->p_singlethread));
1113 	KASSERT(mode != SINGLE_BOUNDARY ||
1114 	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
1115 	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
1116 	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
1117 	    P_TOTAL_STOP);
1118 	PROC_SLOCK(p);
1119 	p->p_singlethread = NULL;
1120 	wakeup_swapper = 0;
1121 	/*
1122 	 * If there are other threads, they may now run,
1123 	 * unless of course there is a blanket 'stop order'
1124 	 * on the process. The single threader must be allowed
1125 	 * to continue, however, as this is a bad place to stop.
1126 	 */
1127 	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
1128 		FOREACH_THREAD_IN_PROC(p, td) {
1129 			thread_lock(td);
1130 			if (TD_IS_SUSPENDED(td)) {
1131 				wakeup_swapper |= thread_unsuspend_one(td, p,
1132 				    mode == SINGLE_BOUNDARY);
1133 			}
1134 			thread_unlock(td);
1135 		}
1136 	}
1137 	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
1138 	    ("inconsistent boundary count %d", p->p_boundary_count));
1139 	PROC_SUNLOCK(p);
1140 	if (wakeup_swapper)
1141 		kick_proc0();
1142 }
1143 
1144 struct thread *
1145 thread_find(struct proc *p, lwpid_t tid)
1146 {
1147 	struct thread *td;
1148 
1149 	PROC_LOCK_ASSERT(p, MA_OWNED);
1150 	FOREACH_THREAD_IN_PROC(p, td) {
1151 		if (td->td_tid == tid)
1152 			break;
1153 	}
1154 	return (td);
1155 }
1156 
1157 /* Locate a thread by number; return with proc lock held. */
1158 struct thread *
1159 tdfind(lwpid_t tid, pid_t pid)
1160 {
1161 #define RUN_THRESH	16
1162 	struct thread *td;
1163 	int run = 0;
1164 
1165 	rw_rlock(&tidhash_lock);
1166 	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1167 		if (td->td_tid == tid) {
1168 			if (pid != -1 && td->td_proc->p_pid != pid) {
1169 				td = NULL;
1170 				break;
1171 			}
1172 			PROC_LOCK(td->td_proc);
1173 			if (td->td_proc->p_state == PRS_NEW) {
1174 				PROC_UNLOCK(td->td_proc);
1175 				td = NULL;
1176 				break;
1177 			}
1178 			if (run > RUN_THRESH) {
1179 				if (rw_try_upgrade(&tidhash_lock)) {
1180 					LIST_REMOVE(td, td_hash);
1181 					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1182 						td, td_hash);
1183 					rw_wunlock(&tidhash_lock);
1184 					return (td);
1185 				}
1186 			}
1187 			break;
1188 		}
1189 		run++;
1190 	}
1191 	rw_runlock(&tidhash_lock);
1192 	return (td);
1193 }
1194 
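/*
 * tdfind() implements a small move-to-front heuristic: when a lookup
 * walks more than RUN_THRESH links, the found thread is moved to the
 * head of its hash chain, but only if the read lock can be upgraded
 * without blocking; a failed upgrade simply skips the reordering.
 */
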
1195 void
1196 tidhash_add(struct thread *td)
1197 {
1198 	rw_wlock(&tidhash_lock);
1199 	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
1200 	rw_wunlock(&tidhash_lock);
1201 }
1202 
1203 void
1204 tidhash_remove(struct thread *td)
1205 {
1206 	rw_wlock(&tidhash_lock);
1207 	LIST_REMOVE(td, td_hash);
1208 	rw_wunlock(&tidhash_lock);
1209 }
1210