xref: /freebsd/sys/kern/kern_thread.c (revision ec0e626bafb335b30c499d06066997f54b10c092)
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct	tidhashhead *tidhashtbl;
u_long	tidhash;
struct	rwlock tidhash_lock;

static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}
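
/*
 * Illustrative sketch (not compiled): the deferred-reuse scheme above in
 * miniature.  A freed ID parks in a ring buffer; only when the ring is
 * full does the oldest parked ID go back to the backing allocator, so a
 * recently freed ID is not handed out again until the allocator is
 * exhausted or the ring overflows.  backing_alloc()/backing_free() are
 * hypothetical stand-ins for alloc_unr(tid_unrhdr)/free_unr(tid_unrhdr,
 * ...); locking is omitted for brevity.
 */
#if 0
#define	RING	4
static int ring[RING], ring_head, ring_tail;

static int
id_alloc(void)
{
	int id;

	id = backing_alloc();			/* cf. alloc_unr() */
	if (id != -1)
		return (id);
	if (ring_head == ring_tail)		/* ring empty: exhausted */
		return (-1);
	id = ring[ring_head];
	ring_head = (ring_head + 1) % RING;
	return (id);
}

static void
id_free(int id)
{
	int evicted = -1;

	if ((ring_tail + 1) % RING == ring_head) {	/* full: evict oldest */
		evicted = ring[ring_head];
		ring_head = (ring_head + 1) % RING;
	}
	ring[ring_tail] = id;
	ring_tail = (ring_tail + 1) % RING;
	if (evicted != -1)
		backing_free(evicted);		/* cf. free_unr() */
}
#endif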

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and the initial
 * thread.  Called from:
 *  {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 *  proc_dtor() (should go away)
 *  proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * Leave one number for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}
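
/*
 * Illustrative sketch (not compiled): the UMA callback split used above.
 * thread_ctor()/thread_dtor() run on every uma_zalloc()/uma_zfree(), while
 * thread_init()/thread_fini() run only when an item enters or leaves the
 * type-stable backing store, which is why the sleepqueue and turnstile
 * allocated in thread_init() survive recycling through the zone.  The
 * "foo" zone and its callbacks below are hypothetical.
 */
#if 0
	uma_zone_t zone;
	struct foo *fp;

	zone = uma_zcreate("FOO", sizeof(struct foo),
	    foo_ctor, foo_dtor,		/* run on every zalloc/zfree */
	    foo_init, foo_fini,		/* run when items are created/retired */
	    UMA_ALIGN_CACHE, 0);
	fp = uma_zalloc(zone, M_WAITOK);	/* ctor runs; init only if new */
	uma_zfree(zone, fp);			/* dtor runs; fini is deferred */
#endif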

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
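
/*
 * Illustrative sketch (not compiled): the detach-then-drain idiom used by
 * thread_reap() above, reduced to its essentials.  The spin lock is held
 * only long enough to steal the whole list; the per-element teardown runs
 * unlocked.  TAILQ_NEXT() stays valid after TAILQ_INIT() of the head
 * because the stolen elements still link to one another.  The list, lock
 * and destroy() below are hypothetical.
 */
#if 0
	mtx_lock_spin(&list_lock);
	first = TAILQ_FIRST(&list);
	TAILQ_INIT(&list);			/* steal the entire list */
	mtx_unlock_spin(&list_lock);
	for (; first != NULL; first = next) {
		next = TAILQ_NEXT(first, entry);
		destroy(first);			/* expensive work, lock dropped */
	}
#endif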

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop FPU and debug register state storage, or any other
	 * architecture-specific resources that
	 * would not be present in a new, untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * exit1() clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() once this thread is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread, p);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid an immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.
		 * SINGLE_EXIT is used by exit1() and execve(); in
		 * both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
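
/*
 * Illustrative sketch (not compiled): how a caller such as exit1() or
 * execve() is expected to drive thread_single() with SINGLE_EXIT, per the
 * comment above.  The abort path shown is a hypothetical stand-in; real
 * callers differ in how they handle the failure.
 */
#if 0
	PROC_LOCK(p);
	if ((p->p_flag & P_HADTHREADS) != 0 &&
	    thread_single(p, SINGLE_EXIT) != 0) {
		/* Another thread won the race to single-thread the process. */
		PROC_UNLOCK(p);
		return (ERESTART);		/* hypothetical abort path */
	}
	/* From here on, curthread is the only thread left in p. */
	PROC_UNLOCK(p);
#endif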

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 *
 * The 'return_instead' argument tells the function whether it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead: when return_instead is non-zero, the caller must be able
 * to accept 0 (it may continue) or 1 (it must abort) as a result.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. The single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests for stop signals if they
		 * are deferred.
		 */
		if ((P_SHOULDSTOP(p) == P_STOPPED_SIG ||
		    (p->p_flag & P_TOTAL_STOP) != 0) &&
		    (td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			umtx_thread_exit(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread, p);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0) {
			PROC_SLOCK(p);
			p->p_boundary_count--;
			PROC_SUNLOCK(p);
		}
	}
	return (0);
}
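
/*
 * Illustrative sketch (not compiled): the two calling styles described in
 * the table above.  A user-boundary caller such as userret() can block
 * here, so it passes return_instead == 0; a caller deep in the kernel
 * that cannot stop safely passes return_instead != 0 and backs out on
 * EINTR/ERESTART, as below.
 */
#if 0
	PROC_LOCK(p);
	error = thread_suspend_check(1);	/* must not stop here */
	PROC_UNLOCK(p);
	if (error != 0)
		return (error);			/* EINTR or ERESTART */
#endif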

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

int
thread_unsuspend_one(struct thread *td, struct proc *p)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now that we've downgraded to
		 * single-threaded, let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue, however, as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
						td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}
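
/*
 * Illustrative sketch (not compiled): tdfind() returns with the containing
 * process locked, so the caller owns the PROC_UNLOCK(); passing pid == -1
 * skips the pid check, as the loop above shows.  The long hash chain is
 * self-tuning: after RUN_THRESH misses the found thread is moved to the
 * head of its chain.
 */
#if 0
	td = tdfind(tid, -1);
	if (td == NULL)
		return (ESRCH);
	/* ... inspect td under PROC_LOCK(td->td_proc) ... */
	PROC_UNLOCK(td->td_proc);
#endif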

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
