/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct	tidhashhead *tidhashtbl;
u_long	tidhash;
struct	rwlock tidhash_lock;

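/*
 * TIDs are handed out by the unr(9) allocator.  Freed TIDs are parked in
 * tid_buffer, a circular buffer, so that a recently used TID is not given
 * out again immediately; only once the buffer is full is the oldest entry
 * returned to the allocator.
 */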
static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
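	/*
	 * The buffer is full: evict the oldest entry so that it can be
	 * returned to the unr allocator once the lock is dropped.
	 */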
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its initial
 * threads etc.
 * Called from:
 * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX.
	 * Leave one number for thread0; dynamically allocated
	 * TIDs therefore start at PID_MAX + 2.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

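	/*
	 * The zone holds both the thread and its scheduler-private data
	 * (hence sched_sizeof_thread()).  The UMA align argument is a
	 * mask, so 16 - 1 requests 16-byte alignment.
	 */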
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if there are none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
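		/*
		 * The entire list was claimed above, so the zombies can
		 * now be freed without holding the spin lock.
		 */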
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}
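/*
 * Allocate a kernel stack of the requested size for a thread that was
 * created without one.  Returns 1 on success and 0 on failure.
 */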
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop FPU and debug register state storage, or any other
	 * architecture-specific resources that would not be present in a
	 * new, untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled. Skip all of this if we never had
	 * threads.  exit1() clears all signs of other threads when it
	 * goes to single threading, so the last thread always takes the
	 * short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread, p);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef	HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by hwpmc(4),
	 * inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}
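/*
 * Count the threads that have not yet complied with the single-threading
 * request for the given mode.
 */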
static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}
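/*
 * The number of threads that may remain running: a SINGLE_ALLPROC request
 * comes from a thread in another process, so none of p's threads may keep
 * running; for the other modes the requesting thread itself survives.
 */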
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}
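/*
 * Weed out an inhibited thread td2 so that it can make progress toward
 * the single-threading request: unsuspend it or abort its sleep, as
 * appropriate for the mode.  Returns non-zero if the swapper process
 * needs to be woken up.
 */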
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid an immediate
		 * un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
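	/*
	 * Loop until only the target number of threads remains: flag
	 * every other thread for a suspension check, wake up those that
	 * are inhibited, and nudge any that are running on another CPU.
	 */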
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT mode is used by exit1() and execve(); in
		 * both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The "return_instead" argument tells the function whether it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if "return_instead" is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. The single threader need not
			 * stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests for stop signals if they
		 * are deferred.
		 */
		if ((P_SHOULDSTOP(p) == P_STOPPED_SIG ||
		    (p->p_flag & P_TOTAL_STOP) != 0) &&
		    (td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			umtx_thread_exit(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread, p);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0) {
			PROC_SLOCK(p);
			p->p_boundary_count--;
			PROC_SUNLOCK(p);
		}
	}
	return (0);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

int
thread_unsuspend_one(struct thread *td, struct proc *p)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must, however,
	 * be allowed to continue as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}
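/*
 * Locate a thread in a process by its thread ID.  Returns NULL if it is
 * not found.  The caller must hold the process lock.
 */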
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
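			/*
			 * The walk to this entry was long; try to move it
			 * to the head of its hash chain so that future
			 * lookups find it sooner.  If the lock upgrade
			 * fails, just skip the optimization.
			 */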
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
						td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}
