/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

int max_groups_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

int virtual_cpu;

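/*
 * Threads and ksegrps being discarded are parked on these zombie lists,
 * protected by kse_zombie_lock, until thread_reap() frees them: an
 * exiting thread cannot free its own stack while it is still running
 * on it.
 */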
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

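/*
 * Sysctl handler for kern.threads.virtual_cpu.  Reports mp_ncpus while
 * the variable is unset (zero) and rejects negative values written by
 * the user.
 */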
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

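/*
 * Thread ID allocation.  TIDs are taken from a unit-number allocator
 * covering (PID_MAX, INT_MAX] (see threadinit()), so a tid can never
 * collide with a pid.
 */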
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.  A context switch must occur inside a
	 * critical section, and in fact, includes hand-off of the sched_lock.
	 * After a context switch to a newly created thread, it will release
	 * sched_lock for the first time, and its td_critnest will hit 0 for
	 * the first time.  This happens on the far end of a context switch,
	 * and when it context switches away from itself, it will in fact go
	 * back into a critical section, and hand off the sched lock to the
	 * next thread.
	 */
	td->td_critnest = 1;
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif

	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_umtxq = umtxq_alloc();
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtxq_free(td->td_umtxq);
	vm_thread_dispose(td);
}

/*
 * Prepare a ksegrp for use (this is the zone ctor, so it runs on every
 * allocation, not just when the backing memory is first created).
 */
static int
ksegrp_ctor(void *mem, int size, void *arg, int flags)
{
	struct ksegrp	*kg;

	kg = (struct ksegrp *)mem;
	bzero(mem, size);
	kg->kg_sched = (struct kg_sched *)&kg[1];
	return (0);
}

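/*
 * Attach a freshly allocated ksegrp to a process: initialize its queues,
 * point it at its proc and put it at the head of the proc's ksegrp list.
 */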
void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

/*
 * Called from:
 *   thread_exit()
 */
void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE.
	 */
	if (p->p_procscopegrp == kg)
		p->p_procscopegrp = NULL;
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * Called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	     /* threads suspended */
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if the ksiginfo zone is not ready. */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	thread_link(td, kg);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    ksegrp_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kseinit();	/* set up KSE-specific stuff, e.g. the upcall zone */
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct ksegrp *kg_first, *kg_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		/*
		 * There will always be a thread on the list if one of these
		 * is there.
		 */
		kse_GC();
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/*  XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess. Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * Called from:
 * exit1()
 * kse_exit()
 * thr_exit()
 * thread_user_enter()
 * thread_userret()
 * thread_suspend_check()
 */
void
thread_exit(void)
{
	struct thread *td;
	struct proc *p;
	struct ksegrp	*kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The thread is exiting. The scheduler can release its stuff
	 * and collect stats etc.
	 */
	sched_thread_exit(td);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all signs of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);

			/* XXX first arg not used in 4BSD or ULE */
			sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_unsuspend_one(p->p_singlethread);
				}
			}

			/*
			 * Because each upcall structure has an owner thread,
			 * and that thread exits only when the process is in
			 * the exiting state, an upcall to userland is no
			 * longer needed and deleting the upcall structure is
			 * safe here.
			 * So when all threads in a group have exited, all
			 * upcalls in the group should be automatically freed.
			 *  XXXKSE This is a KSE thing and should be exported
			 * there somehow.
			 */
			upcall_remove(td);

			/*
			 * If the thread we unlinked above was the last one,
			 * then this ksegrp should go away too.
			 */
			if (kg->kg_numthreads == 0) {
				/*
				 * Let the scheduler know about this in case
				 * it needs to recover stats or resources.
				 * Theoretically we could let
				 * sched_exit_ksegrp() do the equivalent of
				 * setting the concurrency to 0
				 * but don't do it yet to avoid changing
				 * the existing scheduler code until we
				 * are ready.
				 * We supply a random other ksegrp
				 * as the recipient of any built-up
				 * cpu usage etc. (if the scheduler wants it).
				 * XXXKSE
				 * This is probably not fair so think of
				 * a better answer.
				 */
				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
				sched_set_concurrency(kg, 0); /* XXX TEMP */
				ksegrp_unlink(kg);
				ksegrp_stash(kg);
			}
			PROC_UNLOCK(p);
			td->td_ksegrp	= NULL;
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 * What should we do?
			 * Theoretically this can't happen:
			 * exit1() - clears threading flags before coming here
			 * kse_exit() - treats last thread specially
			 * thr_exit() - treats last thread specially
			 * thread_user_enter() - only if more exist
			 * thread_userret() - only if more exist
			 * thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	} else {
		/*
		 * A non-threaded process comes here.
		 * This includes an ex-threaded process that is coming
		 * here via exit1() (exit1 dethreads the proc first).
		 */
		PROC_UNLOCK(p);
	}
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	cpu_throw(td, choosethread());
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			if (td->td_standin->td_ucred != NULL) {
				crfree(td->td_standin->td_ucred);
				td->td_standin->td_ucred = NULL;
			}
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
		crfree(td->td_ucred);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 *  thr_create()
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_ksegrp   = kg;
	td->td_flags    = 0;
	td->td_kflags	= 0;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit()		XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	upcall_remove(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
	sched_set_concurrency(td->td_ksegrp, 1);
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* Could clear a few other things here. */
	/* Must NOT clear links to proc and ksegrp! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
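	/*
	 * Loop, prodding the other threads toward the state this mode
	 * wants (exited, parked at the user boundary, or suspended),
	 * until we are the only thread still counted as remaining.
	 */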
	while (remaining != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2);
					break;
				default:
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * Maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. The single threader need not
			 * stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (1);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (1);

		/* If the thread will exit, flush its pending signals. */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
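		/*
		 * Threads that came in from the user boundary
		 * (return_instead == 0) are counted in p_boundary_count
		 * so that SINGLE_BOUNDARY mode can tell when everyone has
		 * parked there.
		 */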
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount)
				thread_unsuspend_one(p->p_singlethread);
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0) {
			p->p_boundary_count--;
			td->td_flags &= ~TDF_BOUNDARY;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

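/*
 * Suspend a thread: move it to the process's suspend queue and account
 * for it in p_suspcount.  The caller must hold both sched_lock and the
 * proc lock, as the assertions below verify.
 */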
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
}

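/*
 * Undo thread_suspend_one(): take the thread off the suspend queue and
 * make it runnable again.  Same locking rules as thread_suspend_one().
 */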
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

/*
 * End single-threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	p->p_procscopegrp = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Called before going into an interruptible sleep to see if we have been
 * interrupted or requested to exit.
 */
int
thread_sleep_check(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	mtx_assert(&sched_lock, MA_OWNED);
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_singlethread != td) {
			if (p->p_flag & P_SINGLE_EXIT)
				return (EINTR);
			if (p->p_flag & P_SINGLE_BOUNDARY)
				return (ERESTART);
		}
		if (td->td_flags & TDF_INTERRUPT)
			return (td->td_intrval);
	}
	return (0);
}

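/*
 * Look up a thread in a process by its thread (lwp) id.  Returns the
 * matching thread, or NULL if no thread in the process has that tid.
 * The proc lock must be held; sched_lock is taken internally to walk
 * the thread list safely.
 */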
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	mtx_unlock_spin(&sched_lock);
	return (td);
}
1046