xref: /freebsd/sys/kern/kern_thread.c (revision 21fc3164307dc288ff6eb1f028a268f3c76cc6e5)
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

int max_groups_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

int virtual_cpu;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

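/*
 * A thread cannot free itself while it is still running on its own stack,
 * so exiting threads (and their ksegrps) are parked on the zombie lists
 * above and reclaimed later via thread_reap().
 */
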
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");
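
/*
 * This OID can be read or set from userland, e.g.:
 *	sysctl kern.threads.virtual_cpu		(read)
 *	sysctl kern.threads.virtual_cpu=4	(write)
 * A value of 0 selects the default, which is mp_ncpus.
 */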

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.  A context switch must occur inside a
	 * critical section, and in fact, includes hand-off of the sched_lock.
	 * After a context switch to a newly created thread, it will release
	 * sched_lock for the first time, and its td_critnest will hit 0 for
	 * the first time.  This happens on the far end of a context switch,
	 * and when it context switches away from itself, it will in fact go
	 * back into a critical section, and hand off the sched lock to the
	 * next thread.
	 */
	td->td_critnest = 1;
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif

	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_umtxq = umtxq_alloc();
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtxq_free(td->td_umtxq);
	vm_thread_dispose(td);
}
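
/*
 * Note on the UMA hooks above: thread_ctor()/thread_dtor() run on every
 * uma_zalloc()/uma_zfree() of a thread, while thread_init()/thread_fini()
 * only run when an item moves between the zone and the VM system.  The
 * kernel stack, sleep queue, turnstile and umtx queue are therefore
 * type-stable: they survive a free/alloc cycle within the zone.
 */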

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static int
ksegrp_ctor(void *mem, int size, void *arg, int flags)
{
	struct ksegrp	*kg;

	kg = (struct ksegrp *)mem;
	bzero(mem, size);
	kg->kg_sched = (struct kg_sched *)&kg[1];
	return (0);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the zeroed section of the
	 * structure and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

/*
 * Called from:
 *  thread_exit()
 */
void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE.
	 */
	if (p->p_procscopegrp == kg)
		p->p_procscopegrp = NULL;
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads etc.
 * Called from:
 *  {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 *  proc_dtor() (should go away)
 *  proc_init()
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	     /* threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	thread_link(td, kg);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    ksegrp_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kseinit();	/* Set up KSE-specific stuff, e.g. the upcall zone. */
}
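
/*
 * Note: thread IDs are allocated from the range (PID_MAX, INT_MAX], so a
 * tid can never collide with a valid pid.
 */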

/*
 * Stash a surplus thread on the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash a surplus ksegrp on the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie KSE resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct ksegrp *kg_first, *kg_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		/*
		 * There will always be a thread on the list if one of these
		 * is there.
		 */
		kse_GC();
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}
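
/*
 * Note: both allocators use M_WAITOK and so may sleep; they must not be
 * called from a context that cannot block.  Reaping zombies first lets
 * UMA reuse their type-stable storage for the new allocation.
 */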

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/*  XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess.  Because we do this (cpu_throw) unconditionally
 * here, libthr has its own version (thr_exit1()) that doesn't
 * do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * Called from:
 *  exit1()
 *  kse_exit()
 *  thr_exit()
 *  thread_user_enter()
 *  thread_userret()
 *  thread_suspend_check()
 */
void
thread_exit(void)
{
	struct thread *td;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);

	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The thread is exiting, so the scheduler can release its stuff
	 * and collect stats etc.
	 */
	sched_thread_exit(td);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff if we never had threads.
	 * exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);

			/* XXX first arg not used in 4BSD or ULE */
			sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);

			/*
			 * As we are exiting there is room for another
			 * to be created.
			 */
			if (p->p_maxthrwaits)
				wakeup(&p->p_numthreads);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_unsuspend_one(p->p_singlethread);
				}
			}

			/*
			 * Because each upcall structure has an owner thread,
			 * and the owner thread exits only when the process is
			 * in the exiting state, no upcall to userland is
			 * needed any more, so deleting the upcall structure
			 * is safe here.  When all threads in a group have
			 * exited, all upcalls in the group should therefore
			 * be automatically freed.
			 *  XXXKSE This is a KSE thing and should be exported
			 * there somehow.
			 */
			upcall_remove(td);

			/*
			 * If the thread we unlinked above was the last one,
			 * then this ksegrp should go away too.
			 */
			if (kg->kg_numthreads == 0) {
				/*
				 * Let the scheduler know about this in case
				 * it needs to recover stats or resources.
				 * Theoretically we could let
				 * sched_exit_ksegrp() do the equivalent of
				 * setting the concurrency to 0
				 * but don't do it yet to avoid changing
				 * the existing scheduler code until we
				 * are ready.
				 * We supply a random other ksegrp
				 * as the recipient of any built up
				 * cpu usage etc. (if the scheduler wants it).
				 * XXXKSE
				 * This is probably not fair so think of
				 * a better answer.
				 */
				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
				sched_set_concurrency(kg, 0); /* XXX TEMP */
				ksegrp_unlink(kg);
				ksegrp_stash(kg);
			}
			PROC_UNLOCK(p);
			td->td_ksegrp = NULL;
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 * What should we do?
			 * Theoretically this can't happen:
			 *  exit1() - clears threading flags before coming here
			 *  kse_exit() - treats last thread specially
			 *  thr_exit() - treats last thread specially
			 *  thread_user_enter() - only if more exist
			 *  thread_userret() - only if more exist
			 *  thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	} else {
		/*
		 * A non-threaded process comes here.
		 * This includes a formerly threaded process that is coming
		 * here via exit1() (exit1() dethreads the proc first).
		 */
		PROC_UNLOCK(p);
	}
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	cpu_throw(td, choosethread());
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			if (td->td_standin->td_ucred != NULL) {
				crfree(td->td_standin->td_ucred);
				td->td_standin->td_ucred = NULL;
			}
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
		crfree(td->td_ucred);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 *  thr_create()
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_ksegrp   = kg;
	td->td_flags    = 0;
	td->td_kflags   = 0;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit()		XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	upcall_remove(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
	sched_set_concurrency(td->td_ksegrp, 1);
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* Could clear a few other things here... */
	/* ...but must NOT clear links to proc and ksegrp! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 *
 * In SINGLE_EXIT mode all other threads must exit; in SINGLE_BOUNDARY
 * mode they must stop at the user boundary; otherwise they need only
 * suspend.
 *
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary, as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td2->td_flags & TDF_DBSUSPEND)
						td2->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2);
					break;
				default:
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * Maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec.  In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
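
/*
 * A sketch of the usual call pattern (the real call sites are exit1()
 * and the execve() path):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_EXIT) != 0)
 *		... another thread won the race; back out ...
 *	... we are now the only thread running in the process ...
 */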

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  The single threader need not
			 * stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (1);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just exit.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount)
				thread_unsuspend_one(p->p_singlethread);
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0) {
			p->p_boundary_count--;
			td->td_flags &= ~TDF_BOUNDARY;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

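/*
 * Suspend one thread: mark it suspended and park it on the process's
 * suspend queue.  The proc lock and sched_lock must both be held, and
 * the thread must not already be suspended.
 */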
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent.  We
	 * want to look like we have two inhibitors.
	 * May already be set... doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

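/*
 * Resume one previously suspended thread: remove it from the suspend
 * queue, clear its suspended state and make it runnable again.  The
 * proc lock and sched_lock must both be held.
 */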
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

/*
 * End single-threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	p->p_procscopegrp = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Called before going into an interruptible sleep to see if we have been
 * interrupted or requested to exit.
 */
int
thread_sleep_check(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	mtx_assert(&sched_lock, MA_OWNED);
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_singlethread != td) {
			if (p->p_flag & P_SINGLE_EXIT)
				return (EINTR);
			if (p->p_flag & P_SINGLE_BOUNDARY)
				return (ERESTART);
		}
		if (td->td_flags & TDF_INTERRUPT)
			return (td->td_intrval);
	}
	return (0);
}
1033