xref: /freebsd/sys/kern/kern_thread.c (revision c4f6a2a9e1b1879b618c436ab4f56ff75c73a0f5)
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * Thread related storage.
 */
static uma_zone_t thread_zone;
static int allocated_threads;
static int active_threads;
static int cached_threads;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

SYSCTL_INT(_kern_threads, OID_AUTO, active, CTLFLAG_RD,
	&active_threads, 0, "Number of active threads in system.");

SYSCTL_INT(_kern_threads, OID_AUTO, cached, CTLFLAG_RD,
	&cached_threads, 0, "Number of threads in thread cache.");

SYSCTL_INT(_kern_threads, OID_AUTO, allocated, CTLFLAG_RD,
	&allocated_threads, 0, "Number of threads in zone.");

static int oiks_debug = 1;	/* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
	&oiks_debug, 0, "OIKS thread debug");

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
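
/*
 * A minimal sketch of how RANGEOF() is used (see thread_ctor() below);
 * the struct and field names here are hypothetical, for illustration only.
 */
#if 0
struct example {
	int	e_keep;		/* survives reuse */
	int	e_startzero;	/* first field cleared */
	int	e_counter;	/* also cleared */
	int	e_endzero;	/* marker; NOT cleared itself */
} ex;

bzero(&ex.e_startzero, RANGEOF(struct example, e_startzero, e_endzero));
#endif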

struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
struct mtx zombie_thread_lock;
MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
    "zombie_thread_lock", MTX_SPIN);

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	bzero(&td->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	td->td_state = TDS_NEW;
	td->td_flags |= TDF_UNBOUND;
#if 0
	/*
	 * Maybe move these here from process creation, but maybe not.
	 * Moving them here takes them away from their "natural" place
	 * in the fork process.
	 */
	/* XXX td_contested does not appear to be initialized for threads! */
	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
#endif
	cached_threads--;	/* XXXSMP */
	active_threads++;	/* XXXSMP */
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_SLP:
	case TDS_MTX:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_UNQUEUED:
	case TDS_NEW:
	case TDS_RUNNING:
	case TDS_SURPLUS:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif

	/* Update counters. */
	active_threads--;	/* XXXSMP */
	cached_threads++;	/* XXXSMP */
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	pmap_new_thread(td);
	cpu_thread_setup(td);
	cached_threads++;	/* XXXSMP */
	allocated_threads++;	/* XXXSMP */
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
	cached_threads--;	/* XXXSMP */
	allocated_threads--;	/* XXXSMP */
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
}
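
/*
 * A minimal sketch (not compiled) of how the UMA hooks above pair up:
 * uma_zalloc() runs thread_init() only when backing memory is first
 * allocated and thread_ctor() on every allocation; uma_zfree() runs
 * thread_dtor() on every free and thread_fini() only when the item's
 * memory is finally released back to the VM.
 */
#if 0
struct thread *td;

td = uma_zalloc(thread_zone, M_WAITOK);	/* thread_init (maybe) + thread_ctor */
/* ... use td ... */
uma_zfree(thread_zone, td);		/* thread_dtor (+ thread_fini later) */
#endif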

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&zombie_thread_lock);
}

/*
 * Reap any zombie threads.
 */
void
thread_reap(void)
{
	struct thread *td_reaped;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_thread_lock);
		while (!TAILQ_EMPTY(&zombie_threads)) {
			td_reaped = TAILQ_FIRST(&zombie_threads);
			TAILQ_REMOVE(&zombie_threads, td_reaped, td_runq);
			mtx_unlock_spin(&zombie_thread_lock);
			thread_free(td_reaped);
			mtx_lock_spin(&zombie_thread_lock);
		}
		mtx_unlock_spin(&zombie_thread_lock);
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox.
 */
int
thread_export_context(struct thread *td)
{
	struct kse *ke;
	uintptr_t td2_mbx;
	void *addr1;
	void *addr2;
	int error;

#ifdef __ia64__
	td2_mbx = 0;		/* pacify gcc (!) */
#endif
	/* Export the register contents. */
	error = cpu_export_context(td);

	ke = td->td_kse;
	addr1 = (caddr_t)ke->ke_mailbox
			+ offsetof(struct kse_mailbox, kmbx_completed_threads);
	addr2 = (caddr_t)td->td_mailbox
			+ offsetof(struct thread_mailbox, next_completed);
	/* Then link it into its KSE's list of completed threads. */
	if (!error) {
		td2_mbx = fuword(addr1);
		if (td2_mbx == (uintptr_t)-1)
			error = EFAULT;
	}
	if (!error)
		error = suword(addr2, td2_mbx);
	if (!error)
		error = suword(addr1, (u_long)td->td_mailbox);
	if (error == -1)
		error = EFAULT;
	return (error);
}
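
/*
 * For orientation: the suword() calls above push td's mailbox onto a
 * LIFO list in user space whose head is kmbx_completed_threads. A
 * minimal sketch (hypothetical, user-level) of how a UTS might drain
 * that list; the variable names here are illustrative only.
 */
#if 0
struct thread_mailbox *tm;

tm = kmbx->kmbx_completed_threads;	/* head written by suword() above */
kmbx->kmbx_completed_threads = NULL;
while (tm != NULL) {
	/* ... resume or requeue the completed thread ... */
	tm = tm->next_completed;
}
#endif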


/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our KSE's ke_tdspare slot, freeing the
 * thread that might be there currently. Because we know that only this
 * processor will run our KSE, we needn't worry about someone else grabbing
 * our context before we do a cpu_throw.
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp	*kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (ke->ke_tdspare != NULL) {
		thread_stash(ke->ke_tdspare);
		ke->ke_tdspare = NULL;
	}
	cpu_thread_exit(td);	/* XXXSMP */

	/* Reassign this thread's KSE. */
	if (ke != NULL) {
		ke->ke_thread = NULL;
		td->td_kse = NULL;
		ke->ke_state = KES_UNQUEUED;
		kse_reassign(ke);
	}

	/* Unlink this thread from its proc and its ksegrp. */
	if (p != NULL) {
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		p->p_numthreads--;
		if (kg != NULL) {
			TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
			kg->kg_numthreads--;
		}
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SNGL is unset
		 * in exit1() once only one thread survives.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SNGL) {
			if (p->p_numthreads == p->p_suspcount) {
				TAILQ_REMOVE(&p->p_suspended,
				    p->p_singlethread, td_runq);
				setrunqueue(p->p_singlethread);
				p->p_suspcount--;
			}
		}
	}
	td->td_state	= TDS_SURPLUS;
	td->td_proc	= NULL;
	td->td_ksegrp	= NULL;
	td->td_last_kse	= NULL;
	ke->ke_tdspare = td;
	PROC_UNLOCK(p);
	cpu_throw();
	/* NOTREACHED */
}
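
/*
 * A minimal sketch of the calling convention thread_exit() asserts
 * above: both the proc lock and sched_lock must be held, and the call
 * never returns (cpu_throw()). This mirrors its use in thread_userret()
 * below.
 */
#if 0
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
thread_exit();
/* NOTREACHED */
#endif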

/*
 * Link a thread to a process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_NEW;
	td->td_proc	= p;
	td->td_ksegrp	= kg;
	td->td_last_kse	= NULL;

	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
	if (oiks_debug && p->p_numthreads > 4) {
		printf("OIKS %d\n", p->p_numthreads);
		if (oiks_debug > 1)
			Debugger("OIKS");
	}
	td->td_critnest = 0;
	td->td_kse	= NULL;
}

/*
 * Set up the upcall pcb in either a given thread or a new one
 * if none given. Use the upcall for the given KSE.
 * XXXKSE possibly fix cpu_set_upcall() to not need td->td_kse set.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);
	if (ke->ke_tdspare != NULL) {
		td2 = ke->ke_tdspare;
		ke->ke_tdspare = NULL;
	} else {
		mtx_unlock_spin(&sched_lock);
		td2 = thread_alloc();
		mtx_lock_spin(&sched_lock);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	     td, td->td_proc->p_pid, td->td_proc->p_comm);
	thread_link(td2, ke->ke_ksegrp);
	cpu_set_upcall(td2, ke->ke_pcb);
	td2->td_ucred = crhold(td->td_ucred);
	td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
	td2->td_priority = td->td_priority;
	setrunqueue(td2);
	return (td2);
}
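
/*
 * A minimal sketch of using thread_schedule_upcall(); it must be
 * entered with sched_lock held and may briefly drop that lock to call
 * thread_alloc(). Here td and ke stand for the caller's current thread
 * and the KSE needing the upcall.
 */
#if 0
struct thread *td2;

mtx_lock_spin(&sched_lock);
td2 = thread_schedule_upcall(td, ke);	/* td2 is now on the run queue */
mtx_unlock_spin(&sched_lock);
#endif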

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case we will have no thread mailbox registered).  The only
 * traps we support will have set the mailbox.  We will clear it here.
 */
int
thread_userret(struct proc *p, struct ksegrp *kg, struct kse *ke,
    struct thread *td, struct trapframe *frame)
{
	int error = 0;

	if (ke->ke_tdspare == NULL) {
		ke->ke_tdspare = thread_alloc();
	}
	if (td->td_flags & TDF_UNBOUND) {
		/*
		 * Are we returning from a thread that had a mailbox?
		 *
		 * XXX Maybe this should be in a separate function.
		 */
		if (((td->td_flags & TDF_UPCALLING) == 0) && td->td_mailbox) {
			/*
			 * [XXXKSE Future enhancement]
			 * We could also go straight back to the syscall
			 * if we never had to do an upcall since then.
			 * If the KSE's copy is == the thread's copy..
			 * AND there are no other completed threads.
			 */
			/*
			 * We will go back as an upcall or go do another thread.
			 * Either way we need to save the context back to
			 * the user thread mailbox.
			 * So the UTS can restart it later.
			 */
			error = thread_export_context(td);
			td->td_mailbox = NULL;
			if (error) {
				/*
				 * Failing to do the KSE
				 * operation just defaults operation
				 * back to synchronous operation.
				 */
				goto cont;
			}

			if (TAILQ_FIRST(&kg->kg_runq)) {
				/*
				 * Uh-oh.. don't return to the user.
				 * Instead, switch to the thread that
				 * needs to run. The question is:
				 * What do we do with the thread we have now?
				 * We have put the completion block
				 * on the kse mailbox. If we had more energy,
				 * we could lazily do so, assuming someone
				 * else might get to userland earlier
				 * and deliver it earlier than we could.
				 * To do that we could save it off the KSEG.
				 * An upcalling KSE would 'reap' all completed
				 * threads.
				 * Being in a hurry, we'll do nothing and
				 * leave it on the current KSE for now.
				 *
				 * As for the other threads to run;
				 * we COULD rush through all the threads
				 * in this KSEG at this priority, or we
				 * could throw the ball back into the court
				 * and just run the highest prio kse available.
				 * What is OUR priority?
				 * The priority of the highest syscall waiting
				 * to be returned?
				 * For now, just let another KSE run (easiest).
				 */
				PROC_LOCK(p);
				mtx_lock_spin(&sched_lock);
				thread_exit(); /* Abandon current thread. */
				/* NOTREACHED */
			} else { /* if (number of returning syscalls = 1) */
				/*
				 * Swap our frame for the upcall frame.
				 *
				 * XXXKSE Assumes we are going to user land
				 * and not nested in the kernel
				 */
				td->td_flags |= TDF_UPCALLING;
			}
		}
		/*
		 * This is NOT just an 'else' clause for the above test...
		 */
		if (td->td_flags & TDF_UPCALLING) {
			CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			/*
			 * Make sure that it has the correct frame loaded.
			 * While we know that we are on the same KSEGRP
			 * as we were created on, we could very easily
			 * have come in on another KSE. We therefore need
			 * to do the copy of the frame after the last
			 * possible switch() (the one above).
			 */
			bcopy(ke->ke_frame, frame, sizeof(struct trapframe));

			/*
			 * Decide what we are sending to the user;
			 * the upcall sets one argument: the address of the mbox.
			 */
			cpu_set_args(td, ke);

			/*
			 * There is no more work to do and we are going to ride
			 * this thread/KSE up to userland. Make sure the user's
			 * pointer to the thread mailbox is cleared before we
			 * re-enter the kernel next time for any reason.
			 * We might as well do it here.
			 */
			td->td_flags &= ~TDF_UPCALLING;	/* Hmmmm. */
			error = suword((caddr_t)td->td_kse->ke_mailbox +
			    offsetof(struct kse_mailbox, kmbx_current_thread),
			    0);
		}
		/*
		 * Stop any chance that we may be separated from
		 * the KSE we are currently on. This is "biting the bullet";
		 * we are committing to go to user space as THIS KSE here.
		 */
cont:
		td->td_flags &= ~TDF_UNBOUND;
	}
	return (error);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_KSES) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SNGLE_EXIT)
		p->p_flag |= P_SINGLE_EXIT;
	else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SNGL;
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			switch (td2->td_state) {
			case TDS_SUSPENDED:
				if (force_exit == SNGLE_EXIT) {
					mtx_lock_spin(&sched_lock);
					TAILQ_REMOVE(&p->p_suspended,
					    td2, td_runq);
					p->p_suspcount--;
					setrunqueue(td2); /* Should suicide. */
					mtx_unlock_spin(&sched_lock);
				}
				break;
			case TDS_SLP:
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
				break;
			/* case TDS_RUNNABLE: XXXKSE maybe raise priority? */
			default: 	/* needed to avoid an error */
				break;
			}
		}
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		mtx_lock_spin(&sched_lock);
		TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
		td->td_state = TDS_SUSPENDED;
		p->p_suspcount++;
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		PROC_LOCK(p);
	}
	return (0);
}
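
/*
 * A minimal sketch of the calling convention documented above, as a
 * caller such as fork() or exit1() might use it; the error handling
 * shown is illustrative only.
 */
#if 0
PROC_LOCK(p);
if (thread_single(SNGLE_EXIT)) {
	/* Another thread is already single-threading; back out. */
	PROC_UNLOCK(p);
	return (ERESTART);
}
/* We are now the only runnable thread in the process. */
#endif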

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SNGL) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SNGL.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			mtx_lock_spin(&sched_lock);
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			thread_exit();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 *
		 * XXXKSE if TDF_BOUND is true
		 * it will not release its KSE, which might
		 * lead to deadlock if there are not enough KSEs
		 * to complete all waiting threads.
		 * Maybe we could 'lend' it out again.
		 * (Lent KSEs can not go back to userland?)
		 * and can only be lent in STOPPED state.
		 */
		mtx_assert(&Giant, MA_NOTOWNED);
		mtx_lock_spin(&sched_lock);
		if ((p->p_flag & P_STOPPED_SGNL) &&
		    (p->p_suspcount + 1 == p->p_numthreads)) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p->p_pptr);
			if ((p->p_pptr->p_procsig->ps_flag &
			    PS_NOCLDSTOP) == 0) {
				psignal(p->p_pptr, SIGCHLD);
			}
			PROC_UNLOCK(p->p_pptr);
			/* Retake sched_lock, dropped to take the proc lock. */
			mtx_lock_spin(&sched_lock);
		}
		p->p_suspcount++;
		td->td_state = TDS_SUSPENDED;
		TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
		PROC_UNLOCK(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SNGL) {
			if (p->p_numthreads == p->p_suspcount) {
				TAILQ_REMOVE(&p->p_suspended,
				    p->p_singlethread, td_runq);
				p->p_suspcount--;
				setrunqueue(p->p_singlethread);
			}
		}
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}
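
/*
 * A minimal sketch of the intended call site, e.g. on the return path
 * to user mode; per the table above, a 0 argument means we are willing
 * to suspend or exit here rather than return.
 */
#if 0
PROC_LOCK(p);
thread_suspend_check(0);	/* may suspend here, or never return */
PROC_UNLOCK(p);
#endif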

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended)) != NULL) {
			TAILQ_REMOVE(&p->p_suspended, td, td_runq);
			p->p_suspcount--;
			setrunqueue(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SNGL) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		TAILQ_REMOVE(&p->p_suspended, p->p_singlethread, td_runq);
		p->p_suspcount--;
		setrunqueue(p->p_singlethread);
	}
}

/*
 * End the single-threading mode begun by thread_single().
 * Called with the proc locked by the single-threading thread.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SNGL;
	p->p_singlethread = NULL;
	thread_unsuspend(p);
}