/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int oiks_debug = 1;	/* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
	&oiks_debug, 0, "OIKS thread debug");

static int max_threads_per_proc = 6;
SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

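/*
 * RANGEOF() yields the number of bytes from member 'start' to member 'end'
 * of a structure.  It is used with bzero()/bcopy() to clear or copy just
 * the td_startzero..td_endzero and td_startcopy..td_endcopy sections of a
 * struct thread when one is recycled (see thread_schedule_upcall()).
 */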
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
struct mtx zombie_thread_lock;
MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
    "zombie_thread_lock", MTX_SPIN);

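/*
 * The thread zone's hooks below pair up as follows: thread_init() and
 * thread_fini() run once, when an item first enters and finally leaves the
 * zone's type-stable storage, while thread_ctor() and thread_dtor() run on
 * every allocation from and free to the zone.
 */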
/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_flags |= TDF_UNBOUND;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread	*td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in an MD include file, i386/include/ucontext.h,
 * but is used in MI code.
 */
#ifdef __i386__
	get_mcontext(td, &uc->uc_mcontext);
#endif
	uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
	int ret;

/*
 * XXX this is declared in an MD include file, i386/include/ucontext.h,
 * but is used in MI code.
 */
#ifdef __i386__
	ret = set_mcontext(td, &uc->uc_mcontext);
#else
	ret = ENOSYS;
#endif
	if (ret == 0) {
		SIG_CANTMASK(uc->uc_sigmask);
		PROC_LOCK(td->td_proc);
		td->td_proc->p_sigmask = uc->uc_sigmask;
		PROC_UNLOCK(td->td_proc);
	}
	return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sizeof (struct kse),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&zombie_thread_lock);
}

/*
 * Reap zombie threads.
 */
void
thread_reap(void)
{
	struct thread *td_reaped;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_thread_lock);
		while (!TAILQ_EMPTY(&zombie_threads)) {
			td_reaped = TAILQ_FIRST(&zombie_threads);
			TAILQ_REMOVE(&zombie_threads, td_reaped, td_runq);
			mtx_unlock_spin(&zombie_thread_lock);
			thread_free(td_reaped);
			mtx_lock_spin(&zombie_thread_lock);
		}
		mtx_unlock_spin(&zombie_thread_lock);
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox, then add the mailbox
 * at the head of a list we are building in user space.  The list is
 * anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error;
	ucontext_t uc;

	/* Export the user/machine context. */
#if 0
	addr = (caddr_t)td->td_mailbox +
	    offsetof(struct kse_thr_mailbox, tm_context);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_context);
#endif
	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error == 0) {
		thread_getcontext(td, &uc);
		error = copyout(&uc, addr, sizeof(ucontext_t));
	}
	if (error) {
		psignal(td->td_proc, SIGSEGV);
		return (error);
	}
	/* Get the address of the list pointer in the newest mailbox. */
#if 0
	addr = (caddr_t)td->td_mailbox
	    + offsetof(struct kse_thr_mailbox, tm_next);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_next);
#endif
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
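	/*
	 * The loop below emulates an atomic compare-and-swap on the
	 * user-space list head: store the link with suword(), then
	 * recheck kg_completed under the proc lock and retry if some
	 * other thread completed a mailbox in the meantime.
	 */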
	kg = td->td_ksegrp;
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			psignal(kg->kg_proc, SIGSEGV);
			return (EFAULT);
		}
		PROC_LOCK(kg->kg_proc);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			PROC_UNLOCK(kg->kg_proc);
			break;
		}
		PROC_UNLOCK(kg->kg_proc);
	}
	return (0);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * KSE's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
{
	void *addr;
	uintptr_t mbx;

#if 0
	addr = (caddr_t)ke->ke_mailbox
	    + offsetof(struct kse_mailbox, km_completed);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&ke->ke_mailbox->km_completed);
#endif
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			psignal(kg->kg_proc, SIGSEGV);
			return (EFAULT);
		}
		/* XXXKSE could use atomic CMPXCH here */
		PROC_LOCK(kg->kg_proc);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(kg->kg_proc);
			break;
		}
		PROC_UNLOCK(kg->kg_proc);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our KSE's ke_tdspare slot, freeing the
 * thread that might be there currently. Because we know that only this
 * processor will run our KSE, we needn't worry about someone else grabbing
 * our context before we do a cpu_throw.
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp	*kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (ke->ke_tdspare != NULL) {
		thread_stash(ke->ke_tdspare);
		ke->ke_tdspare = NULL;
	}
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff in that case.
	 */
	if (p->p_numthreads > 1) {
		/* Reassign this thread's KSE. */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
		ke->ke_state = KES_UNQUEUED;
		kse_reassign(ke);

		/* Unlink this thread from its proc and its ksegrp. */
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		p->p_numthreads--;
		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
		kg->kg_numthreads--;
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread.  P_STOPPED_SINGLE is cleared
		 * in exit1() once it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		td->td_state	= TDS_INACTIVE;
		td->td_proc	= NULL;
		td->td_ksegrp	= NULL;
		td->td_last_kse	= NULL;
		ke->ke_tdspare = td;
	} else {
		PROC_UNLOCK(p);
	}

	cpu_throw();
	/* NOTREACHED */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc	= p;
	td->td_ksegrp	= kg;
	td->td_last_kse	= NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
	if (oiks_debug && p->p_numthreads > max_threads_per_proc) {
		printf("OIKS %d\n", p->p_numthreads);
		if (oiks_debug > 1)
			Debugger("OIKS");
	}
	td->td_kse	= NULL;
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);
	if (ke->ke_tdspare != NULL) {
		td2 = ke->ke_tdspare;
		ke->ke_tdspare = NULL;
	} else {
		mtx_unlock_spin(&sched_lock);
		td2 = thread_alloc();
		mtx_lock_spin(&sched_lock);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	     td, td->td_proc->p_pid, td->td_proc->p_comm);
	bzero(&td2->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned)RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ke->ke_ksegrp);
	cpu_set_upcall(td2, td->td_pcb);
	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
	/*
	 * The user context for this thread is selected when we choose
	 * a KSE and return to userland on it. All we need do here is
	 * note that the thread exists in order to perform an upcall.
	 *
	 * Since selecting a KSE to perform the upcall involves locking
	 * that KSE's context to our upcall, it's best to wait until the
	 * last possible moment before grabbing a KSE. We do this in
	 * userret().
	 */
	td2->td_ucred = crhold(td->td_ucred);
	td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
	TD_SET_CAN_RUN(td2);
	setrunqueue(td2);
	return (td2);
}

/*
 * Schedule an upcall to notify a KSE process that it has received signals.
 *
 * XXX - Modifying a sigset_t like this is totally bogus.
 */
struct thread *
signal_upcall(struct proc *p, int sig)
{
	struct thread *td, *td2;
	struct kse *ke;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	td = FIRST_THREAD_IN_PROC(p);
	ke = td->td_kse;
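	/*
	 * The proc lock is dropped around the copyin()/copyout() below
	 * because they may fault and sleep, and we may not hold a mutex
	 * across that.
	 */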
	PROC_UNLOCK(p);
	error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	PROC_LOCK(p);
	if (error)
		return (NULL);
	SIGADDSET(ss, sig);
	PROC_UNLOCK(p);
	error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
	PROC_LOCK(p);
	if (error)
		return (NULL);
	mtx_lock_spin(&sched_lock);
	td2 = thread_schedule_upcall(td, ke);
	mtx_unlock_spin(&sched_lock);
	return (td2);
}

/*
 * Consider whether or not an upcall should be made, and update the
 * TDF_UPCALLING flag appropriately.
 *
 * This function is called when the current thread was bound to a user
 * thread that performed a syscall that blocked, and is now returning.
 * Got that? syscall -> msleep -> wakeup -> syscall_return -> us.
 *
 * This thread will be returned to the UTS in its mailbox as a completed
 * thread.  We need to decide whether or not to perform an upcall now,
 * or simply queue the thread for later.
 *
 * XXXKSE Future enhancement: We could also return back to
 * the thread if we haven't had to do an upcall since then,
 * i.e. if the KSE's copy is == the thread's copy and there
 * are no other completed threads.
 */
static int
thread_consider_upcalling(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	int error;

	/*
	 * Save the thread's context, and link it
	 * into the KSEGRP's list of completed threads.
	 */
	error = thread_export_context(td);
	td->td_flags &= ~TDF_UNBOUND;
	td->td_mailbox = NULL;
	if (error)
		/*
		 * Failing to do the KSE operation just defaults
		 * back to synchronous operation, so just return from
		 * the syscall.
		 */
		return (error);

	/*
	 * Decide whether to perform an upcall now.
	 */
	/* Make sure there are no other threads waiting to run. */
	p = td->td_proc;
	kg = td->td_ksegrp;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
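	/*
	 * kg_last_assigned is understood to point at the last thread on
	 * the ksegrp's run queue that has been assigned a KSE; anything
	 * queued beyond it is still waiting for one.  So if the tail of
	 * the queue is not the last assigned thread, runnable work
	 * remains that we would be abandoning.
	 */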
	/* Bogus test, but OK for now. */
	if (TAILQ_FIRST(&kg->kg_runq) &&
	    (TAILQ_LAST(&kg->kg_runq, threadqueue)
		!= kg->kg_last_assigned)) {
		/*
		 * Another thread in this KSEG needs to run.
		 * Switch to it instead of performing an upcall;
		 * perform the upcall later and discard this
		 * thread for now.
		 *
		 * XXXKSE - As for the other threads to run:
		 * we COULD rush through all the threads
		 * in this KSEG at this priority, or we
		 * could throw the ball back into the court
		 * and just run the highest prio kse available.
		 * What is OUR priority?  The priority of the highest
		 * syscall waiting to be returned?
		 * For now, just let another KSE run (easiest).
		 */
		thread_exit(); /* Abandon current thread. */
		/* NOTREACHED */
	}
	/*
	 * Perform an upcall now.
	 *
	 * XXXKSE - Assumes we are going to userland, and not
	 * nested in the kernel.
	 */
	td->td_flags |= TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (0);
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish.  Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error;
	int unbound;
	struct kse *ke;

	/* Make the thread bound from now on, but remember what it was. */
	unbound = td->td_flags & TDF_UNBOUND;
	td->td_flags &= ~TDF_UNBOUND;
	/*
	 * Ensure that we have a spare thread available.
	 */
	ke = td->td_kse;
	if (ke->ke_tdspare == NULL) {
		mtx_lock(&Giant);
		ke->ke_tdspare = thread_alloc();
		mtx_unlock(&Giant);
	}
	/*
	 * Originally bound threads need no additional work.
	 */
	if (unbound == 0)
		return (0);
	error = 0;
	/*
	 * Decide whether or not we should perform an upcall now.
	 */
	if (((td->td_flags & TDF_UPCALLING) == 0) && unbound) {
		/* If we have other threads to run we will not return. */
		if ((error = thread_consider_upcalling(td)))
			return (error); /* Couldn't go async; just go sync. */
	}
	if (td->td_flags & TDF_UPCALLING) {
		/*
		 * There is no more work to do and we are going to ride
		 * this thread/KSE up to userland as an upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		/*
		 * Set user context to the UTS.
		 */
		cpu_set_upcall_kse(td, ke);

		/*
		 * Put any completed mailboxes on this KSE's list.
		 */
		error = thread_link_mboxes(td->td_ksegrp, ke);
		if (error)
			goto bad;

		/*
		 * Set state and mailbox.
		 */
		td->td_flags &= ~TDF_UPCALLING;
#if 0
		error = suword((caddr_t)ke->ke_mailbox +
		    offsetof(struct kse_mailbox, km_curthread),
		    0);
#else	/* if user pointer arithmetic is ok in the kernel */
		error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
		if (error)
			goto bad;
	}
	/*
	 * Stop any chance that we may be separated from
	 * the KSE we are currently on.  This is "biting the bullet":
	 * we are committing to go to user space as this KSE here.
	 */
	return (error);
bad:
	/*
	 * Things are going to be so screwed we should just kill the process.
	 * How do we do that?
	 */
	panic("thread_userret: need to kill proc... how?");
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  The process must be locked.
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	KASSERT((td != NULL), ("curthread is NULL"));
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_KSES) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT)
		p->p_flag |= P_SINGLE_EXIT;
	else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	p->p_singlethread = td;
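	/*
	 * Loop, waking or unsuspending the other threads so that they
	 * can reach the user boundary (or exit), and suspending
	 * ourselves each time around, until we are the only
	 * unsuspended thread left.
	 */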
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			if (TD_IS_INHIBITED(td2)) {
				if (TD_IS_SUSPENDED(td2)) {
					if (force_exit == SINGLE_EXIT) {
						thread_unsuspend_one(td2);
					}
				}
				if (TD_IS_SLEEPING(td2)) {
					if (td2->td_flags & TDF_CVWAITQ)
						cv_waitq_remove(td2);
					else
						unsleep(td2);
					break;
				}
				if (TD_CAN_RUN(td2))
					setrunqueue(td2);
			}
		}
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		PROC_LOCK(p);
	}
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue) or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function whether it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
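 *
 * An illustrative call from a spot that cannot afford to stop might
 * look like this (a sketch only; the EINTR policy is hypothetical):
 *
 *	PROC_LOCK(p);
 *	if (thread_suspend_check(1)) {
 *		PROC_UNLOCK(p);
 *		return (EINTR);
 *	}
 *	...
 *	PROC_UNLOCK(p);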
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  The single threader need
			 * not stop.  XXX Should be safe to access
			 * unlocked as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just kill itself.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			mtx_lock_spin(&sched_lock);
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			thread_exit();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 *
		 * XXXKSE if TDF_BOUND is true
		 * it will not release its KSE, which might
		 * lead to deadlock if there are not enough KSEs
		 * to complete all waiting threads.
		 * Maybe we could 'lend' it out again
		 * (lent kses can not go back to userland?)
		 * and it can only be lent in the STOPPED state.
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_flag & P_STOPPED_SIG) &&
		    (p->p_suspcount + 1 == p->p_numthreads)) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p->p_pptr);
			if ((p->p_pptr->p_procsig->ps_flag &
			    PS_NOCLDSTOP) == 0) {
				psignal(p->p_pptr, SIGCHLD);
			}
			PROC_UNLOCK(p->p_pptr);
			mtx_lock_spin(&sched_lock);
		}
		mtx_assert(&Giant, MA_NOTOWNED);
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

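/*
 * Mark a thread suspended and put it on the process's suspend queue.
 * The caller must hold sched_lock.
 */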
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent.  We
	 * want to look like we have two inhibitors.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

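/*
 * Take a thread off the process's suspend queue and make it runnable
 * again.  The caller must hold sched_lock.
 */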
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

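/*
 * End the single-threading mode started by thread_single().
 */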
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however, as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		mtx_lock_spin(&sched_lock);
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
		mtx_unlock_spin(&sched_lock);
	}
}
1044