xref: /freebsd/sys/kern/kern_thread.c (revision a3e8fd0b7f663db7eafff527d5c3ca3bcfa8a537)
1 /*
2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3  *  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38 #include <sys/sysctl.h>
39 #include <sys/sysproto.h>
40 #include <sys/filedesc.h>
41 #include <sys/tty.h>
42 #include <sys/signalvar.h>
43 #include <sys/sx.h>
44 #include <sys/user.h>
45 #include <sys/jail.h>
46 #include <sys/kse.h>
47 #include <sys/ktr.h>
48 #include <sys/ucontext.h>
49 
50 #include <vm/vm.h>
51 #include <vm/vm_object.h>
52 #include <vm/pmap.h>
53 #include <vm/uma.h>
54 #include <vm/vm_map.h>
55 
56 #include <machine/frame.h>
57 
58 /*
59  * KSEGRP related storage.
60  */
61 static uma_zone_t ksegrp_zone;
62 static uma_zone_t kse_zone;
63 static uma_zone_t thread_zone;
64 
65 /* DEBUG ONLY */
66 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
67 static int oiks_debug = 1;	/* 0 disable, 1 printf, 2 enter debugger */
68 SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
69 	&oiks_debug, 0, "OIKS thread debug");
70 
71 static int max_threads_per_proc = 10;
72 SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW,
73 	&max_threads_per_proc, 0, "Limit on threads per proc");
74 
75 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
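/*
 * For example, RANGEOF(struct ksegrp, kg_startzero, kg_endzero) is the
 * number of bytes between the two marker members, used with bzero()/bcopy()
 * below to clear or copy a contiguous slice of a structure.
 */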
76 
77 struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
78 TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
79 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
80 struct mtx zombie_thread_lock;
81 MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
82     "zombie_thread_lock", MTX_SPIN);
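/*
 * The zombie queues hold threads, KSEs and ksegrps that have been discarded
 * but whose storage (e.g. the kernel stack) may still be in use at the
 * instant they are released; thread_reap() frees them later, once it is
 * safe to do so.
 */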
83 
84 
85 
86 void kse_purge(struct proc *p, struct thread *td);
87 /*
88  * Prepare a thread for use.
89  */
90 static void
91 thread_ctor(void *mem, int size, void *arg)
92 {
93 	struct thread	*td;
94 
95 	KASSERT((size == sizeof(struct thread)),
96 	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
97 
98 	td = (struct thread *)mem;
99 	td->td_state = TDS_INACTIVE;
100 	td->td_flags |= TDF_UNBOUND;
101 }
102 
103 /*
104  * Reclaim a thread after use.
105  */
106 static void
107 thread_dtor(void *mem, int size, void *arg)
108 {
109 	struct thread	*td;
110 
111 	KASSERT((size == sizeof(struct thread)),
112 	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
113 
114 	td = (struct thread *)mem;
115 
116 #ifdef INVARIANTS
117 	/* Verify that this thread is in a safe state to free. */
118 	switch (td->td_state) {
119 	case TDS_INHIBITED:
120 	case TDS_RUNNING:
121 	case TDS_CAN_RUN:
122 	case TDS_RUNQ:
123 		/*
124 		 * We must never unlink a thread that is in one of
125 		 * these states, because it is currently active.
126 		 */
127 		panic("bad state for thread unlinking");
128 		/* NOTREACHED */
129 	case TDS_INACTIVE:
130 		break;
131 	default:
132 		panic("bad thread state");
133 		/* NOTREACHED */
134 	}
135 #endif
136 }
137 
138 /*
139  * Initialize type-stable parts of a thread (when newly created).
140  */
141 static void
142 thread_init(void *mem, int size)
143 {
144 	struct thread	*td;
145 
146 	KASSERT((size == sizeof(struct thread)),
147 	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
148 
149 	td = (struct thread *)mem;
150 	mtx_lock(&Giant);
151 	pmap_new_thread(td, 0);
152 	mtx_unlock(&Giant);
153 	cpu_thread_setup(td);
154 }
155 
156 /*
157  * Tear down type-stable parts of a thread (just before being discarded).
158  */
159 static void
160 thread_fini(void *mem, int size)
161 {
162 	struct thread	*td;
163 
164 	KASSERT((size == sizeof(struct thread)),
165 	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
166 
167 	td = (struct thread *)mem;
168 	pmap_dispose_thread(td);
169 }
170 
171 /*
172  * Link a KSE into its ksegrp's list of KSEs.
173  */
174 void
175 kse_link(struct kse *ke, struct ksegrp *kg)
176 {
177 	struct proc *p = kg->kg_proc;
178 
179 	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
180 	kg->kg_kses++;
181 	ke->ke_state = KES_UNQUEUED;
182 	ke->ke_proc	= p;
183 	ke->ke_ksegrp	= kg;
184 	ke->ke_thread	= NULL;
185 	ke->ke_oncpu = NOCPU;
186 }
187 
188 void
189 kse_unlink(struct kse *ke)
190 {
191 	struct ksegrp *kg;
192 
193 	mtx_assert(&sched_lock, MA_OWNED);
194 	kg = ke->ke_ksegrp;
195 	if (ke->ke_state == KES_IDLE) {
196 		kg->kg_idle_kses--;
197 		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
198 	}
199 
200 	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
201 	if (--kg->kg_kses == 0) {
202 			ksegrp_unlink(kg);
203 	}
204 	/*
205 	 * Aggregate stats from the KSE
206 	 */
207 	kse_stash(ke);
208 }
209 
210 void
211 ksegrp_link(struct ksegrp *kg, struct proc *p)
212 {
213 
214 	TAILQ_INIT(&kg->kg_threads);
215 	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
216 	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
217 	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
218 	TAILQ_INIT(&kg->kg_iq);		/* idle kses in ksegrp */
219 	TAILQ_INIT(&kg->kg_lq);		/* loan kses in ksegrp */
220 	kg->kg_proc	= p;
221 /* the following counters are in the -zero- section and may not need clearing */
222 	kg->kg_numthreads = 0;
223 	kg->kg_runnable = 0;
224 	kg->kg_kses = 0;
225 	kg->kg_idle_kses = 0;
226 	kg->kg_loan_kses = 0;
227 	kg->kg_runq_kses = 0; /* XXXKSE change name */
228 /* link it in now that it's consistent */
229 	p->p_numksegrps++;
230 	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
231 }
232 
233 void
234 ksegrp_unlink(struct ksegrp *kg)
235 {
236 	struct proc *p;
237 
238 	mtx_assert(&sched_lock, MA_OWNED);
239 	p = kg->kg_proc;
240 	KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)),
241 	    ("ksegrp_unlink: residual threads or KSEs"));
242 	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
243 	p->p_numksegrps--;
244 	/*
245 	 * Aggregate stats from the KSEGRP
246 	 */
247 	ksegrp_stash(kg);
248 }
249 
250 /*
251  * for a newly created process,
252  * link up the structure and its initial threads etc.
253  */
254 void
255 proc_linkup(struct proc *p, struct ksegrp *kg,
256 			struct kse *ke, struct thread *td)
257 {
258 
259 	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
260 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
261 	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
262 	p->p_numksegrps = 0;
263 	p->p_numthreads = 0;
264 
265 	ksegrp_link(kg, p);
266 	kse_link(ke, kg);
267 	thread_link(td, kg);
268 }
269 
270 int
271 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
272 {
273 
274 	return(ENOSYS);
275 }
276 
277 int
278 kse_exit(struct thread *td, struct kse_exit_args *uap)
279 {
280 	struct proc *p;
281 	struct ksegrp *kg;
282 
283 	p = td->td_proc;
284 	/* KSE-enabled processes only, please. */
285 	if (!(p->p_flag & P_KSES))
286 		return EINVAL;
287 	/* must be a bound thread */
288 	if (td->td_flags & TDF_UNBOUND)
289 		return EINVAL;
290 	kg = td->td_ksegrp;
291 	/* serialize killing kse */
292 	PROC_LOCK(p);
293 	mtx_lock_spin(&sched_lock);
294 	if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) {
295 		mtx_unlock_spin(&sched_lock);
296 		PROC_UNLOCK(p);
297 		return (EDEADLK);
298 	}
299 	if ((p->p_numthreads == 1) && (p->p_numksegrps == 1)) {
300 		p->p_flag &= ~P_KSES;
301 		mtx_unlock_spin(&sched_lock);
302 		PROC_UNLOCK(p);
303 	} else {
304 		while (mtx_owned(&Giant))
305 			mtx_unlock(&Giant);
306 		td->td_kse->ke_flags |= KEF_EXIT;
307 		thread_exit();
308 		/* NOTREACHED */
309 	}
310 	return 0;
311 }
312 
313 int
314 kse_release(struct thread *td, struct kse_release_args *uap)
315 {
316 	struct proc *p;
317 
318 	p = td->td_proc;
319 	/* KSE-enabled processes only, please. */
320 	if (p->p_flag & P_KSES) {
321 		PROC_LOCK(p);
322 		mtx_lock_spin(&sched_lock);
323 		thread_exit();
324 		/* NOTREACHED */
325 	}
326 	return (EINVAL);
327 }
328 
329 /* struct kse_wakeup_args {
330 	struct kse_mailbox *mbx;
331 }; */
332 int
333 kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
334 {
335 	struct proc *p;
336 	struct kse *ke, *ke2;
337 	struct ksegrp *kg;
338 
339 	p = td->td_proc;
340 	/* KSE-enabled processes only, please. */
341 	if (!(p->p_flag & P_KSES))
342 		return EINVAL;
343 	if (td->td_standin == NULL)
344 		td->td_standin = thread_alloc();
345 	ke = NULL;
346 	mtx_lock_spin(&sched_lock);
347 	if (uap->mbx) {
348 		FOREACH_KSEGRP_IN_PROC(p, kg) {
349 			FOREACH_KSE_IN_GROUP(kg, ke2) {
350 				if (ke2->ke_mailbox != uap->mbx)
351 					continue;
352 				if (ke2->ke_state == KES_IDLE) {
353 					ke = ke2;
354 					goto found;
355 				} else {
356 					mtx_unlock_spin(&sched_lock);
357 					td->td_retval[0] = 0;
358 					td->td_retval[1] = 0;
359 					return 0;
360 				}
361 			}
362 		}
363 	} else {
364 		kg = td->td_ksegrp;
365 		ke = TAILQ_FIRST(&kg->kg_iq);
366 	}
367 	if (ke == NULL) {
368 		mtx_unlock_spin(&sched_lock);
369 		return ESRCH;
370 	}
371 found:
372 	TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
373 	kg->kg_idle_kses--;
374 	thread_schedule_upcall(td, ke);
375 	mtx_unlock_spin(&sched_lock);
376 	td->td_retval[0] = 0;
377 	td->td_retval[1] = 0;
378 	return 0;
379 }
380 
381 /*
382  * No new KSEG: first call: use current KSE, don't schedule an upcall
383  * In all other situations, allocate a new KSE and schedule an upcall on it.
384  */
385 /* struct kse_create_args {
386 	struct kse_mailbox *mbx;
387 	int newgroup;
388 }; */
389 int
390 kse_create(struct thread *td, struct kse_create_args *uap)
391 {
392 	struct kse *newke;
393 	struct kse *ke;
394 	struct ksegrp *newkg;
395 	struct ksegrp *kg;
396 	struct proc *p;
397 	struct kse_mailbox mbx;
398 	int err;
399 
400 	p = td->td_proc;
401 	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
402 		return (err);
403 
404 	p->p_flag |= P_KSES; /* easier to just set it than to test and set */
405 	kg = td->td_ksegrp;
406 	if (uap->newgroup) {
407 		/*
408 		 * If we want a new KSEGRP it doesn't matter whether
409 		 * we have already fired up KSE mode before or not.
410 		 * We put the process in KSE mode and create a new KSEGRP
411 		 * and KSE. If our KSE has not got a mailbox yet then
412 		 * that doesn't matter, just leave it that way. It will
413 	 * ensure that this thread stays BOUND. It's possible
414 	 * that the call came from a threaded library and the main
415 		 * program knows nothing of threads.
416 		 */
417 		newkg = ksegrp_alloc();
418 		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
419 		      kg_startzero, kg_endzero));
420 		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
421 		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
422 		newke = kse_alloc();
423 	} else {
424 		/*
425 		 * Otherwise, if we have already set this KSE
426 		 * to have a mailbox, we want to make another KSE here,
427 	 * but only if we are not already at the limit, which
428 		 * is 1 per CPU max.
429 		 *
430 		 * If the current KSE doesn't have a mailbox we just use it
431 		 * and give it one.
432 		 *
433 	 * Because we don't like to access the KSE outside of
434 	 * schedlock while we are UNBOUND (it can change if we
435 	 * are preempted by an interrupt), we simply assume an
436 	 * UNBOUND thread's KSE already has a mailbox, and only
437 	 * need to actually look at the KSE if we are BOUND,
438 	 * which is safe.
439 		 */
440 		if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
441 #if 0  /* while debugging */
442 #ifdef SMP
443 			if (kg->kg_kses > mp_ncpus)
444 #endif
445 				return (EPROCLIM);
446 #endif
447 			newke = kse_alloc();
448 		} else {
449 			newke = NULL;
450 		}
451 		newkg = NULL;
452 	}
453 	if (newke) {
454 		bzero(&newke->ke_startzero, RANGEOF(struct kse,
455 		      ke_startzero, ke_endzero));
456 #if 0
457 		bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
458 		      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
459 #endif
460 		PROC_LOCK(p);
461 		if (SIGPENDING(p))
462 			newke->ke_flags |= KEF_ASTPENDING;
463 		PROC_UNLOCK(p);
464 		/* For the first call this may not have been set */
465 		if (td->td_standin == NULL) {
466 			td->td_standin = thread_alloc();
467 		}
468 		mtx_lock_spin(&sched_lock);
469 		if (newkg)
470 			ksegrp_link(newkg, p);
471 		else
472 			newkg = kg;
473 		kse_link(newke, newkg);
474 		newke->ke_mailbox = uap->mbx;
475 		newke->ke_upcall = mbx.km_func;
476 		bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t));
477 		thread_schedule_upcall(td, newke);
478 		mtx_unlock_spin(&sched_lock);
479 	} else {
480 		/*
481 		 * If we didn't allocate a new KSE then we are using
482 		 * the existing (BOUND) KSE.
483 		 */
484 		ke = td->td_kse;
485 		ke->ke_mailbox = uap->mbx;
486 		ke->ke_upcall = mbx.km_func;
487 		bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t));
488 	}
489 	/*
490 	 * Fill out the KSE-mode specific fields of the new kse.
491 	 */
492 
493 	td->td_retval[0] = 0;
494 	td->td_retval[1] = 0;
495 	return (0);
496 }
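/*
 * A minimal, hypothetical userland sketch of entering KSE mode (the UTS
 * entry point, stack and size names are illustrative assumptions, not part
 * of this file):
 *
 *	struct kse_mailbox mbx;
 *
 *	bzero(&mbx, sizeof(mbx));
 *	mbx.km_func = uts_entry;		// upcall entry point
 *	mbx.km_stack.ss_sp = uts_stack;		// stack the upcall runs on
 *	mbx.km_stack.ss_size = uts_stacksize;
 *	kse_create(&mbx, 0);			// stay in the current ksegrp
 */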
497 
498 /*
499  * Fill a ucontext_t with a thread's context information.
500  *
501  * This is an analogue to getcontext(3).
502  */
503 void
504 thread_getcontext(struct thread *td, ucontext_t *uc)
505 {
506 
507 /*
508  * XXX this is declared in a MD include file, i386/include/ucontext.h but
509  * is used in MI code.
510  */
511 #ifdef __i386__
512 	get_mcontext(td, &uc->uc_mcontext);
513 #endif
514 	uc->uc_sigmask = td->td_proc->p_sigmask;
515 }
516 
517 /*
518  * Set a thread's context from a ucontext_t.
519  *
520  * This is an analogue to setcontext(3).
521  */
522 int
523 thread_setcontext(struct thread *td, ucontext_t *uc)
524 {
525 	int ret;
526 
527 /*
528  * XXX this is declared in a MD include file, i386/include/ucontext.h but
529  * is used in MI code.
530  */
531 #ifdef __i386__
532 	ret = set_mcontext(td, &uc->uc_mcontext);
533 #else
534 	ret = ENOSYS;
535 #endif
536 	if (ret == 0) {
537 		SIG_CANTMASK(uc->uc_sigmask);
538 		PROC_LOCK(td->td_proc);
539 		td->td_proc->p_sigmask = uc->uc_sigmask;
540 		PROC_UNLOCK(td->td_proc);
541 	}
542 	return (ret);
543 }
544 
545 /*
546  * Initialize global thread allocation resources.
547  */
548 void
549 threadinit(void)
550 {
551 
552 #ifndef __ia64__
553 	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
554 	    thread_ctor, thread_dtor, thread_init, thread_fini,
555 	    UMA_ALIGN_CACHE, 0);
556 #else
557 	/*
558 	 * XXX the ia64 kstack allocator is really lame and is at the mercy
559  * of contigmalloc().  This hackery is to pre-construct a whole
560 	 * pile of thread structures with associated kernel stacks early
561 	 * in the system startup while contigmalloc() still works. Once we
562 	 * have them, keep them.  Sigh.
563 	 */
564 	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
565 	    thread_ctor, thread_dtor, thread_init, thread_fini,
566 	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
567 	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
568 #endif
569 	ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
570 	    NULL, NULL, NULL, NULL,
571 	    UMA_ALIGN_CACHE, 0);
572 	kse_zone = uma_zcreate("KSE", sizeof (struct kse),
573 	    NULL, NULL, NULL, NULL,
574 	    UMA_ALIGN_CACHE, 0);
575 }
576 
577 /*
578  * Stash an embarrassingly extra thread into the zombie thread queue.
579  */
580 void
581 thread_stash(struct thread *td)
582 {
583 	mtx_lock_spin(&zombie_thread_lock);
584 	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
585 	mtx_unlock_spin(&zombie_thread_lock);
586 }
587 
588 /*
589  * Stash an embarrassingly extra kse into the zombie kse queue.
590  */
591 void
592 kse_stash(struct kse *ke)
593 {
594 	mtx_lock_spin(&zombie_thread_lock);
595 	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
596 	mtx_unlock_spin(&zombie_thread_lock);
597 }
598 
599 /*
600  * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
601  */
602 void
603 ksegrp_stash(struct ksegrp *kg)
604 {
605 	mtx_lock_spin(&zombie_thread_lock);
606 	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
607 	mtx_unlock_spin(&zombie_thread_lock);
608 }
609 
610 /*
611  * Reap zombie threads.
612  */
613 void
614 thread_reap(void)
615 {
616 	struct thread *td_first, *td_next;
617 	struct kse *ke_first, *ke_next;
618 	struct ksegrp *kg_first, * kg_next;
619 
620 	/*
621 	 * Don't even bother to lock if there are none at this instant;
622 	 * we really don't care about the next instant..
623 	 */
624 	if ((!TAILQ_EMPTY(&zombie_threads))
625 	    || (!TAILQ_EMPTY(&zombie_kses))
626 	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
627 		mtx_lock_spin(&zombie_thread_lock);
628 		td_first = TAILQ_FIRST(&zombie_threads);
629 		ke_first = TAILQ_FIRST(&zombie_kses);
630 		kg_first = TAILQ_FIRST(&zombie_ksegrps);
631 		if (td_first)
632 			TAILQ_INIT(&zombie_threads);
633 		if (ke_first)
634 			TAILQ_INIT(&zombie_kses);
635 		if (kg_first)
636 			TAILQ_INIT(&zombie_ksegrps);
637 		mtx_unlock_spin(&zombie_thread_lock);
638 		while (td_first) {
639 			td_next = TAILQ_NEXT(td_first, td_runq);
640 			thread_free(td_first);
641 			td_first = td_next;
642 		}
643 		while (ke_first) {
644 			ke_next = TAILQ_NEXT(ke_first, ke_procq);
645 			kse_free(ke_first);
646 			ke_first = ke_next;
647 		}
648 		while (kg_first) {
649 			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
650 			ksegrp_free(kg_first);
651 			kg_first = kg_next;
652 		}
653 	}
654 }
655 
656 /*
657  * Allocate a ksegrp.
658  */
659 struct ksegrp *
660 ksegrp_alloc(void)
661 {
662 	return (uma_zalloc(ksegrp_zone, M_WAITOK));
663 }
664 
665 /*
666  * Allocate a kse.
667  */
668 struct kse *
669 kse_alloc(void)
670 {
671 	return (uma_zalloc(kse_zone, M_WAITOK));
672 }
673 
674 /*
675  * Allocate a thread.
676  */
677 struct thread *
678 thread_alloc(void)
679 {
680 	thread_reap(); /* check if any zombies to get */
681 	return (uma_zalloc(thread_zone, M_WAITOK));
682 }
683 
684 /*
685  * Deallocate a ksegrp.
686  */
687 void
688 ksegrp_free(struct ksegrp *td)
689 {
690 	uma_zfree(ksegrp_zone, td);
691 }
692 
693 /*
694  * Deallocate a kse.
695  */
696 void
697 kse_free(struct kse *td)
698 {
699 	uma_zfree(kse_zone, td);
700 }
701 
702 /*
703  * Deallocate a thread.
704  */
705 void
706 thread_free(struct thread *td)
707 {
708 	uma_zfree(thread_zone, td);
709 }
710 
711 /*
712  * Store the thread context in the UTS's mailbox,
713  * then add the mailbox at the head of a list we are building in user space.
714  * The list is anchored in the ksegrp structure.
715  */
716 int
717 thread_export_context(struct thread *td)
718 {
719 	struct proc *p;
720 	struct ksegrp *kg;
721 	uintptr_t mbx;
722 	void *addr;
723 	int error;
724 	ucontext_t uc;
725 
726 	p = td->td_proc;
727 	kg = td->td_ksegrp;
728 
729 	/* Export the user/machine context. */
730 #if 0
731 	addr = (caddr_t)td->td_mailbox +
732 	    offsetof(struct kse_thr_mailbox, tm_context);
733 #else /* if user pointer arithmetic is valid in the kernel */
734 		addr = (void *)(&td->td_mailbox->tm_context);
735 #endif
736 	error = copyin(addr, &uc, sizeof(ucontext_t));
737 	if (error == 0) {
738 		thread_getcontext(td, &uc);
739 		error = copyout(&uc, addr, sizeof(ucontext_t));
740 
741 	}
742 	if (error) {
743 		PROC_LOCK(p);
744 		psignal(p, SIGSEGV);
745 		PROC_UNLOCK(p);
746 		return (error);
747 	}
748 	/* get address in latest mbox of list pointer */
749 #if 0
750 	addr = (caddr_t)td->td_mailbox
751 	    + offsetof(struct kse_thr_mailbox , tm_next);
752 #else /* if user pointer arithmetic is valid in the kernel */
753 	addr = (void *)(&td->td_mailbox->tm_next);
754 #endif
755 	/*
756 	 * Put the saved address of the previous first
757 	 * entry into this one
758 	 */
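	/*
	 * The suword()/re-check sequence below acts as a crude
	 * compare-and-swap: write the current list head into the user-space
	 * tm_next field, then verify under the proc lock that kg_completed
	 * has not changed before publishing the new head; otherwise retry.
	 */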
759 	for (;;) {
760 		mbx = (uintptr_t)kg->kg_completed;
761 		if (suword(addr, mbx)) {
762 			PROC_LOCK(p);
763 			psignal(p, SIGSEGV);
764 			PROC_UNLOCK(p);
765 			return (EFAULT);
766 		}
767 		PROC_LOCK(p);
768 		if (mbx == (uintptr_t)kg->kg_completed) {
769 			kg->kg_completed = td->td_mailbox;
770 			PROC_UNLOCK(p);
771 			break;
772 		}
773 		PROC_UNLOCK(p);
774 	}
775 	return (0);
776 }
777 
778 /*
779  * Take the list of completed mailboxes for this KSEGRP and put them on this
780  * KSE's mailbox as it's the next one going up.
781  */
782 static int
783 thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
784 {
785 	struct proc *p = kg->kg_proc;
786 	void *addr;
787 	uintptr_t mbx;
788 
789 #if 0
790 	addr = (caddr_t)ke->ke_mailbox
791 	    + offsetof(struct kse_mailbox, km_completed);
792 #else /* if user pointer arithmetic is valid in the kernel */
793 		addr = (void *)(&ke->ke_mailbox->km_completed);
794 #endif
795 	for (;;) {
796 		mbx = (uintptr_t)kg->kg_completed;
797 		if (suword(addr, mbx)) {
798 			PROC_LOCK(p);
799 			psignal(p, SIGSEGV);
800 			PROC_UNLOCK(p);
801 			return (EFAULT);
802 		}
803 		/* XXXKSE could use atomic CMPXCH here */
804 		PROC_LOCK(p);
805 		if (mbx == (uintptr_t)kg->kg_completed) {
806 			kg->kg_completed = NULL;
807 			PROC_UNLOCK(p);
808 			break;
809 		}
810 		PROC_UNLOCK(p);
811 	}
812 	return (0);
813 }
814 
815 /*
816  * Discard the current thread and exit from its context.
817  *
818  * Because we can't free a thread while we're operating under its context,
819  * push the current thread into our KSE's ke_tdspare slot, freeing the
820  * thread that might be there currently. Because we know that only this
821  * processor will run our KSE, we needn't worry about someone else grabbing
822  * our context before we do a cpu_throw.
823  */
824 void
825 thread_exit(void)
826 {
827 	struct thread *td;
828 	struct kse *ke;
829 	struct proc *p;
830 	struct ksegrp	*kg;
831 
832 	td = curthread;
833 	kg = td->td_ksegrp;
834 	p = td->td_proc;
835 	ke = td->td_kse;
836 
837 	mtx_assert(&sched_lock, MA_OWNED);
838 	KASSERT(p != NULL, ("thread exiting without a process"));
839 	KASSERT(ke != NULL, ("thread exiting without a kse"));
840 	KASSERT(kg != NULL, ("thread exiting without a kse group"));
841 	PROC_LOCK_ASSERT(p, MA_OWNED);
842 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
843 	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
844 
845 	if (ke->ke_tdspare != NULL) {
846 		thread_stash(ke->ke_tdspare);
847 		ke->ke_tdspare = NULL;
848 	}
849 	if (td->td_standin != NULL) {
850 		thread_stash(td->td_standin);
851 		td->td_standin = NULL;
852 	}
853 
854 	cpu_thread_exit(td);	/* XXXSMP */
855 
856 	/*
857 	 * The last thread is left attached to the process
858 	 * so that the whole bundle gets recycled. Skip
859 	 * all this stuff.
860 	 */
861 	if (p->p_numthreads > 1) {
862 		/*
863 		 * Unlink this thread from its proc and the kseg.
864 		 * In keeping with the other structs we probably should
865 		 * have a thread_unlink() that does some of this but it
866 		 * would only be called from here (I think) so it would
867 		 * be a waste. (might be useful for proc_fini() as well.)
868  		 */
869 		TAILQ_REMOVE(&p->p_threads, td, td_plist);
870 		p->p_numthreads--;
871 		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
872 		kg->kg_numthreads--;
873 		/*
874 		 * The test below is NOT true if we are the
875 		 * sole exiting thread. P_STOPPED_SINGLE is unset
876 		 * in exit1() after it is the only survivor.
877 		 */
878 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
879 			if (p->p_numthreads == p->p_suspcount) {
880 				thread_unsuspend_one(p->p_singlethread);
881 			}
882 		}
883 
884 		/* Reassign this thread's KSE. */
885 		ke->ke_thread = NULL;
886 		td->td_kse = NULL;
887 		ke->ke_state = KES_UNQUEUED;
888 		KASSERT((ke->ke_bound != td),
889 		    ("thread_exit: entered with ke_bound set"));
890 
891 		/*
892 		 * The reason for all this hoopla is
893 		 * an attempt to stop our thread stack from being freed
894 		 * until AFTER we have stopped running on it.
895 		 * Since we are under schedlock, almost any method where
896 		 * it is eventually freed by someone else is probably ok.
897 		 * (Especially if they do it under schedlock). We could
898 		 * almost free it here if we could be certain that
899 		 * the uma code wouldn't pull it apart immediately,
900 		 * but unfortunately we cannot guarantee that.
901 		 *
902 		 * For threads that are exiting and NOT killing their
903 		 * KSEs we can just stash it in the KSE, however
904 		 * in the case where the KSE is also being deallocated,
905 		 * we need to store it somewhere else. It turns out that
906 		 * we will never free the last KSE, so there is always one
907 		 * other KSE available. We might as well just choose one
908 		 * and stash it there. Being under schedlock should make that
909 		 * safe.
910 		 *
911 		 * In borrower threads, we can stash it in the lender,
912 		 * where it won't be needed until this thread is long gone.
913 		 * Borrower threads can't kill their KSE anyhow, so even
914 		 * the KSE would be a safe place for them. It is not
915 		 * necessary to have a KSE (or KSEGRP) at all beyond this
916 		 * point, while we are under the protection of schedlock.
917 		 *
918 		 * Either give the KSE to another thread to use (or make
919 		 * it idle), or free it entirely, possibly along with its
920 		 * ksegrp if it's the last one.
921 		 */
922 		if (ke->ke_flags & KEF_EXIT) {
923 			kse_unlink(ke);
924 			/*
925 			 * Designate another KSE to hold our thread.
926 			 * Safe as long as we abide by whatever lock
927 			 * we control it with.. The other KSE will not
928 			 * be able to run it until we release the schedlock,
929 			 * but we need to be careful about it deciding to
930 			 * write to the stack before then. Luckily
931 			 * I believe that while another thread's
932 			 * standin thread can be used in this way, the
933 			 * spare thread for the KSE cannot be used without
934 			 * holding schedlock at least once.
935 			 */
936 			ke =  FIRST_KSE_IN_PROC(p);
937 		} else {
938 			kse_reassign(ke);
939 		}
940 		if (ke->ke_bound) {
941 			/*
942 			 * We are a borrower..
943 			 * stash our thread with the owner.
944 			 */
945 			if (ke->ke_bound->td_standin) {
946 				thread_stash(ke->ke_bound->td_standin);
947 			}
948 			ke->ke_bound->td_standin = td;
949 		} else {
950 			if (ke->ke_tdspare != NULL) {
951 				thread_stash(ke->ke_tdspare);
952 				ke->ke_tdspare = NULL;
953 			}
954 			ke->ke_tdspare = td;
955 		}
956 		PROC_UNLOCK(p);
957 		td->td_state	= TDS_INACTIVE;
958 		td->td_proc	= NULL;
959 		td->td_ksegrp	= NULL;
960 		td->td_last_kse	= NULL;
961 	} else {
962 		PROC_UNLOCK(p);
963 	}
964 
965 	cpu_throw();
966 	/* NOTREACHED */
967 }
968 
969 /*
970  * Link a thread to a process.
971  * Set up anything that needs to be initialized for it to
972  * be used by the process.
973  *
974  * Note that we do not link to the proc's ucred here.
975  * The thread is linked as if running but no KSE assigned.
976  */
977 void
978 thread_link(struct thread *td, struct ksegrp *kg)
979 {
980 	struct proc *p;
981 
982 	p = kg->kg_proc;
983 	td->td_state = TDS_INACTIVE;
984 	td->td_proc	= p;
985 	td->td_ksegrp	= kg;
986 	td->td_last_kse	= NULL;
987 
988 	LIST_INIT(&td->td_contested);
989 	callout_init(&td->td_slpcallout, 1);
990 	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
991 	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
992 	p->p_numthreads++;
993 	kg->kg_numthreads++;
994 	if (oiks_debug && p->p_numthreads > max_threads_per_proc) {
995 		printf("OIKS %d\n", p->p_numthreads);
996 		if (oiks_debug > 1)
997 			Debugger("OIKS");
998 	}
999 	td->td_kse	= NULL;
1000 }
1001 
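/*
 * Release every idle KSE and every ksegrp other than the caller's from the
 * process, leaving only the caller's ksegrp linked.  Called when the
 * process is being single-threaded for exit (see thread_single()).
 */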
1002 void
1003 kse_purge(struct proc *p, struct thread *td)
1004 {
1005 	struct kse *ke;
1006 	struct ksegrp *kg;
1007 
1008  	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1009 	mtx_lock_spin(&sched_lock);
1010 	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1011 		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1012 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1013 			kg->kg_idle_kses--;
1014 			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1015 			kg->kg_kses--;
1016 			if (ke->ke_tdspare)
1017 				thread_stash(ke->ke_tdspare);
1018    			kse_stash(ke);
1019 		}
1020 		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1021 		p->p_numksegrps--;
1022 		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1023 		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1024 			("wrong kg_kses"));
1025 		if (kg != td->td_ksegrp) {
1026 			ksegrp_stash(kg);
1027 		}
1028 	}
1029 	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1030 	p->p_numksegrps++;
1031 	mtx_unlock_spin(&sched_lock);
1032 }
1033 
1034 
1035 /*
1036  * Create a thread and schedule it for upcall on the KSE given.
1037  */
1038 struct thread *
1039 thread_schedule_upcall(struct thread *td, struct kse *ke)
1040 {
1041 	struct thread *td2;
1042 	int newkse;
1043 
1044 	mtx_assert(&sched_lock, MA_OWNED);
1045 	newkse = (ke != td->td_kse);
1046 
1047 	/*
1048 	 * If the kse is already owned by another thread then we can't
1049 	 * schedule an upcall because the other thread must be BOUND
1050 	 * which means it is not in a position to take an upcall.
1051 	 * We must be borrowing the KSE to allow us to complete some in-kernel
1052 	 * work. When we complete, the Bound thread will have the chance to
1053 	 * complete. This thread will sleep as planned. Hopefully there will
1054 	 * eventually be an unbound thread that can be converted to an
1055 	 * upcall to report the completion of this thread.
1056 	 */
1057 	if (ke->ke_bound && ((ke->ke_bound->td_flags & TDF_UNBOUND) == 0)) {
1058 		return (NULL);
1059 	}
1060 	KASSERT((ke->ke_bound == NULL), ("kse already bound"));
1061 
1062 	if ((td2 = td->td_standin) != NULL) {
1063 		td->td_standin = NULL;
1064 	} else {
1065 		if (newkse)
1066 			panic("no reserve thread when called with a new kse");
1067 		/*
1068 		 * If called from (e.g.) sleep and we do not have
1069 		 * a reserve thread, then we've used it, so do not
1070 		 * create an upcall.
1071 		 */
1072 		return(NULL);
1073 	}
1074 	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1075 	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1076 	bzero(&td2->td_startzero,
1077 	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1078 	bcopy(&td->td_startcopy, &td2->td_startcopy,
1079 	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1080 	thread_link(td2, ke->ke_ksegrp);
1081 	cpu_set_upcall(td2, td->td_pcb);
1082 
1083 	/*
1084 	 * XXXKSE do we really need this? (default values for the
1085 	 * frame).
1086 	 */
1087 	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
1088 
1089 	/*
1090 	 * Bind the new thread to the KSE,
1091 	 * and if it's our KSE, lend it back to ourself
1092 	 * so we can continue running.
1093 	 */
1094 	td2->td_ucred = crhold(td->td_ucred);
1095 	td2->td_flags = TDF_UPCALLING; /* note: BOUND */
1096 	td2->td_kse = ke;
1097 	td2->td_state = TDS_CAN_RUN;
1098 	td2->td_inhibitors = 0;
1099 	/*
1100 	 * If called from msleep(), we are working on the current
1101 	 * KSE so fake that we borrowed it. If called from
1102 	 * kse_create(), don't, as we have a new kse too.
1103 	 */
1104 	if (!newkse) {
1105 		/*
1106 		 * This thread will be scheduled when the current thread
1107 		 * blocks, exits or tries to enter userspace (whichever
1108 		 * happens first). When that happens the KSE will "revert"
1109 		 * to this thread in a BOUND manner. Since we are called
1110 		 * from msleep() this is going to be "very soon" in nearly
1111 		 * all cases.
1112 		 */
1113 		ke->ke_bound = td2;
1114 		TD_SET_LOAN(td2);
1115 	} else {
1116 		ke->ke_bound = NULL;
1117 		ke->ke_thread = td2;
1118 		setrunqueue(td2);
1119 	}
1120 	return (td2);	/* bogus.. should be a void function */
1121 }
1122 
1123 /*
1124  * Schedule an upcall to notify a KSE process that it has received signals.
1125  *
1126  * XXX - Modifying a sigset_t like this is totally bogus.
1127  */
1128 struct thread *
1129 signal_upcall(struct proc *p, int sig)
1130 {
1131 	struct thread *td, *td2;
1132 	struct kse *ke;
1133 	sigset_t ss;
1134 	int error;
1135 
1136 	PROC_LOCK_ASSERT(p, MA_OWNED);
1137 return (NULL);
1138 
1139 	td = FIRST_THREAD_IN_PROC(p);
1140 	ke = td->td_kse;
1141 	PROC_UNLOCK(p);
1142 	error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
1143 	PROC_LOCK(p);
1144 	if (error)
1145 		return (NULL);
1146 	SIGADDSET(ss, sig);
1147 	PROC_UNLOCK(p);
1148 	error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
1149 	PROC_LOCK(p);
1150 	if (error)
1151 		return (NULL);
1152 	if (td->td_standin == NULL)
1153 		td->td_standin = thread_alloc();
1154 	mtx_lock_spin(&sched_lock);
1155 	td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
1156 	mtx_unlock_spin(&sched_lock);
1157 	return (td2);
1158 }
1159 
1160 /*
1161  * setup done on the thread when it enters the kernel.
1162  * XXXKSE Presently only for syscalls but eventually all kernel entries.
1163  */
1164 void
1165 thread_user_enter(struct proc *p, struct thread *td)
1166 {
1167 	struct kse *ke;
1168 
1169 	/*
1170 	 * First check that we shouldn't just abort.
1171 	 * But check if we are the single thread first!
1172 	 * XXX p_singlethread not locked, but should be safe.
1173 	 */
1174 	if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
1175 		PROC_LOCK(p);
1176 		mtx_lock_spin(&sched_lock);
1177 		thread_exit();
1178 		/* NOTREACHED */
1179 	}
1180 
1181 	/*
1182 	 * If we are doing a syscall in a KSE environment,
1183 	 * note where our mailbox is. There is always the
1184 	 * possibility that we could do this lazily (in sleep()),
1185 	 * but for now do it every time.
1186 	 */
1187 	ke = td->td_kse;
1188 	if (ke->ke_mailbox != NULL) {
1189 #if 0
1190 		td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
1191 		    + offsetof(struct kse_mailbox, km_curthread));
1192 #else /* if user pointer arithmetic is ok in the kernel */
1193 		td->td_mailbox =
1194 		    (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
1195 #endif
1196 		if ((td->td_mailbox == NULL) ||
1197 		    (td->td_mailbox == (void *)-1)) {
1198 			td->td_mailbox = NULL;	/* single thread it.. */
1199 			td->td_flags &= ~TDF_UNBOUND;
1200 		} else {
1201 			if (td->td_standin == NULL)
1202 				td->td_standin = thread_alloc();
1203 			td->td_flags |= TDF_UNBOUND;
1204 		}
1205 	}
1206 }
1207 
1208 /*
1209  * The extra work we go through if we are a threaded process when we
1210  * return to userland.
1211  *
1212  * If we are a KSE process and returning to user mode, check for
1213  * extra work to do before we return (e.g. for more syscalls
1214  * to complete first).  If we were in a critical section, we should
1215  * just return to let it finish. Same if we were in the UTS (in
1216  * which case the mailbox's context's busy indicator will be set).
1217  * The only traps we support will have set the mailbox.
1218  * We will clear it here.
1219  */
1220 int
1221 thread_userret(struct thread *td, struct trapframe *frame)
1222 {
1223 	int error;
1224 	int unbound;
1225 	struct kse *ke;
1226 	struct ksegrp *kg;
1227 	struct thread *td2;
1228 	struct proc *p;
1229 
1230 	error = 0;
1231 
1232 	unbound = td->td_flags & TDF_UNBOUND;
1233 
1234 	kg = td->td_ksegrp;
1235 	p = td->td_proc;
1236 
1237 	/*
1238 	 * Originally bound threads never upcall but they may
1239 	 * loan out their KSE at this point.
1240 	 * Upcalls imply bound.. They also may want to do some philanthropy.
1241 	 * Unbound threads on the other hand either yield to other work
1242 	 * or transform into an upcall.
1243 	 * (having saved their context to user space in both cases)
1244 	 */
1245 	if (unbound ) {
1246 		/*
1247 		 * We are an unbound thread, looking to return to
1248 		 * user space.
1249 		 * There are several possibilities:
1250 		 * 1) we are using a borrowed KSE. save state and exit.
1251 		 *    kse_reassign() will recycle the kse as needed,
1252 		 * 2) we are not.. save state, and then convert ourself
1253 		 *    to be an upcall, bound to the KSE.
1254 		 *    if there are others that need the kse,
1255 		 *    give them a chance by doing an mi_switch().
1256 		 *    Because we are bound, control will eventually return
1257 		 *    to us here.
1258 		 * ***
1259 		 * Save the thread's context, and link it
1260 		 * into the KSEGRP's list of completed threads.
1261 		 */
1262 		error = thread_export_context(td);
1263 		td->td_mailbox = NULL;
1264 		if (error) {
1265 			/*
1266 			 * If we are not running on a borrowed KSE, then
1267 			 * failing to do the KSE operation just defaults
1268 			 * back to synchronous operation, so just return from
1269 			 * the syscall. If it IS borrowed, there is nothing
1270 			 * we can do. We just lose that context. We
1271 			 * probably should note this somewhere and send
1272 			 * the process a signal.
1273 			 */
1274 			PROC_LOCK(td->td_proc);
1275 			psignal(td->td_proc, SIGSEGV);
1276 			mtx_lock_spin(&sched_lock);
1277 			if (td->td_kse->ke_bound == NULL) {
1278 				td->td_flags &= ~TDF_UNBOUND;
1279 				PROC_UNLOCK(td->td_proc);
1280 				mtx_unlock_spin(&sched_lock);
1281 				return (error);	/* go sync */
1282 			}
1283 			thread_exit();
1284 		}
1285 
1286 		/*
1287 		 * if the KSE is owned and we are borrowing it,
1288 		 * don't make an upcall, just exit so that the owner
1289 		 * can get its KSE if it wants it.
1290 		 * Our context is already safely stored for later
1291 		 * use by the UTS.
1292 		 */
1293 		PROC_LOCK(p);
1294 		mtx_lock_spin(&sched_lock);
1295 		if (td->td_kse->ke_bound) {
1296 			thread_exit();
1297 		}
1298 		PROC_UNLOCK(p);
1299 
1300 		/*
1301 		 * Turn ourself into a bound upcall.
1302 		 * We will rely on kse_reassign()
1303 		 * to make us run at a later time.
1304 		 * We should look just like a scheduled upcall
1305 		 * from msleep() or cv_wait().
1306 		 */
1307 		td->td_flags &= ~TDF_UNBOUND;
1308 		td->td_flags |= TDF_UPCALLING;
1309 		/* Only get here if we have become an upcall */
1310 
1311 	} else {
1312 		mtx_lock_spin(&sched_lock);
1313 	}
1314 	/*
1315 	 * We ARE going back to userland with this KSE.
1316 	 * Check for threads that need to borrow it.
1317 	 * Optimisation: don't call mi_switch if no-one wants the KSE.
1318 	 * Any other thread that comes ready after this missed the boat.
1319 	 */
1320 	ke = td->td_kse;
1321 	if ((td2 = kg->kg_last_assigned))
1322 		td2 = TAILQ_NEXT(td2, td_runq);
1323 	else
1324 		td2 = TAILQ_FIRST(&kg->kg_runq);
1325 	if (td2)  {
1326 		/*
1327 		 * force a switch to more urgent 'in kernel'
1328 		 * work. Control will return to this thread
1329 		 * when there is no more work to do.
1330 		 * kse_reassign() will do that for us.
1331 		 */
1332 		TD_SET_LOAN(td);
1333 		ke->ke_bound = td;
1334 		ke->ke_thread = NULL;
1335 		mi_switch(); /* kse_reassign() will (re)find td2 */
1336 	}
1337 	mtx_unlock_spin(&sched_lock);
1338 
1339 	/*
1340 	 * Optimisation:
1341 	 * Ensure that we have a spare thread available,
1342 	 * for when we re-enter the kernel.
1343 	 */
1344 	if (td->td_standin == NULL) {
1345 		if (ke->ke_tdspare) {
1346 			td->td_standin = ke->ke_tdspare;
1347 			ke->ke_tdspare = NULL;
1348 		} else {
1349 			td->td_standin = thread_alloc();
1350 		}
1351 	}
1352 
1353 	/*
1354 	 * To get here, we know there is no other need for our
1355 	 * KSE so we can proceed. If not upcalling, go back to
1356 	 * userspace. If we are, get the upcall set up.
1357 	 */
1358 	if ((td->td_flags & TDF_UPCALLING) == 0)
1359 		return (0);
1360 
1361 	/*
1362 	 * We must be an upcall to get this far.
1363 	 * There is no more work to do and we are going to ride
1364 	 * this thread/KSE up to userland as an upcall.
1365 	 * Do the last parts of the setup needed for the upcall.
1366 	 */
1367 	CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1368 	    td, td->td_proc->p_pid, td->td_proc->p_comm);
1369 
1370 	/*
1371 	 * Set user context to the UTS.
1372 	 */
1373 	cpu_set_upcall_kse(td, ke);
1374 
1375 	/*
1376 	 * Put any completed mailboxes on this KSE's list.
1377 	 */
1378 	error = thread_link_mboxes(kg, ke);
1379 	if (error)
1380 		goto bad;
1381 
1382 	/*
1383 	 * Set state and mailbox.
1384 	 * From now on we are just a bound outgoing process.
1385 	 * **Problem** userret is often called several times.
1386 	 * it would be nice if this all happened only on the first time
1387 	 * through. (the scan for extra work etc.)
1388 	 */
1389 	td->td_flags &= ~TDF_UPCALLING;
1390 #if 0
1391 	error = suword((caddr_t)ke->ke_mailbox +
1392 	    offsetof(struct kse_mailbox, km_curthread), 0);
1393 #else	/* if user pointer arithmetic is ok in the kernel */
1394 	error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
1395 #endif
1396 	if (!error)
1397 		return (0);
1398 
1399 bad:
1400 	/*
1401 	 * Things are going to be so screwed we should just kill the process.
1402  	 * How do we do that?
1403 	 */
1404 	PROC_LOCK(td->td_proc);
1405 	psignal(td->td_proc, SIGSEGV);
1406 	PROC_UNLOCK(td->td_proc);
1407 	return (error);	/* go sync */
1408 }
1409 
1410 /*
1411  * Enforce single-threading.
1412  *
1413  * Returns 1 if the caller must abort (another thread is waiting to
1414  * exit the process or similar). Process is locked!
1415  * Returns 0 when you are successfully the only thread running.
1416  * A process has successfully single threaded in the suspend mode when
1417  * there are no threads in user mode. Threads in the kernel must be
1418  * allowed to continue until they get to the user boundary. They may even
1419  * copy out their return values and data before suspending. They may however be
1420  * accelerated in reaching the user boundary as we will wake up
1421  * any sleeping threads that are interruptible (PCATCH).
1422  */
1423 int
1424 thread_single(int force_exit)
1425 {
1426 	struct thread *td;
1427 	struct thread *td2;
1428 	struct proc *p;
1429 
1430 	td = curthread;
1431 	p = td->td_proc;
1432 	PROC_LOCK_ASSERT(p, MA_OWNED);
1433 	KASSERT((td != NULL), ("curthread is NULL"));
1434 
1435 	if ((p->p_flag & P_KSES) == 0)
1436 		return (0);
1437 
1438 	/* Is someone already single threading? */
1439 	if (p->p_singlethread)
1440 		return (1);
1441 
1442 	if (force_exit == SINGLE_EXIT)
1443 		p->p_flag |= P_SINGLE_EXIT;
1444 	else
1445 		p->p_flag &= ~P_SINGLE_EXIT;
1446 	p->p_flag |= P_STOPPED_SINGLE;
1447 	p->p_singlethread = td;
1448 	/* XXXKSE Which lock protects the below values? */
1449 	while ((p->p_numthreads - p->p_suspcount) != 1) {
1450 		mtx_lock_spin(&sched_lock);
1451 		FOREACH_THREAD_IN_PROC(p, td2) {
1452 			if (td2 == td)
1453 				continue;
1454 			if (TD_IS_INHIBITED(td2)) {
1455 				if (force_exit == SINGLE_EXIT) {
1456 					if (TD_IS_SUSPENDED(td2)) {
1457 						thread_unsuspend_one(td2);
1458 					}
1459 					if (TD_ON_SLEEPQ(td2) &&
1460 					    (td2->td_flags & TDF_SINTR)) {
1461 						if (td2->td_flags & TDF_CVWAITQ)
1462 							cv_abort(td2);
1463 						else
1464 							abortsleep(td2);
1465 					}
1466 				} else {
1467 					if (TD_IS_SUSPENDED(td2))
1468 						continue;
1469 					/* maybe other inhibited states too? */
1470 					if (TD_IS_SLEEPING(td2))
1471 						thread_suspend_one(td2);
1472 				}
1473 			}
1474 		}
1475 		/*
1476 		 * Maybe we suspended some threads.. was it enough?
1477 		 */
1478 		if ((p->p_numthreads - p->p_suspcount) == 1) {
1479 			mtx_unlock_spin(&sched_lock);
1480 			break;
1481 		}
1482 
1483 		/*
1484 		 * Wake us up when everyone else has suspended.
1485 		 * In the mean time we suspend as well.
1486 		 */
1487 		thread_suspend_one(td);
1488 		mtx_unlock(&Giant);
1489 		PROC_UNLOCK(p);
1490 		mi_switch();
1491 		mtx_unlock_spin(&sched_lock);
1492 		mtx_lock(&Giant);
1493 		PROC_LOCK(p);
1494 	}
1495 	if (force_exit == SINGLE_EXIT)
1496 		kse_purge(p, td);
1497 	return (0);
1498 }
1499 
1500 /*
1501  * Called in from locations that can safely check to see
1502  * whether we have to suspend or at least throttle for a
1503  * single-thread event (e.g. fork).
1504  *
1505  * Such locations include userret().
1506  * If the "return_instead" argument is non zero, the thread must be able to
1507  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1508  *
1509  * The 'return_instead' argument tells the function if it may do a
1510  * thread_exit() or suspend, or whether the caller must abort and back
1511  * out instead.
1512  *
1513  * If the thread that set the single_threading request has set the
1514  * P_SINGLE_EXIT bit in the process flags then this call will never return
1515  * if 'return_instead' is false, but will exit.
1516  *
1517  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1518  *---------------+--------------------+---------------------
1519  *       0       | returns 0          |   returns 0 or 1
1520  *               | when ST ends       |   immediately
1521  *---------------+--------------------+---------------------
1522  *       1       | thread exits       |   returns 1
1523  *               |                    |  immediately
1524  * 0 = thread_exit() or suspension ok,
1525  * other = return error instead of stopping the thread.
1526  *
1527  * While a full suspension is under effect, even a single threading
1528  * thread would be suspended if it made this call (but it shouldn't).
1529  * This call should only be made from places where
1530  * thread_exit() would be safe as that may be the outcome unless
1531  * return_instead is set.
1532  */
1533 int
1534 thread_suspend_check(int return_instead)
1535 {
1536 	struct thread *td;
1537 	struct proc *p;
1538 	struct kse *ke;
1539 	struct ksegrp *kg;
1540 
1541 	td = curthread;
1542 	p = td->td_proc;
1543 	kg = td->td_ksegrp;
1544 	PROC_LOCK_ASSERT(p, MA_OWNED);
1545 	while (P_SHOULDSTOP(p)) {
1546 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1547 			KASSERT(p->p_singlethread != NULL,
1548 			    ("singlethread not set"));
1549 			/*
1550 			 * The only suspension in action is a
1551 			 * single-threading. Single threader need not stop.
1552 			 * XXX Should be safe to access unlocked
1553 			 * as it can only be set to be true by us.
1554 			 */
1555 			if (p->p_singlethread == td)
1556 				return (0);	/* Exempt from stopping. */
1557 		}
1558 		if (return_instead)
1559 			return (1);
1560 
1561 		/*
1562 		 * If the process is waiting for us to exit,
1563 		 * this thread should just suicide.
1564 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1565 		 */
1566 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1567 			mtx_lock_spin(&sched_lock);
1568 			while (mtx_owned(&Giant))
1569 				mtx_unlock(&Giant);
1570 			/*
1571 			 * Free extra KSEs and ksegrps.  We needn't worry
1572 			 * about whether the current thread is in the same
1573 			 * ksegrp as p_singlethread and the last KSE in the
1574 			 * group could be killed; that is protected by
1575 			 * kg_numthreads, which in this case must be > 1.
1576 			 */
1577 			ke = td->td_kse;
1578 			if (ke->ke_bound == NULL &&
1579 			    ((kg->kg_kses != 1) || (kg->kg_numthreads == 1)))
1580 				ke->ke_flags |= KEF_EXIT;
1581 			thread_exit();
1582 		}
1583 
1584 		/*
1585 		 * When a thread suspends, it just
1586 		 * moves to the process's suspend queue
1587 		 * and stays there.
1588 		 *
1589 		 * XXXKSE if TDF_BOUND is true
1590 		 * it will not release its KSE which might
1591 		 * lead to deadlock if there are not enough KSEs
1592 		 * to complete all waiting threads.
1593 		 * Maybe be able to 'lend' it out again.
1594 		 * (lent KSEs cannot go back to userland?)
1595 		 * and can only be lent in STOPPED state.
1596 		 */
1597 		mtx_lock_spin(&sched_lock);
1598 		if ((p->p_flag & P_STOPPED_SIG) &&
1599 		    (p->p_suspcount+1 == p->p_numthreads)) {
1600 			mtx_unlock_spin(&sched_lock);
1601 			PROC_LOCK(p->p_pptr);
1602 			if ((p->p_pptr->p_procsig->ps_flag &
1603 				PS_NOCLDSTOP) == 0) {
1604 				psignal(p->p_pptr, SIGCHLD);
1605 			}
1606 			PROC_UNLOCK(p->p_pptr);
1607 			mtx_lock_spin(&sched_lock);
1608 		}
1609 		mtx_assert(&Giant, MA_NOTOWNED);
1610 		thread_suspend_one(td);
1611 		PROC_UNLOCK(p);
1612 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1613 			if (p->p_numthreads == p->p_suspcount) {
1614 				thread_unsuspend_one(p->p_singlethread);
1615 			}
1616 		}
1617 		p->p_stats->p_ru.ru_nivcsw++;
1618 		mi_switch();
1619 		mtx_unlock_spin(&sched_lock);
1620 		PROC_LOCK(p);
1621 	}
1622 	return (0);
1623 }
1624 
1625 void
1626 thread_suspend_one(struct thread *td)
1627 {
1628 	struct proc *p = td->td_proc;
1629 
1630 	mtx_assert(&sched_lock, MA_OWNED);
1631 	p->p_suspcount++;
1632 	TD_SET_SUSPENDED(td);
1633 	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
1634 	/*
1635 	 * Hack: If we are suspending but are on the sleep queue
1636 	 * then we are in msleep or the cv equivalent. We
1637 	 * want to look like we have two Inhibitors.
1638 	 * May already be set.. doesn't matter.
1639 	 */
1640 	if (TD_ON_SLEEPQ(td))
1641 		TD_SET_SLEEPING(td);
1642 }
1643 
1644 void
1645 thread_unsuspend_one(struct thread *td)
1646 {
1647 	struct proc *p = td->td_proc;
1648 
1649 	mtx_assert(&sched_lock, MA_OWNED);
1650 	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
1651 	TD_CLR_SUSPENDED(td);
1652 	p->p_suspcount--;
1653 	setrunnable(td);
1654 }
1655 
1656 /*
1657  * Allow all threads blocked by single threading to continue running.
1658  */
1659 void
1660 thread_unsuspend(struct proc *p)
1661 {
1662 	struct thread *td;
1663 
1664 	mtx_assert(&sched_lock, MA_OWNED);
1665 	PROC_LOCK_ASSERT(p, MA_OWNED);
1666 	if (!P_SHOULDSTOP(p)) {
1667 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
1668 			thread_unsuspend_one(td);
1669 		}
1670 	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
1671 	    (p->p_numthreads == p->p_suspcount)) {
1672 		/*
1673 		 * Stopping everything also did the job for the single
1674 		 * threading request. Now we've downgraded to single-threaded,
1675 		 * let it continue.
1676 		 */
1677 		thread_unsuspend_one(p->p_singlethread);
1678 	}
1679 }
1680 
1681 void
1682 thread_single_end(void)
1683 {
1684 	struct thread *td;
1685 	struct proc *p;
1686 
1687 	td = curthread;
1688 	p = td->td_proc;
1689 	PROC_LOCK_ASSERT(p, MA_OWNED);
1690 	p->p_flag &= ~P_STOPPED_SINGLE;
1691 	p->p_singlethread = NULL;
1692 	/*
1693 	 * If there are other threads they may now run,
1694 	 * unless of course there is a blanket 'stop order'
1695 	 * on the process. The single threader must be allowed
1696 	 * to continue however as this is a bad place to stop.
1697 	 */
1698 	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
1699 		mtx_lock_spin(&sched_lock);
1700 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
1701 			thread_unsuspend_one(td);
1702 		}
1703 		mtx_unlock_spin(&sched_lock);
1704 	}
1705 }
1706 
1707 
1708