xref: /freebsd/sys/kern/kern_thread.c (revision 747ca5f52192617ade3a33956f61380c684b74b8)
1 /*
2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3  *  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/proc.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/sysproto.h>
42 #include <sys/filedesc.h>
43 #include <sys/sched.h>
44 #include <sys/signalvar.h>
45 #include <sys/sx.h>
46 #include <sys/tty.h>
47 #include <sys/turnstile.h>
48 #include <sys/user.h>
49 #include <sys/jail.h>
50 #include <sys/kse.h>
51 #include <sys/ktr.h>
52 #include <sys/ucontext.h>
53 
54 #include <vm/vm.h>
55 #include <vm/vm_extern.h>
56 #include <vm/vm_object.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59 #include <vm/vm_map.h>
60 
61 #include <machine/frame.h>
62 
63 /*
64  * KSEGRP related storage.
65  */
66 static uma_zone_t ksegrp_zone;
67 static uma_zone_t kse_zone;
68 static uma_zone_t thread_zone;
69 static uma_zone_t upcall_zone;
70 
71 /* DEBUG ONLY */
72 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
73 static int thread_debug = 0;
74 SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
75 	&thread_debug, 0, "thread debug");
76 
77 static int max_threads_per_proc = 150;
78 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
79 	&max_threads_per_proc, 0, "Limit on threads per proc");
80 
81 static int max_groups_per_proc = 50;
82 SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
83 	&max_groups_per_proc, 0, "Limit on thread groups per proc");
84 
85 static int max_threads_hits;
86 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
87 	&max_threads_hits, 0, "");
88 
89 static int virtual_cpu;
90 
91 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
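/*
 * RANGEOF() yields the size in bytes of the member span [start, end) of a
 * structure.  It is used with bzero()/bcopy() below to clear or copy only
 * the region between two marker members, e.g.:
 *
 *	bzero(&newkg->kg_startzero,
 *	    RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
 */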
92 
93 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
94 TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
95 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
96 TAILQ_HEAD(, kse_upcall) zombie_upcalls =
97 	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
98 struct mtx kse_zombie_lock;
99 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
100 
101 static void kse_purge(struct proc *p, struct thread *td);
102 static void kse_purge_group(struct thread *td);
103 static int thread_update_usr_ticks(struct thread *td, int user);
104 static void thread_alloc_spare(struct thread *td, struct thread *spare);
105 
106 static int
107 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
108 {
109 	int error, new_val;
110 	int def_val;
111 
112 #ifdef SMP
113 	def_val = mp_ncpus;
114 #else
115 	def_val = 1;
116 #endif
117 	if (virtual_cpu == 0)
118 		new_val = def_val;
119 	else
120 		new_val = virtual_cpu;
121 	error = sysctl_handle_int(oidp, &new_val, 0, req);
122 	if (error != 0 || req->newptr == NULL)
123 		return (error);
124 	if (new_val < 0)
125 		return (EINVAL);
126 	virtual_cpu = new_val;
127 	return (0);
128 }
129 
130 /* DEBUG ONLY */
131 SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
132 	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
133 	"debug virtual cpus");
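/*
 * The knob above is exported as kern.threads.virtual_cpu.  A sketch of how
 * it might be inspected or tuned from userland (illustrative only):
 *
 *	sysctl kern.threads.virtual_cpu		# report the current value
 *	sysctl kern.threads.virtual_cpu=4	# pretend there are 4 CPUs
 *
 * A value of 0 means "use the real CPU count" (see the handler above).
 */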
134 
135 /*
136  * Prepare a thread for use.
137  */
138 static void
139 thread_ctor(void *mem, int size, void *arg)
140 {
141 	struct thread	*td;
142 
143 	td = (struct thread *)mem;
144 	td->td_state = TDS_INACTIVE;
145 	td->td_oncpu	= NOCPU;
146 	td->td_critnest = 1;
147 }
148 
149 /*
150  * Reclaim a thread after use.
151  */
152 static void
153 thread_dtor(void *mem, int size, void *arg)
154 {
155 	struct thread	*td;
156 
157 	td = (struct thread *)mem;
158 
159 #ifdef INVARIANTS
160 	/* Verify that this thread is in a safe state to free. */
161 	switch (td->td_state) {
162 	case TDS_INHIBITED:
163 	case TDS_RUNNING:
164 	case TDS_CAN_RUN:
165 	case TDS_RUNQ:
166 		/*
167 		 * We must never unlink a thread that is in one of
168 		 * these states, because it is currently active.
169 		 */
170 		panic("bad state for thread unlinking");
171 		/* NOTREACHED */
172 	case TDS_INACTIVE:
173 		break;
174 	default:
175 		panic("bad thread state");
176 		/* NOTREACHED */
177 	}
178 #endif
179 }
180 
181 /*
182  * Initialize type-stable parts of a thread (when newly created).
183  */
184 static void
185 thread_init(void *mem, int size)
186 {
187 	struct thread	*td;
188 
189 	td = (struct thread *)mem;
190 	vm_thread_new(td, 0);
191 	cpu_thread_setup(td);
192 	td->td_turnstile = turnstile_alloc();
193 	td->td_sched = (struct td_sched *)&td[1];
194 }
195 
196 /*
197  * Tear down type-stable parts of a thread (just before being discarded).
198  */
199 static void
200 thread_fini(void *mem, int size)
201 {
202 	struct thread	*td;
203 
204 	td = (struct thread *)mem;
205 	turnstile_free(td->td_turnstile);
206 	vm_thread_dispose(td);
207 }
208 
209 /*
210  * Initialize type-stable parts of a kse (when newly created).
211  */
212 static void
213 kse_init(void *mem, int size)
214 {
215 	struct kse	*ke;
216 
217 	ke = (struct kse *)mem;
218 	ke->ke_sched = (struct ke_sched *)&ke[1];
219 }
220 
221 /*
222  * Initialize type-stable parts of a ksegrp (when newly created).
223  */
224 static void
225 ksegrp_init(void *mem, int size)
226 {
227 	struct ksegrp	*kg;
228 
229 	kg = (struct ksegrp *)mem;
230 	kg->kg_sched = (struct kg_sched *)&kg[1];
231 }
232 
233 /*
234  * Link a KSE into its ksegrp.
235  */
236 void
237 kse_link(struct kse *ke, struct ksegrp *kg)
238 {
239 	struct proc *p = kg->kg_proc;
240 
241 	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
242 	kg->kg_kses++;
243 	ke->ke_state	= KES_UNQUEUED;
244 	ke->ke_proc	= p;
245 	ke->ke_ksegrp	= kg;
246 	ke->ke_thread	= NULL;
247 	ke->ke_oncpu	= NOCPU;
248 	ke->ke_flags	= 0;
249 }
250 
251 void
252 kse_unlink(struct kse *ke)
253 {
254 	struct ksegrp *kg;
255 
256 	mtx_assert(&sched_lock, MA_OWNED);
257 	kg = ke->ke_ksegrp;
258 	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
259 	if (ke->ke_state == KES_IDLE) {
260 		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
261 		kg->kg_idle_kses--;
262 	}
263 	--kg->kg_kses;
264 	/*
265 	 * Aggregate stats from the KSE
266 	 */
267 	kse_stash(ke);
268 }
269 
270 void
271 ksegrp_link(struct ksegrp *kg, struct proc *p)
272 {
273 
274 	TAILQ_INIT(&kg->kg_threads);
275 	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
276 	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
277 	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
278 	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
279 	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
280 	kg->kg_proc = p;
281 	/*
282 	 * the following counters are in the -zero- section
283 	 * and may not need clearing
284 	 */
285 	kg->kg_numthreads = 0;
286 	kg->kg_runnable   = 0;
287 	kg->kg_kses       = 0;
288 	kg->kg_runq_kses  = 0; /* XXXKSE change name */
289 	kg->kg_idle_kses  = 0;
290 	kg->kg_numupcalls = 0;
291 	/* link it in now that it's consistent */
292 	p->p_numksegrps++;
293 	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
294 }
295 
296 void
297 ksegrp_unlink(struct ksegrp *kg)
298 {
299 	struct proc *p;
300 
301 	mtx_assert(&sched_lock, MA_OWNED);
302 	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
303 	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
304 	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
305 
306 	p = kg->kg_proc;
307 	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
308 	p->p_numksegrps--;
309 	/*
310 	 * Aggregate stats from the ksegrp
311 	 */
312 	ksegrp_stash(kg);
313 }
314 
315 struct kse_upcall *
316 upcall_alloc(void)
317 {
318 	struct kse_upcall *ku;
319 
320 	ku = uma_zalloc(upcall_zone, M_WAITOK);
321 	bzero(ku, sizeof(*ku));
322 	return (ku);
323 }
324 
325 void
326 upcall_free(struct kse_upcall *ku)
327 {
328 
329 	uma_zfree(upcall_zone, ku);
330 }
331 
332 void
333 upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
334 {
335 
336 	mtx_assert(&sched_lock, MA_OWNED);
337 	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
338 	ku->ku_ksegrp = kg;
339 	kg->kg_numupcalls++;
340 }
341 
342 void
343 upcall_unlink(struct kse_upcall *ku)
344 {
345 	struct ksegrp *kg = ku->ku_ksegrp;
346 
347 	mtx_assert(&sched_lock, MA_OWNED);
348 	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
349 	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
350 	kg->kg_numupcalls--;
351 	upcall_stash(ku);
352 }
353 
354 void
355 upcall_remove(struct thread *td)
356 {
357 
358 	if (td->td_upcall) {
359 		td->td_upcall->ku_owner = NULL;
360 		upcall_unlink(td->td_upcall);
361 		td->td_upcall = 0;
362 	}
363 }
364 
365 /*
366  * For a newly created process,
367  * link up all the structures and its initial threads etc.
368  */
369 void
370 proc_linkup(struct proc *p, struct ksegrp *kg,
371 	    struct kse *ke, struct thread *td)
372 {
373 
374 	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
375 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
376 	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
377 	p->p_numksegrps = 0;
378 	p->p_numthreads = 0;
379 
380 	ksegrp_link(kg, p);
381 	kse_link(ke, kg);
382 	thread_link(td, kg);
383 }
384 
385 #ifndef _SYS_SYSPROTO_H_
386 struct kse_switchin_args {
387 	const struct __mcontext *mcp;
388 	long val;
389 	long *loc;
390 };
391 #endif
392 
393 int
394 kse_switchin(struct thread *td, struct kse_switchin_args *uap)
395 {
396 	mcontext_t mc;
397 	int error;
398 
399 	error = (uap->mcp == NULL) ? EINVAL : 0;
400 	if (!error)
401 		error = copyin(uap->mcp, &mc, sizeof(mc));
402 	if (!error && uap->loc != NULL)
403 		error = (suword(uap->loc, uap->val) != 0) ? EINVAL : 0;
404 	if (!error)
405 		error = set_mcontext(td, &mc);
406 	return ((error == 0) ? EJUSTRETURN : error);
407 }
408 
409 /*
410 struct kse_thr_interrupt_args {
411 	struct kse_thr_mailbox * tmbx;
412 	int cmd;
413 	long data;
414 };
415 */
416 int
417 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
418 {
419 	struct proc *p;
420 	struct thread *td2;
421 
422 	p = td->td_proc;
423 
424 	if (!(p->p_flag & P_SA))
425 		return (EINVAL);
426 
427 	switch (uap->cmd) {
428 	case KSE_INTR_SENDSIG:
429 		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
430 			return (EINVAL);
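		/* FALLTHROUGH */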
431 	case KSE_INTR_INTERRUPT:
432 	case KSE_INTR_RESTART:
433 		PROC_LOCK(p);
434 		mtx_lock_spin(&sched_lock);
435 		FOREACH_THREAD_IN_PROC(p, td2) {
436 			if (td2->td_mailbox == uap->tmbx)
437 				break;
438 		}
439 		if (td2 == NULL) {
440 			mtx_unlock_spin(&sched_lock);
441 			PROC_UNLOCK(p);
442 			return (ESRCH);
443 		}
444 		if (uap->cmd == KSE_INTR_SENDSIG) {
445 			if (uap->data > 0) {
446 				td2->td_flags &= ~TDF_INTERRUPT;
447 				mtx_unlock_spin(&sched_lock);
448 				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
449 			} else {
450 				mtx_unlock_spin(&sched_lock);
451 			}
452 		} else {
453 			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
454 			if (TD_CAN_UNBIND(td2))
455 				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
456 			if (uap->cmd == KSE_INTR_INTERRUPT)
457 				td2->td_intrval = EINTR;
458 			else
459 				td2->td_intrval = ERESTART;
460 			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
461 				if (td2->td_flags & TDF_CVWAITQ)
462 					cv_abort(td2);
463 				else
464 					abortsleep(td2);
465 			}
466 			mtx_unlock_spin(&sched_lock);
467 		}
468 		PROC_UNLOCK(p);
469 		break;
470 	case KSE_INTR_SIGEXIT:
471 		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
472 			return (EINVAL);
473 		PROC_LOCK(p);
474 		sigexit(td, (int)uap->data);
475 		break;
476 	default:
477 		return (EINVAL);
478 	}
479 	return (0);
480 }
481 
482 /*
483 struct kse_exit_args {
484 	register_t dummy;
485 };
486 */
487 int
488 kse_exit(struct thread *td, struct kse_exit_args *uap)
489 {
490 	struct proc *p;
491 	struct ksegrp *kg;
492 	struct kse *ke;
493 	struct kse_upcall *ku, *ku2;
494 	int    error, count;
495 
496 	p = td->td_proc;
497 	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
498 		return (EINVAL);
499 	kg = td->td_ksegrp;
500 	count = 0;
501 	PROC_LOCK(p);
502 	mtx_lock_spin(&sched_lock);
503 	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
504 		if (ku2->ku_flags & KUF_EXITING)
505 			count++;
506 	}
507 	if ((kg->kg_numupcalls - count) == 1 &&
508 	    (kg->kg_numthreads > 1)) {
509 		mtx_unlock_spin(&sched_lock);
510 		PROC_UNLOCK(p);
511 		return (EDEADLK);
512 	}
513 	ku->ku_flags |= KUF_EXITING;
514 	mtx_unlock_spin(&sched_lock);
515 	PROC_UNLOCK(p);
516 	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
517 	PROC_LOCK(p);
518 	if (error)
519 		psignal(p, SIGSEGV);
520 	mtx_lock_spin(&sched_lock);
521 	upcall_remove(td);
522 	ke = td->td_kse;
523 	if (p->p_numthreads == 1) {
524 		kse_purge(p, td);
525 		p->p_flag &= ~P_SA;
526 		mtx_unlock_spin(&sched_lock);
527 		PROC_UNLOCK(p);
528 	} else {
529 		if (kg->kg_numthreads == 1) { /* Shutdown a group */
530 			kse_purge_group(td);
531 			ke->ke_flags |= KEF_EXIT;
532 		}
533 		thread_stopped(p);
534 		thread_exit();
535 		/* NOTREACHED */
536 	}
537 	return (0);
538 }
539 
540 /*
541  * Either becomes an upcall or waits for an awakening event and
542  * then becomes an upcall. Only error cases return.
543  */
544 /*
545 struct kse_release_args {
546 	struct timespec *timeout;
547 };
548 */
549 int
550 kse_release(struct thread *td, struct kse_release_args *uap)
551 {
552 	struct proc *p;
553 	struct ksegrp *kg;
554 	struct kse_upcall *ku;
555 	struct timespec timeout;
556 	struct timeval tv;
557 	sigset_t sigset;
558 	int error;
559 
560 	p = td->td_proc;
561 	kg = td->td_ksegrp;
562 	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
563 		return (EINVAL);
564 	if (uap->timeout != NULL) {
565 		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
566 			return (error);
567 		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
568 	}
569 	if (td->td_flags & TDF_SA)
570 		td->td_pflags |= TDP_UPCALLING;
571 	else {
572 		ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
573 		if (ku->ku_mflags == -1) {
574 			PROC_LOCK(p);
575 			sigexit(td, SIGSEGV);
576 		}
577 	}
578 	PROC_LOCK(p);
579 	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
580 		/* UTS wants to wait for signal event */
581 		if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL))
582 			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
583 			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
584 		p->p_flag &= ~P_SIGEVENT;
585 		sigset = p->p_siglist;
586 		PROC_UNLOCK(p);
587 		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
588 		    sizeof(sigset));
589 	} else {
590 		if (!kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) {
591 			kg->kg_upsleeps++;
592 			error = msleep(&kg->kg_completed, &p->p_mtx,
593 				PPAUSE|PCATCH, "kserel",
594 				(uap->timeout ? tvtohz(&tv) : 0));
595 			kg->kg_upsleeps--;
596 		}
597 		PROC_UNLOCK(p);
598 	}
599 	if (ku->ku_flags & KUF_DOUPCALL) {
600 		mtx_lock_spin(&sched_lock);
601 		ku->ku_flags &= ~KUF_DOUPCALL;
602 		mtx_unlock_spin(&sched_lock);
603 	}
604 	return (0);
605 }
606 
607 /* struct kse_wakeup_args {
608 	struct kse_mailbox *mbx;
609 }; */
610 int
611 kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
612 {
613 	struct proc *p;
614 	struct ksegrp *kg;
615 	struct kse_upcall *ku;
616 	struct thread *td2;
617 
618 	p = td->td_proc;
619 	td2 = NULL;
620 	ku = NULL;
621 	/* KSE-enabled processes only, please. */
622 	if (!(p->p_flag & P_SA))
623 		return (EINVAL);
624 	PROC_LOCK(p);
625 	mtx_lock_spin(&sched_lock);
626 	if (uap->mbx) {
627 		FOREACH_KSEGRP_IN_PROC(p, kg) {
628 			FOREACH_UPCALL_IN_GROUP(kg, ku) {
629 				if (ku->ku_mailbox == uap->mbx)
630 					break;
631 			}
632 			if (ku)
633 				break;
634 		}
635 	} else {
636 		kg = td->td_ksegrp;
637 		if (kg->kg_upsleeps) {
638 			wakeup_one(&kg->kg_completed);
639 			mtx_unlock_spin(&sched_lock);
640 			PROC_UNLOCK(p);
641 			return (0);
642 		}
643 		ku = TAILQ_FIRST(&kg->kg_upcalls);
644 	}
645 	if (ku) {
646 		if ((td2 = ku->ku_owner) == NULL) {
647 			panic("%s: no owner", __func__);
648 		} else if (TD_ON_SLEEPQ(td2) &&
649 		           ((td2->td_wchan == &kg->kg_completed) ||
650 			    (td2->td_wchan == &p->p_siglist &&
651 			     (ku->ku_mflags & KMF_WAITSIGEVENT)))) {
652 			abortsleep(td2);
653 		} else {
654 			ku->ku_flags |= KUF_DOUPCALL;
655 		}
656 		mtx_unlock_spin(&sched_lock);
657 		PROC_UNLOCK(p);
658 		return (0);
659 	}
660 	mtx_unlock_spin(&sched_lock);
661 	PROC_UNLOCK(p);
662 	return (ESRCH);
663 }
664 
665 /*
666  * No new KSEG: first call: use the current KSE, don't schedule an upcall.
667  * In all other situations, allocate the maximum number of new KSEs and schedule an upcall.
668  */
669 /* struct kse_create_args {
670 	struct kse_mailbox *mbx;
671 	int newgroup;
672 }; */
673 int
674 kse_create(struct thread *td, struct kse_create_args *uap)
675 {
676 	struct kse *newke;
677 	struct ksegrp *newkg;
678 	struct ksegrp *kg;
679 	struct proc *p;
680 	struct kse_mailbox mbx;
681 	struct kse_upcall *newku;
682 	int err, ncpus, sa = 0, first = 0;
683 	struct thread *newtd;
684 
685 	p = td->td_proc;
686 	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
687 		return (err);
688 
689 	/* Too bad; why doesn't the kernel always have a cpu counter!? */
690 #ifdef SMP
691 	ncpus = mp_ncpus;
692 #else
693 	ncpus = 1;
694 #endif
695 	if (virtual_cpu != 0)
696 		ncpus = virtual_cpu;
697 	if (!(mbx.km_flags & KMF_BOUND))
698 		sa = TDF_SA;
699 	else
700 		ncpus = 1;
701 	PROC_LOCK(p);
702 	if (!(p->p_flag & P_SA)) {
703 		first = 1;
704 		p->p_flag |= P_SA;
705 	}
706 	PROC_UNLOCK(p);
707 	if (!sa && !uap->newgroup && !first)
708 		return (EINVAL);
709 	kg = td->td_ksegrp;
710 	if (uap->newgroup) {
711 		/* There is a race condition here, but it is cheap. */
712 		if (p->p_numksegrps >= max_groups_per_proc)
713 			return (EPROCLIM);
714 		/*
715 		 * If we want a new KSEGRP it doesn't matter whether
716 		 * we have already fired up KSE mode before or not.
717 		 * We put the process in KSE mode and create a new KSEGRP.
718 		 */
719 		newkg = ksegrp_alloc();
720 		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
721 		      kg_startzero, kg_endzero));
722 		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
723 		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
724 		PROC_LOCK(p);
725 		mtx_lock_spin(&sched_lock);
726 		if (p->p_numksegrps >= max_groups_per_proc) {
727 			mtx_unlock_spin(&sched_lock);
728 			PROC_UNLOCK(p);
729 			ksegrp_free(newkg);
730 			return (EPROCLIM);
731 		}
732 		ksegrp_link(newkg, p);
733 		sched_fork_ksegrp(kg, newkg);
734 		mtx_unlock_spin(&sched_lock);
735 		PROC_UNLOCK(p);
736 	} else {
737 		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
738 			return (EINVAL);
739 		newkg = kg;
740 	}
741 
742 	/*
743 	 * Creating more upcalls than the number of physical cpus does
744 	 * not help performance.
745 	 */
746 	if (newkg->kg_numupcalls >= ncpus)
747 		return (EPROCLIM);
748 
749 	if (newkg->kg_numupcalls == 0) {
750 		/*
751 		 * Initialize KSE group
752 		 *
753 		 * For a multiplexed group, create as many KSEs as there are
754 		 * physical cpus. This increases concurrency even if userland
755 		 * is not MP safe and can only run on a single CPU.
756 		 * In an ideal world, every physical cpu should execute a thread.
757 		 * If there are enough KSEs, threads in the kernel can be
758 		 * executed in parallel on different cpus at full speed;
759 		 * concurrency in the kernel shouldn't be restricted by the
760 		 * number of upcalls userland provides. Adding more upcall
761 		 * structures only increases concurrency in userland.
762 		 *
763 		 * For a bound thread group, because there is only one thread in
764 		 * the group, we only create one KSE for the group. A thread in
765 		 * this kind of group will never schedule an upcall when blocked;
766 		 * this is intended to simulate a pthread system scope thread.
767 		 */
768 		while (newkg->kg_kses < ncpus) {
769 			newke = kse_alloc();
770 			bzero(&newke->ke_startzero, RANGEOF(struct kse,
771 			      ke_startzero, ke_endzero));
772 #if 0
773 			mtx_lock_spin(&sched_lock);
774 			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
775 			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
776 			mtx_unlock_spin(&sched_lock);
777 #endif
778 			mtx_lock_spin(&sched_lock);
779 			kse_link(newke, newkg);
780 			sched_fork_kse(td->td_kse, newke);
781 			/* Add engine */
782 			kse_reassign(newke);
783 			mtx_unlock_spin(&sched_lock);
784 		}
785 	}
786 	newku = upcall_alloc();
787 	newku->ku_mailbox = uap->mbx;
788 	newku->ku_func = mbx.km_func;
789 	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
790 
791 	/* For the first call this may not have been set */
792 	if (td->td_standin == NULL)
793 		thread_alloc_spare(td, NULL);
794 
795 	PROC_LOCK(p);
796 	if (newkg->kg_numupcalls >= ncpus) {
797 		PROC_UNLOCK(p);
798 		upcall_free(newku);
799 		return (EPROCLIM);
800 	}
801 	if (first && sa) {
802 		SIGSETOR(p->p_siglist, td->td_siglist);
803 		SIGEMPTYSET(td->td_siglist);
804 		SIGFILLSET(td->td_sigmask);
805 		SIG_CANTMASK(td->td_sigmask);
806 	}
807 	mtx_lock_spin(&sched_lock);
808 	PROC_UNLOCK(p);
809 	upcall_link(newku, newkg);
810 	if (mbx.km_quantum)
811 		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
812 
813 	/*
814 	 * Each upcall structure has an owner thread; find which
815 	 * thread owns this one.
816 	 */
817 	if (uap->newgroup) {
818 		/*
819 		 * Because the new ksegrp has no thread yet,
820 		 * create an initial upcall thread to own it.
821 		 */
822 		newtd = thread_schedule_upcall(td, newku);
823 	} else {
824 		/*
825 		 * If the current thread doesn't have an upcall structure,
826 		 * just assign the upcall to it.
827 		 */
828 		if (td->td_upcall == NULL) {
829 			newku->ku_owner = td;
830 			td->td_upcall = newku;
831 			newtd = td;
832 		} else {
833 			/*
834 			 * Create a new upcall thread to own it.
835 			 */
836 			newtd = thread_schedule_upcall(td, newku);
837 		}
838 	}
839 	if (!sa) {
840 		newtd->td_mailbox = mbx.km_curthread;
841 		newtd->td_flags &= ~TDF_SA;
842 		if (newtd != td) {
843 			mtx_unlock_spin(&sched_lock);
844 			cpu_set_upcall_kse(newtd, newku);
845 			mtx_lock_spin(&sched_lock);
846 		}
847 	} else {
848 		newtd->td_flags |= TDF_SA;
849 	}
850 	if (newtd != td)
851 		setrunqueue(newtd);
852 	mtx_unlock_spin(&sched_lock);
853 	return (0);
854 }
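/*
 * A rough sketch of how a userland UTS might drive kse_create(); the
 * UTS-side names (uts_upcall_entry, upcall_stack, upcall_stack_size) are
 * hypothetical and not part of this file:
 *
 *	struct kse_mailbox mbx;
 *
 *	bzero(&mbx, sizeof(mbx));
 *	mbx.km_func = uts_upcall_entry;		(upcall entry point)
 *	mbx.km_stack.ss_sp = upcall_stack;	(stack used for upcalls)
 *	mbx.km_stack.ss_size = upcall_stack_size;
 *	mbx.km_flags = 0;			(or KMF_BOUND for a bound thread)
 *	error = kse_create(&mbx, 0);		(0: stay in the current ksegrp)
 */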
855 
856 /*
857  * Initialize global thread allocation resources.
858  */
859 void
860 threadinit(void)
861 {
862 
863 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
864 	    thread_ctor, thread_dtor, thread_init, thread_fini,
865 	    UMA_ALIGN_CACHE, 0);
866 	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
867 	    NULL, NULL, ksegrp_init, NULL,
868 	    UMA_ALIGN_CACHE, 0);
869 	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
870 	    NULL, NULL, kse_init, NULL,
871 	    UMA_ALIGN_CACHE, 0);
872 	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
873 	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
874 }
875 
876 /*
877  * Stash an embarrassingly extra thread into the zombie thread queue.
878  */
879 void
880 thread_stash(struct thread *td)
881 {
882 	mtx_lock_spin(&kse_zombie_lock);
883 	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
884 	mtx_unlock_spin(&kse_zombie_lock);
885 }
886 
887 /*
888  * Stash an embarrassingly extra kse into the zombie kse queue.
889  */
890 void
891 kse_stash(struct kse *ke)
892 {
893 	mtx_lock_spin(&kse_zombie_lock);
894 	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
895 	mtx_unlock_spin(&kse_zombie_lock);
896 }
897 
898 /*
899  * Stash an embarrassingly extra upcall into the zombie upcall queue.
900  */
901 
902 void
903 upcall_stash(struct kse_upcall *ku)
904 {
905 	mtx_lock_spin(&kse_zombie_lock);
906 	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
907 	mtx_unlock_spin(&kse_zombie_lock);
908 }
909 
910 /*
911  * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
912  */
913 void
914 ksegrp_stash(struct ksegrp *kg)
915 {
916 	mtx_lock_spin(&kse_zombie_lock);
917 	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
918 	mtx_unlock_spin(&kse_zombie_lock);
919 }
920 
921 /*
922  * Reap zombie thread, KSE, ksegrp, and upcall resources.
923  */
924 void
925 thread_reap(void)
926 {
927 	struct thread *td_first, *td_next;
928 	struct kse *ke_first, *ke_next;
929 	struct ksegrp *kg_first, * kg_next;
930 	struct kse_upcall *ku_first, *ku_next;
931 
932 	/*
933 	 * Don't even bother to lock if none at this instant,
934 	 * we really don't care about the next instant..
935 	 */
936 	if ((!TAILQ_EMPTY(&zombie_threads))
937 	    || (!TAILQ_EMPTY(&zombie_kses))
938 	    || (!TAILQ_EMPTY(&zombie_ksegrps))
939 	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
940 		mtx_lock_spin(&kse_zombie_lock);
941 		td_first = TAILQ_FIRST(&zombie_threads);
942 		ke_first = TAILQ_FIRST(&zombie_kses);
943 		kg_first = TAILQ_FIRST(&zombie_ksegrps);
944 		ku_first = TAILQ_FIRST(&zombie_upcalls);
945 		if (td_first)
946 			TAILQ_INIT(&zombie_threads);
947 		if (ke_first)
948 			TAILQ_INIT(&zombie_kses);
949 		if (kg_first)
950 			TAILQ_INIT(&zombie_ksegrps);
951 		if (ku_first)
952 			TAILQ_INIT(&zombie_upcalls);
953 		mtx_unlock_spin(&kse_zombie_lock);
954 		while (td_first) {
955 			td_next = TAILQ_NEXT(td_first, td_runq);
956 			if (td_first->td_ucred)
957 				crfree(td_first->td_ucred);
958 			thread_free(td_first);
959 			td_first = td_next;
960 		}
961 		while (ke_first) {
962 			ke_next = TAILQ_NEXT(ke_first, ke_procq);
963 			kse_free(ke_first);
964 			ke_first = ke_next;
965 		}
966 		while (kg_first) {
967 			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
968 			ksegrp_free(kg_first);
969 			kg_first = kg_next;
970 		}
971 		while (ku_first) {
972 			ku_next = TAILQ_NEXT(ku_first, ku_link);
973 			upcall_free(ku_first);
974 			ku_first = ku_next;
975 		}
976 	}
977 }
978 
979 /*
980  * Allocate a ksegrp.
981  */
982 struct ksegrp *
983 ksegrp_alloc(void)
984 {
985 	return (uma_zalloc(ksegrp_zone, M_WAITOK));
986 }
987 
988 /*
989  * Allocate a kse.
990  */
991 struct kse *
992 kse_alloc(void)
993 {
994 	return (uma_zalloc(kse_zone, M_WAITOK));
995 }
996 
997 /*
998  * Allocate a thread.
999  */
1000 struct thread *
1001 thread_alloc(void)
1002 {
1003 	thread_reap(); /* check if any zombies to get */
1004 	return (uma_zalloc(thread_zone, M_WAITOK));
1005 }
1006 
1007 /*
1008  * Deallocate a ksegrp.
1009  */
1010 void
1011 ksegrp_free(struct ksegrp *td)
1012 {
1013 	uma_zfree(ksegrp_zone, td);
1014 }
1015 
1016 /*
1017  * Deallocate a kse.
1018  */
1019 void
1020 kse_free(struct kse *td)
1021 {
1022 	uma_zfree(kse_zone, td);
1023 }
1024 
1025 /*
1026  * Deallocate a thread.
1027  */
1028 void
1029 thread_free(struct thread *td)
1030 {
1031 
1032 	cpu_thread_clean(td);
1033 	uma_zfree(thread_zone, td);
1034 }
1035 
1036 /*
1037  * Store the thread context in the UTS's mailbox,
1038  * then add the mailbox at the head of a list we are building in user space.
1039  * The list is anchored in the ksegrp structure.
1040  */
1041 int
1042 thread_export_context(struct thread *td, int willexit)
1043 {
1044 	struct proc *p;
1045 	struct ksegrp *kg;
1046 	uintptr_t mbx;
1047 	void *addr;
1048 	int error = 0, temp, sig;
1049 	mcontext_t mc;
1050 
1051 	p = td->td_proc;
1052 	kg = td->td_ksegrp;
1053 
1054 	/* Export the user/machine context. */
1055 	get_mcontext(td, &mc, 0);
1056 	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
1057 	error = copyout(&mc, addr, sizeof(mcontext_t));
1058 	if (error)
1059 		goto bad;
1060 
1061 	/* Export clock ticks spent in kernel mode. */
1062 	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
1063 	temp = fuword32(addr) + td->td_usticks;
1064 	if (suword32(addr, temp)) {
1065 		error = EFAULT;
1066 		goto bad;
1067 	}
1068 
1069 	/*
1070 	 * Post sync signal, or process SIGKILL and SIGSTOP.
1071 	 * Posting a sync signal is only possible when the signal is not
1072 	 * caught by userland or the process is being debugged.
1073 	 */
1074 	PROC_LOCK(p);
1075 	if (td->td_flags & TDF_NEEDSIGCHK) {
1076 		mtx_lock_spin(&sched_lock);
1077 		td->td_flags &= ~TDF_NEEDSIGCHK;
1078 		mtx_unlock_spin(&sched_lock);
1079 		mtx_lock(&p->p_sigacts->ps_mtx);
1080 		while ((sig = cursig(td)) != 0)
1081 			postsig(sig);
1082 		mtx_unlock(&p->p_sigacts->ps_mtx);
1083 	}
1084 	if (willexit)
1085 		SIGFILLSET(td->td_sigmask);
1086 	PROC_UNLOCK(p);
1087 
1088 	/* Get the address of the list pointer in the latest mailbox. */
1089 	addr = (void *)(&td->td_mailbox->tm_next);
1090 	/*
1091 	 * Put the saved address of the previous first
1092 	 * entry into this one
1093 	 */
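	/*
	 * This is a lock-free insert at the head of the userland-visible
	 * list: write the link into the mailbox, then confirm under the
	 * proc lock that the list head is unchanged before swinging it to
	 * this mailbox, retrying otherwise.
	 */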
1094 	for (;;) {
1095 		mbx = (uintptr_t)kg->kg_completed;
1096 		if (suword(addr, mbx)) {
1097 			error = EFAULT;
1098 			goto bad;
1099 		}
1100 		PROC_LOCK(p);
1101 		if (mbx == (uintptr_t)kg->kg_completed) {
1102 			kg->kg_completed = td->td_mailbox;
1103 			/*
1104 			 * The thread context may be taken away by
1105 			 * other upcall threads when we unlock the
1106 			 * process lock; it's no longer valid to
1107 			 * use it again anywhere else.
1108 			 */
1109 			td->td_mailbox = NULL;
1110 			PROC_UNLOCK(p);
1111 			break;
1112 		}
1113 		PROC_UNLOCK(p);
1114 	}
1115 	td->td_usticks = 0;
1116 	return (0);
1117 
1118 bad:
1119 	PROC_LOCK(p);
1120 	sigexit(td, SIGILL);
1121 	return (error);
1122 }
1123 
1124 /*
1125  * Take the list of completed mailboxes for this KSEGRP and put them on this
1126  * upcall's mailbox as it's the next one going up.
1127  */
1128 static int
1129 thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1130 {
1131 	struct proc *p = kg->kg_proc;
1132 	void *addr;
1133 	uintptr_t mbx;
1134 
1135 	addr = (void *)(&ku->ku_mailbox->km_completed);
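	/*
	 * Same lock-free head-swap as in thread_export_context(): publish
	 * the list, then verify under the proc lock and retry if it changed.
	 */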
1136 	for (;;) {
1137 		mbx = (uintptr_t)kg->kg_completed;
1138 		if (suword(addr, mbx)) {
1139 			PROC_LOCK(p);
1140 			psignal(p, SIGSEGV);
1141 			PROC_UNLOCK(p);
1142 			return (EFAULT);
1143 		}
1144 		PROC_LOCK(p);
1145 		if (mbx == (uintptr_t)kg->kg_completed) {
1146 			kg->kg_completed = NULL;
1147 			PROC_UNLOCK(p);
1148 			break;
1149 		}
1150 		PROC_UNLOCK(p);
1151 	}
1152 	return (0);
1153 }
1154 
1155 /*
1156  * This function should be called at statclock interrupt time
1157  */
1158 int
1159 thread_statclock(int user)
1160 {
1161 	struct thread *td = curthread;
1162 	struct ksegrp *kg = td->td_ksegrp;
1163 
1164 	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
1165 		return (0);
1166 	if (user) {
1167 		/* Currently this is always done via ast(). */
1168 		mtx_lock_spin(&sched_lock);
1169 		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
1170 		mtx_unlock_spin(&sched_lock);
1171 		td->td_uuticks++;
1172 	} else {
1173 		if (td->td_mailbox != NULL)
1174 			td->td_usticks++;
1175 		else {
1176 			/* XXXKSE
1177 			 * We will call thread_user_enter() for every
1178 			 * kernel entry in the future, so if the thread mailbox
1179 			 * is NULL, it must be the UTS; don't account
1180 			 * clock ticks for it.
1181 			 */
1182 		}
1183 	}
1184 	return (0);
1185 }
1186 
1187 /*
1188  * Export stat clock ticks to userland.
1189  */
1190 static int
1191 thread_update_usr_ticks(struct thread *td, int user)
1192 {
1193 	struct proc *p = td->td_proc;
1194 	struct kse_thr_mailbox *tmbx;
1195 	struct kse_upcall *ku;
1196 	struct ksegrp *kg;
1197 	caddr_t addr;
1198 	u_int uticks;
1199 
1200 	if ((ku = td->td_upcall) == NULL)
1201 		return (-1);
1202 
1203 	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1204 	if ((tmbx == NULL) || (tmbx == (void *)-1))
1205 		return (-1);
1206 	if (user) {
1207 		uticks = td->td_uuticks;
1208 		td->td_uuticks = 0;
1209 		addr = (caddr_t)&tmbx->tm_uticks;
1210 	} else {
1211 		uticks = td->td_usticks;
1212 		td->td_usticks = 0;
1213 		addr = (caddr_t)&tmbx->tm_sticks;
1214 	}
1215 	if (uticks) {
1216 		if (suword32(addr, uticks+fuword32(addr))) {
1217 			PROC_LOCK(p);
1218 			psignal(p, SIGSEGV);
1219 			PROC_UNLOCK(p);
1220 			return (-2);
1221 		}
1222 	}
1223 	kg = td->td_ksegrp;
1224 	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
1225 		mtx_lock_spin(&sched_lock);
1226 		td->td_upcall->ku_flags |= KUF_DOUPCALL;
1227 		mtx_unlock_spin(&sched_lock);
1228 	}
1229 	return (0);
1230 }
1231 
1232 /*
1233  * Discard the current thread and exit from its context.
1234  *
1235  * Because we can't free a thread while we're operating under its context,
1236  * push the current thread into our CPU's deadthread holder. This means
1237  * we needn't worry about someone else grabbing our context before we
1238  * do a cpu_throw().
1239  */
1240 void
1241 thread_exit(void)
1242 {
1243 	struct thread *td;
1244 	struct kse *ke;
1245 	struct proc *p;
1246 	struct ksegrp	*kg;
1247 
1248 	td = curthread;
1249 	kg = td->td_ksegrp;
1250 	p = td->td_proc;
1251 	ke = td->td_kse;
1252 
1253 	mtx_assert(&sched_lock, MA_OWNED);
1254 	KASSERT(p != NULL, ("thread exiting without a process"));
1255 	KASSERT(ke != NULL, ("thread exiting without a kse"));
1256 	KASSERT(kg != NULL, ("thread exiting without a kse group"));
1257 	PROC_LOCK_ASSERT(p, MA_OWNED);
1258 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
1259 	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
1260 
1261 	if (td->td_standin != NULL) {
1262 		thread_stash(td->td_standin);
1263 		td->td_standin = NULL;
1264 	}
1265 
1266 	cpu_thread_exit(td);	/* XXXSMP */
1267 
1268 	/*
1269 	 * The last thread is left attached to the process
1270 	 * so that the whole bundle gets recycled. Skip
1271 	 * all this stuff.
1272 	 */
1273 	if (p->p_numthreads > 1) {
1274 		thread_unlink(td);
1275 		if (p->p_maxthrwaits)
1276 			wakeup(&p->p_numthreads);
1277 		/*
1278 		 * The test below is NOT true if we are the
1279 		 * sole exiting thread. P_STOPPED_SINGLE is unset
1280 		 * in exit1() after it is the only survivor.
1281 		 */
1282 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1283 			if (p->p_numthreads == p->p_suspcount) {
1284 				thread_unsuspend_one(p->p_singlethread);
1285 			}
1286 		}
1287 
1288 		/*
1289 		 * Because each upcall structure has an owner thread, and an
1290 		 * owner thread exits only when the process is exiting, an
1291 		 * upcall to userland is no longer needed and deleting the
1292 		 * upcall structure is safe here.
1293 		 * So when all threads in a group have exited, all upcalls
1294 		 * in the group should be automatically freed.
1295 		 */
1296 		if (td->td_upcall)
1297 			upcall_remove(td);
1298 
1299 		sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
1300 		sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
1301 		ke->ke_state = KES_UNQUEUED;
1302 		ke->ke_thread = NULL;
1303 		/*
1304 		 * Decide what to do with the KSE attached to this thread.
1305 		 */
1306 		if (ke->ke_flags & KEF_EXIT) {
1307 			kse_unlink(ke);
1308 			if (kg->kg_kses == 0) {
1309 				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
1310 				ksegrp_unlink(kg);
1311 			}
1312 		}
1313 		else
1314 			kse_reassign(ke);
1315 		PROC_UNLOCK(p);
1316 		td->td_kse	= NULL;
1317 		td->td_state	= TDS_INACTIVE;
1318 #if 0
1319 		td->td_proc	= NULL;
1320 #endif
1321 		td->td_ksegrp	= NULL;
1322 		td->td_last_kse	= NULL;
1323 		PCPU_SET(deadthread, td);
1324 	} else {
1325 		PROC_UNLOCK(p);
1326 	}
1327 	/* XXX Shouldn't cpu_throw() here. */
1328 	mtx_assert(&sched_lock, MA_OWNED);
1329 	cpu_throw(td, choosethread());
1330 	panic("I'm a teapot!");
1331 	/* NOTREACHED */
1332 }
1333 
1334 /*
1335  * Do any thread-specific cleanups that may be needed in wait().
1336  * Called with Giant held; the proc lock and sched lock are not held.
1337  */
1338 void
1339 thread_wait(struct proc *p)
1340 {
1341 	struct thread *td;
1342 
1343 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
1344 	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
1345 	FOREACH_THREAD_IN_PROC(p, td) {
1346 		if (td->td_standin != NULL) {
1347 			thread_free(td->td_standin);
1348 			td->td_standin = NULL;
1349 		}
1350 		cpu_thread_clean(td);
1351 	}
1352 	thread_reap();	/* check for zombie threads etc. */
1353 }
1354 
1355 /*
1356  * Link a thread to a process.
1357  * Set up anything that needs to be initialized for it to
1358  * be used by the process.
1359  *
1360  * Note that we do not link to the proc's ucred here.
1361  * The thread is linked as if running but no KSE assigned.
1362  */
1363 void
1364 thread_link(struct thread *td, struct ksegrp *kg)
1365 {
1366 	struct proc *p;
1367 
1368 	p = kg->kg_proc;
1369 	td->td_state    = TDS_INACTIVE;
1370 	td->td_proc     = p;
1371 	td->td_ksegrp   = kg;
1372 	td->td_last_kse = NULL;
1373 	td->td_flags    = 0;
1374 	td->td_kse      = NULL;
1375 
1376 	LIST_INIT(&td->td_contested);
1377 	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
1378 	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1379 	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1380 	p->p_numthreads++;
1381 	kg->kg_numthreads++;
1382 }
1383 
1384 void
1385 thread_unlink(struct thread *td)
1386 {
1387 	struct proc *p = td->td_proc;
1388 	struct ksegrp *kg = td->td_ksegrp;
1389 
1390 	mtx_assert(&sched_lock, MA_OWNED);
1391 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
1392 	p->p_numthreads--;
1393 	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1394 	kg->kg_numthreads--;
1395 	/* could clear a few other things here */
1396 }
1397 
1398 /*
1399  * Purge a ksegrp resource. When a ksegrp is preparing to
1400  * exit, it calls this function.
1401  */
1402 static void
1403 kse_purge_group(struct thread *td)
1404 {
1405 	struct ksegrp *kg;
1406 	struct kse *ke;
1407 
1408 	kg = td->td_ksegrp;
1409  	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1410 	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1411 		KASSERT(ke->ke_state == KES_IDLE,
1412 			("%s: wrong idle KSE state", __func__));
1413 		kse_unlink(ke);
1414 	}
1415 	KASSERT((kg->kg_kses == 1),
1416 		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1417 	KASSERT((kg->kg_numupcalls == 0),
1418 	        ("%s: ksegrp still has %d upcall structures",
1419 		__func__, kg->kg_numupcalls));
1420 }
1421 
1422 /*
1423  * Purge a process's KSE resource. When a process is preparing to
1424  * exit, it calls kse_purge to release any extra KSE resources in
1425  * the process.
1426  */
1427 static void
1428 kse_purge(struct proc *p, struct thread *td)
1429 {
1430 	struct ksegrp *kg;
1431 	struct kse *ke;
1432 
1433  	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1434 	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1435 		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1436 		p->p_numksegrps--;
1437 		/*
1438 		 * There is no ownership for a KSE; after all threads
1439 		 * in the group have exited, it is possible that some KSEs
1440 		 * were left on the idle queue, so gc them now.
1441 		 */
1442 		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1443 			KASSERT(ke->ke_state == KES_IDLE,
1444 			   ("%s: wrong idle KSE state", __func__));
1445 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1446 			kg->kg_idle_kses--;
1447 			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1448 			kg->kg_kses--;
1449 			kse_stash(ke);
1450 		}
1451 		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1452 		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1453 		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1454 		KASSERT((kg->kg_numupcalls == 0),
1455 		        ("%s: ksegrp still has %d upcall structures",
1456 			__func__, kg->kg_numupcalls));
1457 
1458 		if (kg != td->td_ksegrp)
1459 			ksegrp_stash(kg);
1460 	}
1461 	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1462 	p->p_numksegrps++;
1463 }
1464 
1465 /*
1466  * This function is intended to be used to initialize a spare thread
1467  * for an upcall. Initialize the thread's large data area outside sched_lock
1468  * for thread_schedule_upcall().
1469  */
1470 void
1471 thread_alloc_spare(struct thread *td, struct thread *spare)
1472 {
1473 	if (td->td_standin)
1474 		return;
1475 	if (spare == NULL)
1476 		spare = thread_alloc();
1477 	td->td_standin = spare;
1478 	bzero(&spare->td_startzero,
1479 	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1480 	spare->td_proc = td->td_proc;
1481 	spare->td_ucred = crhold(td->td_ucred);
1482 }
1483 
1484 /*
1485  * Create a thread and schedule it for upcall on the KSE given.
1486  * Use our thread's standin so that we don't have to allocate one.
1487  */
1488 struct thread *
1489 thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1490 {
1491 	struct thread *td2;
1492 
1493 	mtx_assert(&sched_lock, MA_OWNED);
1494 
1495 	/*
1496 	 * Schedule an upcall thread on the specified kse_upcall;
1497 	 * the kse_upcall must be free.
1498 	 * td must have a spare thread.
1499 	 */
1500 	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1501 	if ((td2 = td->td_standin) != NULL) {
1502 		td->td_standin = NULL;
1503 	} else {
1504 		panic("no reserve thread when scheduling an upcall");
1505 		return (NULL);
1506 	}
1507 	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1508 	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1509 	bcopy(&td->td_startcopy, &td2->td_startcopy,
1510 	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1511 	thread_link(td2, ku->ku_ksegrp);
1512 	/* inherit blocked thread's context */
1513 	cpu_set_upcall(td2, td);
1514 	/* Let the new thread become owner of the upcall */
1515 	ku->ku_owner   = td2;
1516 	td2->td_upcall = ku;
1517 	td2->td_flags  = TDF_SA;
1518 	td2->td_pflags = TDP_UPCALLING;
1519 	td2->td_kse    = NULL;
1520 	td2->td_state  = TDS_CAN_RUN;
1521 	td2->td_inhibitors = 0;
1522 	SIGFILLSET(td2->td_sigmask);
1523 	SIG_CANTMASK(td2->td_sigmask);
1524 	sched_fork_thread(td, td2);
1525 	return (td2);	/* bogus.. should be a void function */
1526 }
1527 
1528 /*
1529  * This is only used when a thread has generated a trap and the process is
1530  * debugged.
1531  */
1532 void
1533 thread_signal_add(struct thread *td, int sig)
1534 {
1535 	struct proc *p;
1536 	siginfo_t siginfo;
1537 	struct sigacts *ps;
1538 	int error;
1539 
1540 	p = td->td_proc;
1541 	PROC_LOCK_ASSERT(p, MA_OWNED);
1542 	ps = p->p_sigacts;
1543 	mtx_assert(&ps->ps_mtx, MA_OWNED);
1544 
1545 	cpu_thread_siginfo(sig, 0, &siginfo);
1546 	mtx_unlock(&ps->ps_mtx);
1547 	PROC_UNLOCK(p);
1548 	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
1549 	if (error) {
1550 		PROC_LOCK(p);
1551 		sigexit(td, SIGILL);
1552 	}
1553 	PROC_LOCK(p);
1554 	SIGADDSET(td->td_sigmask, sig);
1555 	mtx_lock(&ps->ps_mtx);
1556 }
1557 
1558 void
1559 thread_switchout(struct thread *td)
1560 {
1561 	struct kse_upcall *ku;
1562 	struct thread *td2;
1563 
1564 	mtx_assert(&sched_lock, MA_OWNED);
1565 
1566 	/*
1567 	 * If the outgoing thread is in a threaded group and has never
1568 	 * scheduled an upcall, decide whether this is a short
1569 	 * or long term event and thus whether or not to schedule
1570 	 * an upcall.
1571 	 * If it is a short term event, just suspend it in
1572 	 * a way that takes its KSE with it.
1573 	 * Select the events for which we want to schedule upcalls.
1574 	 * For now it's just sleep.
1575 	 * XXXKSE eventually almost any inhibition could do.
1576 	 */
1577 	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
1578 		/*
1579 		 * Release ownership of upcall, and schedule an upcall
1580 		 * thread, this new upcall thread becomes the owner of
1581 		 * the upcall structure.
1582 		 */
1583 		ku = td->td_upcall;
1584 		ku->ku_owner = NULL;
1585 		td->td_upcall = NULL;
1586 		td->td_flags &= ~TDF_CAN_UNBIND;
1587 		td2 = thread_schedule_upcall(td, ku);
1588 		setrunqueue(td2);
1589 	}
1590 }
1591 
1592 /*
1593  * Setup done on the thread when it enters the kernel.
1594  * XXXKSE Presently only for syscalls but eventually all kernel entries.
1595  */
1596 void
1597 thread_user_enter(struct proc *p, struct thread *td)
1598 {
1599 	struct ksegrp *kg;
1600 	struct kse_upcall *ku;
1601 	struct kse_thr_mailbox *tmbx;
1602 	uint32_t tflags;
1603 
1604 	kg = td->td_ksegrp;
1605 
1606 	/*
1607 	 * First check that we shouldn't just abort.
1608 	 * But check if we are the single thread first!
1609 	 */
1610 	if (p->p_flag & P_SINGLE_EXIT) {
1611 		PROC_LOCK(p);
1612 		mtx_lock_spin(&sched_lock);
1613 		thread_stopped(p);
1614 		thread_exit();
1615 		/* NOTREACHED */
1616 	}
1617 
1618 	/*
1619 	 * If we are doing a syscall in a KSE environment,
1620 	 * note where our mailbox is. There is always the
1621 	 * possibility that we could do this lazily (in kse_reassign()),
1622 	 * but for now do it every time.
1623 	 */
1624 	kg = td->td_ksegrp;
1625 	if (td->td_flags & TDF_SA) {
1626 		ku = td->td_upcall;
1627 		KASSERT(ku, ("%s: no upcall owned", __func__));
1628 		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1629 		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
1630 		ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
1631 		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1632 		if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
1633 		    (ku->ku_mflags & KMF_NOUPCALL)) {
1634 			td->td_mailbox = NULL;
1635 		} else {
1636 			if (td->td_standin == NULL)
1637 				thread_alloc_spare(td, NULL);
1638 			tflags = fuword32(&tmbx->tm_flags);
1639 			/*
1640 			 * On some architectures, the TP register points to the
1641 			 * thread mailbox but not to the kse mailbox, and userland
1642 			 * can not atomically clear km_curthread, but it can
1643 			 * use the TP register and set TMF_NOUPCALL in the thread
1644 			 * flags to indicate a critical region.
1645 			 */
1646 			if (tflags & TMF_NOUPCALL) {
1647 				td->td_mailbox = NULL;
1648 			} else {
1649 				td->td_mailbox = tmbx;
1650 				mtx_lock_spin(&sched_lock);
1651 				td->td_flags |= TDF_CAN_UNBIND;
1652 				mtx_unlock_spin(&sched_lock);
1653 			}
1654 		}
1655 	}
1656 }
1657 
1658 /*
1659  * The extra work we go through if we are a threaded process when we
1660  * return to userland.
1661  *
1662  * If we are a KSE process and returning to user mode, check for
1663  * extra work to do before we return (e.g. for more syscalls
1664  * to complete first).  If we were in a critical section, we should
1665  * just return to let it finish. Same if we were in the UTS (in
1666  * which case the mailbox's context's busy indicator will be set).
1667  * The only traps we support will have set the mailbox.
1668  * We will clear it here.
1669  */
1670 int
1671 thread_userret(struct thread *td, struct trapframe *frame)
1672 {
1673 	int error = 0, upcalls, uts_crit;
1674 	struct kse_upcall *ku;
1675 	struct ksegrp *kg, *kg2;
1676 	struct proc *p;
1677 	struct timespec ts;
1678 
1679 	p = td->td_proc;
1680 	kg = td->td_ksegrp;
1681 	ku = td->td_upcall;
1682 
1683 	/* Nothing to do with bound thread */
1684 	if (!(td->td_flags & TDF_SA))
1685 		return (0);
1686 
1687 	/*
1688 	 * A stat clock interrupt hit in userland and we are
1689 	 * returning from the interrupt; charge the thread's
1690 	 * userland time to the UTS.
1691 	 */
1692 	if (td->td_flags & TDF_USTATCLOCK) {
1693 		thread_update_usr_ticks(td, 1);
1694 		mtx_lock_spin(&sched_lock);
1695 		td->td_flags &= ~TDF_USTATCLOCK;
1696 		mtx_unlock_spin(&sched_lock);
1697 		if (kg->kg_completed ||
1698 		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
1699 			thread_user_enter(p, td);
1700 	}
1701 
1702 	uts_crit = (td->td_mailbox == NULL);
1703 	/*
1704 	 * Optimisation:
1705 	 * This thread has not started any upcall.
1706 	 * If there is no work to report other than ourselves,
1707 	 * then it can return directly to userland.
1708 	 */
1709 	if (TD_CAN_UNBIND(td)) {
1710 		mtx_lock_spin(&sched_lock);
1711 		td->td_flags &= ~TDF_CAN_UNBIND;
1712 		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
1713 		    (kg->kg_completed == NULL) &&
1714 		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1715 		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
1716 			mtx_unlock_spin(&sched_lock);
1717 			thread_update_usr_ticks(td, 0);
1718 			nanotime(&ts);
1719 			error = copyout(&ts,
1720 				(caddr_t)&ku->ku_mailbox->km_timeofday,
1721 				sizeof(ts));
1722 			td->td_mailbox = 0;
1723 			ku->ku_mflags = 0;
1724 			if (error)
1725 				goto out;
1726 			return (0);
1727 		}
1728 		mtx_unlock_spin(&sched_lock);
1729 		thread_export_context(td, 0);
1730 		/*
1731 		 * There is something to report, and we own an upcall
1732 		 * structure, so we can go to userland.
1733 		 * Turn ourselves into an upcall thread.
1734 		 */
1735 		td->td_pflags |= TDP_UPCALLING;
1736 	} else if (td->td_mailbox && (ku == NULL)) {
1737 		thread_export_context(td, 1);
1738 		PROC_LOCK(p);
1739 		/*
1740 		 * There are upcall threads waiting for
1741 		 * work to do, wake one of them up.
1742 		 * XXXKSE Maybe wake all of them up.
1743 		 */
1744 		if (kg->kg_upsleeps)
1745 			wakeup_one(&kg->kg_completed);
1746 		mtx_lock_spin(&sched_lock);
1747 		thread_stopped(p);
1748 		thread_exit();
1749 		/* NOTREACHED */
1750 	}
1751 
1752 	KASSERT(ku != NULL, ("upcall is NULL\n"));
1753 	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1754 
1755 	if (p->p_numthreads > max_threads_per_proc) {
1756 		max_threads_hits++;
1757 		PROC_LOCK(p);
1758 		mtx_lock_spin(&sched_lock);
1759 		p->p_maxthrwaits++;
1760 		while (p->p_numthreads > max_threads_per_proc) {
1761 			upcalls = 0;
1762 			FOREACH_KSEGRP_IN_PROC(p, kg2) {
1763 				if (kg2->kg_numupcalls == 0)
1764 					upcalls++;
1765 				else
1766 					upcalls += kg2->kg_numupcalls;
1767 			}
1768 			if (upcalls >= max_threads_per_proc)
1769 				break;
1770 			mtx_unlock_spin(&sched_lock);
1771 			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1772 			    "maxthreads", 0)) {
1773 				mtx_lock_spin(&sched_lock);
1774 				break;
1775 			} else {
1776 				mtx_lock_spin(&sched_lock);
1777 			}
1778 		}
1779 		p->p_maxthrwaits--;
1780 		mtx_unlock_spin(&sched_lock);
1781 		PROC_UNLOCK(p);
1782 	}
1783 
1784 	if (td->td_pflags & TDP_UPCALLING) {
1785 		uts_crit = 0;
1786 		kg->kg_nextupcall = ticks+kg->kg_upquantum;
1787 		/*
1788 		 * There is no more work to do and we are going to ride
1789 		 * this thread up to userland as an upcall.
1790 		 * Do the last parts of the setup needed for the upcall.
1791 		 */
1792 		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1793 		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1794 
1795 		td->td_pflags &= ~TDP_UPCALLING;
1796 		if (ku->ku_flags & KUF_DOUPCALL) {
1797 			mtx_lock_spin(&sched_lock);
1798 			ku->ku_flags &= ~KUF_DOUPCALL;
1799 			mtx_unlock_spin(&sched_lock);
1800 		}
1801 		/*
1802 		 * Set user context to the UTS
1803 		 */
1804 		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
1805 			cpu_set_upcall_kse(td, ku);
1806 			error = suword(&ku->ku_mailbox->km_curthread, 0);
1807 			if (error)
1808 				goto out;
1809 		}
1810 
1811 		/*
1812 		 * Unhook the list of completed threads.
1813 		 * Anything that completes after this gets to
1814 		 * come in next time.
1815 		 * Put the list of completed thread mailboxes on
1816 		 * this KSE's mailbox.
1817 		 */
1818 		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
1819 		    (error = thread_link_mboxes(kg, ku)) != 0)
1820 			goto out;
1821 	}
1822 	if (!uts_crit) {
1823 		nanotime(&ts);
1824 		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
1825 	}
1826 
1827 out:
1828 	if (error) {
1829 		/*
1830 		 * Things are going to be so screwed we should just kill
1831 		 * the process.
1832 		 * How do we do that?
1833 		 */
1834 		PROC_LOCK(td->td_proc);
1835 		psignal(td->td_proc, SIGSEGV);
1836 		PROC_UNLOCK(td->td_proc);
1837 	} else {
1838 		/*
1839 		 * Optimisation:
1840 		 * Ensure that we have a spare thread available,
1841 		 * for when we re-enter the kernel.
1842 		 */
1843 		if (td->td_standin == NULL)
1844 			thread_alloc_spare(td, NULL);
1845 	}
1846 
1847 	ku->ku_mflags = 0;
1848 	/*
1849 	 * Clear thread mailbox first, then clear system tick count.
1850 	 * The order is important because thread_statclock() uses the
1851 	 * mailbox pointer to see if it is a userland thread or
1852 	 * a UTS kernel thread.
1853 	 */
1854 	td->td_mailbox = NULL;
1855 	td->td_usticks = 0;
1856 	return (error);	/* go sync */
1857 }
1858 
1859 /*
1860  * Enforce single-threading.
1861  *
1862  * Returns 1 if the caller must abort (another thread is waiting to
1863  * exit the process or similar). Process is locked!
1864  * Returns 0 when you are successfully the only thread running.
1865  * A process has successfully single threaded in the suspend mode when
1866  * there are no threads in user mode. Threads in the kernel must be
1867  * allowed to continue until they get to the user boundary. They may even
1868  * copy out their return values and data before suspending. They may however be
1869  * accelerated in reaching the user boundary as we will wake up
1870  * any sleeping threads that are interruptible (PCATCH).
1871  */
1872 int
1873 thread_single(int force_exit)
1874 {
1875 	struct thread *td;
1876 	struct thread *td2;
1877 	struct proc *p;
1878 
1879 	td = curthread;
1880 	p = td->td_proc;
1881 	mtx_assert(&Giant, MA_OWNED);
1882 	PROC_LOCK_ASSERT(p, MA_OWNED);
1883 	KASSERT((td != NULL), ("curthread is NULL"));
1884 
1885 	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
1886 		return (0);
1887 
1888 	/* Is someone already single threading? */
1889 	if (p->p_singlethread)
1890 		return (1);
1891 
1892 	if (force_exit == SINGLE_EXIT) {
1893 		p->p_flag |= P_SINGLE_EXIT;
1894 	} else
1895 		p->p_flag &= ~P_SINGLE_EXIT;
1896 	p->p_flag |= P_STOPPED_SINGLE;
1897 	mtx_lock_spin(&sched_lock);
1898 	p->p_singlethread = td;
1899 	while ((p->p_numthreads - p->p_suspcount) != 1) {
1900 		FOREACH_THREAD_IN_PROC(p, td2) {
1901 			if (td2 == td)
1902 				continue;
1903 			td2->td_flags |= TDF_ASTPENDING;
1904 			if (TD_IS_INHIBITED(td2)) {
1905 				if (force_exit == SINGLE_EXIT) {
1906 					if (TD_IS_SUSPENDED(td2)) {
1907 						thread_unsuspend_one(td2);
1908 					}
1909 					if (TD_ON_SLEEPQ(td2) &&
1910 					    (td2->td_flags & TDF_SINTR)) {
1911 						if (td2->td_flags & TDF_CVWAITQ)
1912 							cv_abort(td2);
1913 						else
1914 							abortsleep(td2);
1915 					}
1916 				} else {
1917 					if (TD_IS_SUSPENDED(td2))
1918 						continue;
1919 					/*
1920 					 * Maybe other inhibited states too?
1921 					 * XXXKSE Is it totally safe to
1922 					 * suspend a non-interruptible thread?
1923 					 */
1924 					if (td2->td_inhibitors &
1925 					    (TDI_SLEEPING | TDI_SWAPPED))
1926 						thread_suspend_one(td2);
1927 				}
1928 			}
1929 		}
1930 		/*
1931 		 * Maybe we suspended some threads.. was it enough?
1932 		 */
1933 		if ((p->p_numthreads - p->p_suspcount) == 1)
1934 			break;
1935 
1936 		/*
1937 		 * Wake us up when everyone else has suspended.
1938 		 * In the mean time we suspend as well.
1939 		 */
1940 		thread_suspend_one(td);
1941 		DROP_GIANT();
1942 		PROC_UNLOCK(p);
1943 		p->p_stats->p_ru.ru_nvcsw++;
1944 		mi_switch();
1945 		mtx_unlock_spin(&sched_lock);
1946 		PICKUP_GIANT();
1947 		PROC_LOCK(p);
1948 		mtx_lock_spin(&sched_lock);
1949 	}
1950 	if (force_exit == SINGLE_EXIT) {
1951 		if (td->td_upcall)
1952 			upcall_remove(td);
1953 		kse_purge(p, td);
1954 	}
1955 	mtx_unlock_spin(&sched_lock);
1956 	return (0);
1957 }
1958 
1959 /*
1960  * Called in from locations that can safely check to see
1961  * whether we have to suspend or at least throttle for a
1962  * single-thread event (e.g. fork).
1963  *
1964  * Such locations include userret().
1965  * If the "return_instead" argument is non-zero, the thread must be able to
1966  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1967  *
1968  * The 'return_instead' argument tells the function if it may do a
1969  * thread_exit() or suspend, or whether the caller must abort and back
1970  * out instead.
1971  *
1972  * If the thread that set the single_threading request has set the
1973  * P_SINGLE_EXIT bit in the process flags then this call will never return
1974  * if 'return_instead' is false, but will exit.
1975  *
1976  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1977  *---------------+--------------------+---------------------
1978  *       0       | returns 0          |   returns 0 or 1
1979  *               | when ST ends       |   immediately
1980  *---------------+--------------------+---------------------
1981  *       1       | thread exits       |   returns 1
1982  *               |                    |  immediately
1983  * 0 = thread_exit() or suspension ok,
1984  * other = return error instead of stopping the thread.
1985  *
1986  * While a full suspension is under effect, even a single threading
1987  * thread would be suspended if it made this call (but it shouldn't).
1988  * This call should only be made from places where
1989  * thread_exit() would be safe as that may be the outcome unless
1990  * return_instead is set.
1991  */
1992 int
1993 thread_suspend_check(int return_instead)
1994 {
1995 	struct thread *td;
1996 	struct proc *p;
1997 
1998 	td = curthread;
1999 	p = td->td_proc;
2000 	PROC_LOCK_ASSERT(p, MA_OWNED);
2001 	while (P_SHOULDSTOP(p)) {
2002 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
2003 			KASSERT(p->p_singlethread != NULL,
2004 			    ("singlethread not set"));
2005 			/*
2006 			 * The only suspension in action is a
2007 			 * single-threading. The single threader need not stop.
2008 			 * XXX Should be safe to access unlocked
2009 			 * as it can only be set to be true by us.
2010 			 */
2011 			if (p->p_singlethread == td)
2012 				return (0);	/* Exempt from stopping. */
2013 		}
2014 		if (return_instead)
2015 			return (1);
2016 
2017 		mtx_lock_spin(&sched_lock);
2018 		thread_stopped(p);
2019 		/*
2020 		 * If the process is waiting for us to exit,
2021 		 * this thread should just suicide.
2022 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
2023 		 */
2024 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
2025 			while (mtx_owned(&Giant))
2026 				mtx_unlock(&Giant);
2027 			if (p->p_flag & P_SA)
2028 				thread_exit();
2029 			else
2030 				thr_exit1();
2031 		}
2032 
2033 		/*
2034 		 * When a thread suspends, it just
2035 		 * moves to the process's suspend queue
2036 		 * and stays there.
2037 		 */
2038 		thread_suspend_one(td);
2039 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
2040 			if (p->p_numthreads == p->p_suspcount) {
2041 				thread_unsuspend_one(p->p_singlethread);
2042 			}
2043 		}
2044 		DROP_GIANT();
2045 		PROC_UNLOCK(p);
2046 		p->p_stats->p_ru.ru_nivcsw++;
2047 		mi_switch();
2048 		mtx_unlock_spin(&sched_lock);
2049 		PICKUP_GIANT();
2050 		PROC_LOCK(p);
2051 	}
2052 	return (0);
2053 }
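/*
 * A sketch (for illustration only) of how a caller that can back out might
 * use thread_suspend_check(); userret()-style callers instead pass 0 and
 * simply let the thread suspend or exit inside the call:
 *
 *	PROC_LOCK(p);
 *	if (thread_suspend_check(1)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	(caller must abort and back out)
 *	}
 *	...
 *	PROC_UNLOCK(p);
 */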
2054 
2055 void
2056 thread_suspend_one(struct thread *td)
2057 {
2058 	struct proc *p = td->td_proc;
2059 
2060 	mtx_assert(&sched_lock, MA_OWNED);
2061 	PROC_LOCK_ASSERT(p, MA_OWNED);
2062 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
2063 	p->p_suspcount++;
2064 	TD_SET_SUSPENDED(td);
2065 	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
2066 	/*
2067 	 * Hack: If we are suspending but are on the sleep queue
2068 	 * then we are in msleep or the cv equivalent. We
2069 	 * want to look like we have two inhibitors.
2070 	 * May already be set.. doesn't matter.
2071 	 */
2072 	if (TD_ON_SLEEPQ(td))
2073 		TD_SET_SLEEPING(td);
2074 }
2075 
2076 void
2077 thread_unsuspend_one(struct thread *td)
2078 {
2079 	struct proc *p = td->td_proc;
2080 
2081 	mtx_assert(&sched_lock, MA_OWNED);
2082 	PROC_LOCK_ASSERT(p, MA_OWNED);
2083 	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
2084 	TD_CLR_SUSPENDED(td);
2085 	p->p_suspcount--;
2086 	setrunnable(td);
2087 }
2088 
2089 /*
2090  * Allow all threads blocked by single threading to continue running.
2091  */
2092 void
2093 thread_unsuspend(struct proc *p)
2094 {
2095 	struct thread *td;
2096 
2097 	mtx_assert(&sched_lock, MA_OWNED);
2098 	PROC_LOCK_ASSERT(p, MA_OWNED);
2099 	if (!P_SHOULDSTOP(p)) {
2100 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2101 			thread_unsuspend_one(td);
2102 		}
2103 	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
2104 	    (p->p_numthreads == p->p_suspcount)) {
2105 		/*
2106 		 * Stopping everything also did the job for the single
2107 		 * threading request. Now we've downgraded to single-threaded,
2108 		 * let it continue.
2109 		 */
2110 		thread_unsuspend_one(p->p_singlethread);
2111 	}
2112 }
2113 
2114 void
2115 thread_single_end(void)
2116 {
2117 	struct thread *td;
2118 	struct proc *p;
2119 
2120 	td = curthread;
2121 	p = td->td_proc;
2122 	PROC_LOCK_ASSERT(p, MA_OWNED);
2123 	p->p_flag &= ~P_STOPPED_SINGLE;
2124 	mtx_lock_spin(&sched_lock);
2125 	p->p_singlethread = NULL;
2126 	/*
2127 	 * If there are other threads they may now run,
2128 	 * unless of course there is a blanket 'stop order'
2129 	 * on the process. The single threader must be allowed
2130 	 * to continue however as this is a bad place to stop.
2131 	 */
2132 	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
2133 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2134 			thread_unsuspend_one(td);
2135 		}
2136 	}
2137 	mtx_unlock_spin(&sched_lock);
2138 }
2139 
2140 
2141