xref: /freebsd/sys/kern/kern_thread.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
1 /*
2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3  *  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/proc.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/sysproto.h>
42 #include <sys/filedesc.h>
43 #include <sys/sched.h>
44 #include <sys/signalvar.h>
45 #include <sys/sx.h>
46 #include <sys/tty.h>
47 #include <sys/user.h>
48 #include <sys/jail.h>
49 #include <sys/kse.h>
50 #include <sys/ktr.h>
51 #include <sys/ucontext.h>
52 
53 #include <vm/vm.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_object.h>
56 #include <vm/pmap.h>
57 #include <vm/uma.h>
58 #include <vm/vm_map.h>
59 
60 #include <machine/frame.h>
61 
62 /*
63  * KSEGRP related storage.
64  */
65 static uma_zone_t ksegrp_zone;
66 static uma_zone_t kse_zone;
67 static uma_zone_t thread_zone;
68 static uma_zone_t upcall_zone;
69 
70 /* DEBUG ONLY */
71 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
72 static int thread_debug = 0;
73 SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
74 	&thread_debug, 0, "thread debug");
75 
76 static int max_threads_per_proc = 150;
77 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
78 	&max_threads_per_proc, 0, "Limit on threads per proc");
79 
80 static int max_groups_per_proc = 50;
81 SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
82 	&max_groups_per_proc, 0, "Limit on thread groups per proc");
83 
84 static int max_threads_hits;
85 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
86 	&max_threads_hits, 0, "");
87 
88 static int virtual_cpu;
89 
90 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
91 
92 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
93 TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
94 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
95 TAILQ_HEAD(, kse_upcall) zombie_upcalls =
96 	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
97 struct mtx kse_zombie_lock;
98 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
99 
100 static void kse_purge(struct proc *p, struct thread *td);
101 static void kse_purge_group(struct thread *td);
102 static int thread_update_usr_ticks(struct thread *td, int user);
103 static void thread_alloc_spare(struct thread *td, struct thread *spare);
104 
105 static int
106 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
107 {
108 	int error, new_val;
109 	int def_val;
110 
111 #ifdef SMP
112 	def_val = mp_ncpus;
113 #else
114 	def_val = 1;
115 #endif
116 	if (virtual_cpu == 0)
117 		new_val = def_val;
118 	else
119 		new_val = virtual_cpu;
120 	error = sysctl_handle_int(oidp, &new_val, 0, req);
121 	if (error != 0 || req->newptr == NULL)
122 		return (error);
123 	if (new_val < 0)
124 		return (EINVAL);
125 	virtual_cpu = new_val;
126 	return (0);
127 }
128 
129 /* DEBUG ONLY */
130 SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
131 	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
132 	"debug virtual cpus");
133 
134 /*
135  * Prepare a thread for use.
136  */
137 static void
138 thread_ctor(void *mem, int size, void *arg)
139 {
140 	struct thread	*td;
141 
142 	td = (struct thread *)mem;
143 	td->td_state = TDS_INACTIVE;
144 	td->td_oncpu	= NOCPU;
145 	td->td_critnest = 1;
146 }
147 
148 /*
149  * Reclaim a thread after use.
150  */
151 static void
152 thread_dtor(void *mem, int size, void *arg)
153 {
154 	struct thread	*td;
155 
156 	td = (struct thread *)mem;
157 
158 #ifdef INVARIANTS
159 	/* Verify that this thread is in a safe state to free. */
160 	switch (td->td_state) {
161 	case TDS_INHIBITED:
162 	case TDS_RUNNING:
163 	case TDS_CAN_RUN:
164 	case TDS_RUNQ:
165 		/*
166 		 * We must never unlink a thread that is in one of
167 		 * these states, because it is currently active.
168 		 */
169 		panic("bad state for thread unlinking");
170 		/* NOTREACHED */
171 	case TDS_INACTIVE:
172 		break;
173 	default:
174 		panic("bad thread state");
175 		/* NOTREACHED */
176 	}
177 #endif
178 }
179 
180 /*
181  * Initialize type-stable parts of a thread (when newly created).
182  */
183 static void
184 thread_init(void *mem, int size)
185 {
186 	struct thread	*td;
187 
188 	td = (struct thread *)mem;
189 	mtx_lock(&Giant);
190 	vm_thread_new(td, 0);
191 	mtx_unlock(&Giant);
192 	cpu_thread_setup(td);
193 	td->td_sched = (struct td_sched *)&td[1];
194 }
195 
196 /*
197  * Tear down type-stable parts of a thread (just before being discarded).
198  */
199 static void
200 thread_fini(void *mem, int size)
201 {
202 	struct thread	*td;
203 
204 	td = (struct thread *)mem;
205 	vm_thread_dispose(td);
206 }
207 
208 /*
209  * Initialize type-stable parts of a kse (when newly created).
210  */
211 static void
212 kse_init(void *mem, int size)
213 {
214 	struct kse	*ke;
215 
216 	ke = (struct kse *)mem;
217 	ke->ke_sched = (struct ke_sched *)&ke[1];
218 }
219 
220 /*
221  * Initialize type-stable parts of a ksegrp (when newly created).
222  */
223 static void
224 ksegrp_init(void *mem, int size)
225 {
226 	struct ksegrp	*kg;
227 
228 	kg = (struct ksegrp *)mem;
229 	kg->kg_sched = (struct kg_sched *)&kg[1];
230 }
231 
232 /*
233  * Link a KSE into its KSE group.
234  */
235 void
236 kse_link(struct kse *ke, struct ksegrp *kg)
237 {
238 	struct proc *p = kg->kg_proc;
239 
240 	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
241 	kg->kg_kses++;
242 	ke->ke_state	= KES_UNQUEUED;
243 	ke->ke_proc	= p;
244 	ke->ke_ksegrp	= kg;
245 	ke->ke_thread	= NULL;
246 	ke->ke_oncpu	= NOCPU;
247 	ke->ke_flags	= 0;
248 }
249 
250 void
251 kse_unlink(struct kse *ke)
252 {
253 	struct ksegrp *kg;
254 
255 	mtx_assert(&sched_lock, MA_OWNED);
256 	kg = ke->ke_ksegrp;
257 	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
258 	if (ke->ke_state == KES_IDLE) {
259 		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
260 		kg->kg_idle_kses--;
261 	}
262 	--kg->kg_kses;
263 	/*
264 	 * Aggregate stats from the KSE
265 	 */
266 	kse_stash(ke);
267 }
268 
269 void
270 ksegrp_link(struct ksegrp *kg, struct proc *p)
271 {
272 
273 	TAILQ_INIT(&kg->kg_threads);
274 	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
275 	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
276 	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
277 	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
278 	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
279 	kg->kg_proc = p;
280 	/*
281 	 * the following counters are in the -zero- section
282 	 * and may not need clearing
283 	 */
284 	kg->kg_numthreads = 0;
285 	kg->kg_runnable   = 0;
286 	kg->kg_kses       = 0;
287 	kg->kg_runq_kses  = 0; /* XXXKSE change name */
288 	kg->kg_idle_kses  = 0;
289 	kg->kg_numupcalls = 0;
290 	/* link it in now that it's consistent */
291 	p->p_numksegrps++;
292 	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
293 }
294 
295 void
296 ksegrp_unlink(struct ksegrp *kg)
297 {
298 	struct proc *p;
299 
300 	mtx_assert(&sched_lock, MA_OWNED);
301 	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
302 	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
303 	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
304 
305 	p = kg->kg_proc;
306 	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
307 	p->p_numksegrps--;
308 	/*
309 	 * Aggregate stats from the KSE
310 	 */
311 	ksegrp_stash(kg);
312 }
313 
314 struct kse_upcall *
315 upcall_alloc(void)
316 {
317 	struct kse_upcall *ku;
318 
319 	ku = uma_zalloc(upcall_zone, M_WAITOK);
320 	bzero(ku, sizeof(*ku));
321 	return (ku);
322 }
323 
324 void
325 upcall_free(struct kse_upcall *ku)
326 {
327 
328 	uma_zfree(upcall_zone, ku);
329 }
330 
331 void
332 upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
333 {
334 
335 	mtx_assert(&sched_lock, MA_OWNED);
336 	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
337 	ku->ku_ksegrp = kg;
338 	kg->kg_numupcalls++;
339 }
340 
341 void
342 upcall_unlink(struct kse_upcall *ku)
343 {
344 	struct ksegrp *kg = ku->ku_ksegrp;
345 
346 	mtx_assert(&sched_lock, MA_OWNED);
347 	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
348 	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
349 	kg->kg_numupcalls--;
350 	upcall_stash(ku);
351 }
352 
353 void
354 upcall_remove(struct thread *td)
355 {
356 
357 	if (td->td_upcall) {
358 		td->td_upcall->ku_owner = NULL;
359 		upcall_unlink(td->td_upcall);
360 		td->td_upcall = 0;
361 	}
362 }
363 
364 /*
365  * For a newly created process,
366  * link up all the structures and its initial threads etc.
367  */
368 void
369 proc_linkup(struct proc *p, struct ksegrp *kg,
370 	    struct kse *ke, struct thread *td)
371 {
372 
373 	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
374 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
375 	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
376 	p->p_numksegrps = 0;
377 	p->p_numthreads = 0;
378 
379 	ksegrp_link(kg, p);
380 	kse_link(ke, kg);
381 	thread_link(td, kg);
382 }
383 
384 /*
385 struct kse_thr_interrupt_args {
386 	struct kse_thr_mailbox * tmbx;
387 	int cmd;
388 	long data;
389 };
390 */
391 int
392 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
393 {
394 	struct proc *p;
395 	struct thread *td2;
396 
397 	p = td->td_proc;
398 
399 	if (!(p->p_flag & P_SA))
400 		return (EINVAL);
401 
402 	switch (uap->cmd) {
403 	case KSE_INTR_SENDSIG:
404 		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
405 			return (EINVAL);
406 	case KSE_INTR_INTERRUPT:
407 	case KSE_INTR_RESTART:
408 		PROC_LOCK(p);
409 		mtx_lock_spin(&sched_lock);
410 		FOREACH_THREAD_IN_PROC(p, td2) {
411 			if (td2->td_mailbox == uap->tmbx)
412 				break;
413 		}
414 		if (td2 == NULL) {
415 			mtx_unlock_spin(&sched_lock);
416 			PROC_UNLOCK(p);
417 			return (ESRCH);
418 		}
419 		if (uap->cmd == KSE_INTR_SENDSIG) {
420 			if (uap->data > 0) {
421 				td2->td_flags &= ~TDF_INTERRUPT;
422 				mtx_unlock_spin(&sched_lock);
423 				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
424 			} else {
425 				mtx_unlock_spin(&sched_lock);
426 			}
427 		} else {
428 			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
429 			if (TD_CAN_UNBIND(td2))
430 				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
431 			if (uap->cmd == KSE_INTR_INTERRUPT)
432 				td2->td_intrval = EINTR;
433 			else
434 				td2->td_intrval = ERESTART;
435 			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
436 				if (td2->td_flags & TDF_CVWAITQ)
437 					cv_abort(td2);
438 				else
439 					abortsleep(td2);
440 			}
441 			mtx_unlock_spin(&sched_lock);
442 		}
443 		PROC_UNLOCK(p);
444 		break;
445 	case KSE_INTR_SIGEXIT:
446 		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
447 			return (EINVAL);
448 		PROC_LOCK(p);
449 		sigexit(td, (int)uap->data);
450 		break;
451 	default:
452 		return (EINVAL);
453 	}
454 	return (0);
455 }
456 
457 /*
458 struct kse_exit_args {
459 	register_t dummy;
460 };
461 */
462 int
463 kse_exit(struct thread *td, struct kse_exit_args *uap)
464 {
465 	struct proc *p;
466 	struct ksegrp *kg;
467 	struct kse *ke;
468 	struct kse_upcall *ku, *ku2;
469 	int    error, count;
470 
471 	p = td->td_proc;
472 	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
473 		return (EINVAL);
474 	kg = td->td_ksegrp;
475 	count = 0;
476 	PROC_LOCK(p);
477 	mtx_lock_spin(&sched_lock);
478 	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
479 		if (ku2->ku_flags & KUF_EXITING)
480 			count++;
481 	}
482 	if ((kg->kg_numupcalls - count) == 1 &&
483 	    (kg->kg_numthreads > 1)) {
484 		mtx_unlock_spin(&sched_lock);
485 		PROC_UNLOCK(p);
486 		return (EDEADLK);
487 	}
488 	ku->ku_flags |= KUF_EXITING;
489 	mtx_unlock_spin(&sched_lock);
490 	PROC_UNLOCK(p);
491 	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
492 	PROC_LOCK(p);
493 	if (error)
494 		psignal(p, SIGSEGV);
495 	mtx_lock_spin(&sched_lock);
496 	upcall_remove(td);
497 	ke = td->td_kse;
498 	if (p->p_numthreads == 1) {
499 		kse_purge(p, td);
500 		p->p_flag &= ~P_SA;
501 		mtx_unlock_spin(&sched_lock);
502 		PROC_UNLOCK(p);
503 	} else {
504 		if (kg->kg_numthreads == 1) { /* Shutdown a group */
505 			kse_purge_group(td);
506 			ke->ke_flags |= KEF_EXIT;
507 		}
508 		thread_stopped(p);
509 		thread_exit();
510 		/* NOTREACHED */
511 	}
512 	return (0);
513 }
514 
515 /*
516  * Either becomes an upcall or waits for an awakening event and
517  * then becomes an upcall. Only error cases return.
518  */
519 /*
520 struct kse_release_args {
521 	struct timespec *timeout;
522 };
523 */
524 int
525 kse_release(struct thread *td, struct kse_release_args *uap)
526 {
527 	struct proc *p;
528 	struct ksegrp *kg;
529 	struct kse_upcall *ku;
530 	struct timespec timeout;
531 	struct timeval tv;
532 	sigset_t sigset;
533 	int error;
534 
535 	p = td->td_proc;
536 	kg = td->td_ksegrp;
537 	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
538 		return (EINVAL);
539 	if (uap->timeout != NULL) {
540 		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
541 			return (error);
542 		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
543 	}
544 	if (td->td_flags & TDF_SA)
545 		td->td_pflags |= TDP_UPCALLING;
546 	else {
547 		ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
548 		if (ku->ku_mflags == -1) {
549 			PROC_LOCK(p);
550 			sigexit(td, SIGSEGV);
551 		}
552 	}
553 	PROC_LOCK(p);
554 	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
555 		/* UTS wants to wait for signal event */
556 		if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL))
557 			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
558 			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
559 		p->p_flag &= ~P_SIGEVENT;
560 		sigset = p->p_siglist;
561 		PROC_UNLOCK(p);
562 		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
563 		    sizeof(sigset));
564 	} else {
565 		if (!kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) {
566 			kg->kg_upsleeps++;
567 			error = msleep(&kg->kg_completed, &p->p_mtx,
568 				PPAUSE|PCATCH, "kserel",
569 				(uap->timeout ? tvtohz(&tv) : 0));
570 			kg->kg_upsleeps--;
571 		}
572 		PROC_UNLOCK(p);
573 	}
574 	if (ku->ku_flags & KUF_DOUPCALL) {
575 		mtx_lock_spin(&sched_lock);
576 		ku->ku_flags &= ~KUF_DOUPCALL;
577 		mtx_unlock_spin(&sched_lock);
578 	}
579 	return (0);
580 }
581 
582 /* struct kse_wakeup_args {
583 	struct kse_mailbox *mbx;
584 }; */
585 int
586 kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
587 {
588 	struct proc *p;
589 	struct ksegrp *kg;
590 	struct kse_upcall *ku;
591 	struct thread *td2;
592 
593 	p = td->td_proc;
594 	td2 = NULL;
595 	ku = NULL;
596 	/* KSE-enabled processes only, please. */
597 	if (!(p->p_flag & P_SA))
598 		return (EINVAL);
599 	PROC_LOCK(p);
600 	mtx_lock_spin(&sched_lock);
601 	if (uap->mbx) {
602 		FOREACH_KSEGRP_IN_PROC(p, kg) {
603 			FOREACH_UPCALL_IN_GROUP(kg, ku) {
604 				if (ku->ku_mailbox == uap->mbx)
605 					break;
606 			}
607 			if (ku)
608 				break;
609 		}
610 	} else {
611 		kg = td->td_ksegrp;
612 		if (kg->kg_upsleeps) {
613 			wakeup_one(&kg->kg_completed);
614 			mtx_unlock_spin(&sched_lock);
615 			PROC_UNLOCK(p);
616 			return (0);
617 		}
618 		ku = TAILQ_FIRST(&kg->kg_upcalls);
619 	}
620 	if (ku) {
621 		if ((td2 = ku->ku_owner) == NULL) {
622 			panic("%s: no owner", __func__);
623 		} else if (TD_ON_SLEEPQ(td2) &&
624 		           ((td2->td_wchan == &kg->kg_completed) ||
625 			    (td2->td_wchan == &p->p_siglist &&
626 			     (ku->ku_mflags & KMF_WAITSIGEVENT)))) {
627 			abortsleep(td2);
628 		} else {
629 			ku->ku_flags |= KUF_DOUPCALL;
630 		}
631 		mtx_unlock_spin(&sched_lock);
632 		PROC_UNLOCK(p);
633 		return (0);
634 	}
635 	mtx_unlock_spin(&sched_lock);
636 	PROC_UNLOCK(p);
637 	return (ESRCH);
638 }
639 
640 /*
641  * First call with no new KSEG: use the current KSE and don't schedule an upcall.
642  * In all other situations, allocate max new KSEs and schedule an upcall.
643  */
644 /* struct kse_create_args {
645 	struct kse_mailbox *mbx;
646 	int newgroup;
647 }; */
648 int
649 kse_create(struct thread *td, struct kse_create_args *uap)
650 {
651 	struct kse *newke;
652 	struct ksegrp *newkg;
653 	struct ksegrp *kg;
654 	struct proc *p;
655 	struct kse_mailbox mbx;
656 	struct kse_upcall *newku;
657 	int err, ncpus, sa = 0, first = 0;
658 	struct thread *newtd;
659 
660 	p = td->td_proc;
661 	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
662 		return (err);
663 
664 	/* Too bad; why doesn't the kernel always have a cpu counter!? */
665 #ifdef SMP
666 	ncpus = mp_ncpus;
667 #else
668 	ncpus = 1;
669 #endif
670 	if (virtual_cpu != 0)
671 		ncpus = virtual_cpu;
672 	if (!(mbx.km_flags & KMF_BOUND))
673 		sa = TDF_SA;
674 	else
675 		ncpus = 1;
676 	PROC_LOCK(p);
677 	if (!(p->p_flag & P_SA)) {
678 		first = 1;
679 		p->p_flag |= P_SA;
680 	}
681 	PROC_UNLOCK(p);
682 	if (!sa && !uap->newgroup && !first)
683 		return (EINVAL);
684 	kg = td->td_ksegrp;
685 	if (uap->newgroup) {
686 		/* There is a race condition here, but it is cheap */
687 		if (p->p_numksegrps >= max_groups_per_proc)
688 			return (EPROCLIM);
689 		/*
690 		 * If we want a new KSEGRP it doesn't matter whether
691 		 * we have already fired up KSE mode before or not.
692 		 * We put the process in KSE mode and create a new KSEGRP.
693 		 */
694 		newkg = ksegrp_alloc();
695 		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
696 		      kg_startzero, kg_endzero));
697 		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
698 		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
699 		PROC_LOCK(p);
700 		mtx_lock_spin(&sched_lock);
701 		if (p->p_numksegrps >= max_groups_per_proc) {
702 			mtx_unlock_spin(&sched_lock);
703 			PROC_UNLOCK(p);
704 			ksegrp_free(newkg);
705 			return (EPROCLIM);
706 		}
707 		ksegrp_link(newkg, p);
708 		sched_fork_ksegrp(kg, newkg);
709 		mtx_unlock_spin(&sched_lock);
710 		PROC_UNLOCK(p);
711 	} else {
712 		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
713 			return (EINVAL);
714 		newkg = kg;
715 	}
716 
717 	/*
718 	 * Creating more upcalls than the number of physical cpus does
719 	 * not help performance.
720 	 */
721 	if (newkg->kg_numupcalls >= ncpus)
722 		return (EPROCLIM);
723 
724 	if (newkg->kg_numupcalls == 0) {
725 		/*
726 		 * Initialize KSE group
727 		 *
728 		 * For a multiplexed group, create as many KSEs as there are
729 		 * physical cpus. This increases concurrency even if userland
730 		 * is not MP safe and can only run on a single CPU.
731 		 * In an ideal world, every physical cpu should execute a thread.
732 		 * If there are enough KSEs, threads in the kernel can run in
733 		 * parallel on different cpus at full speed; concurrency in the
734 		 * kernel shouldn't be restricted by the number of upcalls
735 		 * userland provides. Adding more upcall structures only
736 		 * increases concurrency in userland.
737 		 *
738 		 * For a bound thread group, because there is only one thread in
739 		 * the group, we only create one KSE for it. A thread in this
740 		 * kind of group will never schedule an upcall when blocked;
741 		 * this is intended to simulate a pthread system scope thread.
742 		 */
743 		while (newkg->kg_kses < ncpus) {
744 			newke = kse_alloc();
745 			bzero(&newke->ke_startzero, RANGEOF(struct kse,
746 			      ke_startzero, ke_endzero));
747 #if 0
748 			mtx_lock_spin(&sched_lock);
749 			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
750 			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
751 			mtx_unlock_spin(&sched_lock);
752 #endif
753 			mtx_lock_spin(&sched_lock);
754 			kse_link(newke, newkg);
755 			sched_fork_kse(td->td_kse, newke);
756 			/* Add engine */
757 			kse_reassign(newke);
758 			mtx_unlock_spin(&sched_lock);
759 		}
760 	}
761 	newku = upcall_alloc();
762 	newku->ku_mailbox = uap->mbx;
763 	newku->ku_func = mbx.km_func;
764 	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
765 
766 	/* For the first call this may not have been set */
767 	if (td->td_standin == NULL)
768 		thread_alloc_spare(td, NULL);
769 
770 	PROC_LOCK(p);
771 	if (newkg->kg_numupcalls >= ncpus) {
772 		PROC_UNLOCK(p);
773 		upcall_free(newku);
774 		return (EPROCLIM);
775 	}
776 	if (first && sa) {
777 		SIGSETOR(p->p_siglist, td->td_siglist);
778 		SIGEMPTYSET(td->td_siglist);
779 		SIGFILLSET(td->td_sigmask);
780 		SIG_CANTMASK(td->td_sigmask);
781 	}
782 	mtx_lock_spin(&sched_lock);
783 	PROC_UNLOCK(p);
784 	upcall_link(newku, newkg);
785 	if (mbx.km_quantum)
786 		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
787 
788 	/*
789 	 * Each upcall structure has an owner thread; find which
790 	 * one owns it.
791 	 */
792 	if (uap->newgroup) {
793 		/*
794 		 * Because the new ksegrp has no threads yet,
795 		 * create an initial upcall thread to own it.
796 		 */
797 		newtd = thread_schedule_upcall(td, newku);
798 	} else {
799 		/*
800 		 * If the current thread doesn't have an upcall structure,
801 		 * just assign the upcall to it.
802 		 */
803 		if (td->td_upcall == NULL) {
804 			newku->ku_owner = td;
805 			td->td_upcall = newku;
806 			newtd = td;
807 		} else {
808 			/*
809 			 * Create a new upcall thread to own it.
810 			 */
811 			newtd = thread_schedule_upcall(td, newku);
812 		}
813 	}
814 	if (!sa) {
815 		newtd->td_mailbox = mbx.km_curthread;
816 		newtd->td_flags &= ~TDF_SA;
817 		if (newtd != td) {
818 			mtx_unlock_spin(&sched_lock);
819 			cpu_set_upcall_kse(newtd, newku);
820 			mtx_lock_spin(&sched_lock);
821 		}
822 	} else {
823 		newtd->td_flags |= TDF_SA;
824 	}
825 	if (newtd != td)
826 		setrunqueue(newtd);
827 	mtx_unlock_spin(&sched_lock);
828 	return (0);
829 }
830 
831 /*
832  * Initialize global thread allocation resources.
833  */
834 void
835 threadinit(void)
836 {
837 
838 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
839 	    thread_ctor, thread_dtor, thread_init, thread_fini,
840 	    UMA_ALIGN_CACHE, 0);
841 	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
842 	    NULL, NULL, ksegrp_init, NULL,
843 	    UMA_ALIGN_CACHE, 0);
844 	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
845 	    NULL, NULL, kse_init, NULL,
846 	    UMA_ALIGN_CACHE, 0);
847 	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
848 	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
849 }
850 
851 /*
852  * Stash an embarrassingly extra thread into the zombie thread queue.
853  */
854 void
855 thread_stash(struct thread *td)
856 {
857 	mtx_lock_spin(&kse_zombie_lock);
858 	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
859 	mtx_unlock_spin(&kse_zombie_lock);
860 }
861 
862 /*
863  * Stash an embarrassingly extra kse into the zombie kse queue.
864  */
865 void
866 kse_stash(struct kse *ke)
867 {
868 	mtx_lock_spin(&kse_zombie_lock);
869 	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
870 	mtx_unlock_spin(&kse_zombie_lock);
871 }
872 
873 /*
874  * Stash an embarrassingly extra upcall into the zombie upcall queue.
875  */
876 
877 void
878 upcall_stash(struct kse_upcall *ku)
879 {
880 	mtx_lock_spin(&kse_zombie_lock);
881 	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
882 	mtx_unlock_spin(&kse_zombie_lock);
883 }
884 
885 /*
886  * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
887  */
888 void
889 ksegrp_stash(struct ksegrp *kg)
890 {
891 	mtx_lock_spin(&kse_zombie_lock);
892 	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
893 	mtx_unlock_spin(&kse_zombie_lock);
894 }
895 
896 /*
897  * Reap zombie kse resources.
898  */
899 void
900 thread_reap(void)
901 {
902 	struct thread *td_first, *td_next;
903 	struct kse *ke_first, *ke_next;
904 	struct ksegrp *kg_first, *kg_next;
905 	struct kse_upcall *ku_first, *ku_next;
906 
907 	/*
908 	 * Don't even bother to lock if none at this instant,
909 	 * we really don't care about the next instant..
910 	 */
911 	if ((!TAILQ_EMPTY(&zombie_threads))
912 	    || (!TAILQ_EMPTY(&zombie_kses))
913 	    || (!TAILQ_EMPTY(&zombie_ksegrps))
914 	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
915 		mtx_lock_spin(&kse_zombie_lock);
916 		td_first = TAILQ_FIRST(&zombie_threads);
917 		ke_first = TAILQ_FIRST(&zombie_kses);
918 		kg_first = TAILQ_FIRST(&zombie_ksegrps);
919 		ku_first = TAILQ_FIRST(&zombie_upcalls);
920 		if (td_first)
921 			TAILQ_INIT(&zombie_threads);
922 		if (ke_first)
923 			TAILQ_INIT(&zombie_kses);
924 		if (kg_first)
925 			TAILQ_INIT(&zombie_ksegrps);
926 		if (ku_first)
927 			TAILQ_INIT(&zombie_upcalls);
928 		mtx_unlock_spin(&kse_zombie_lock);
929 		while (td_first) {
930 			td_next = TAILQ_NEXT(td_first, td_runq);
931 			if (td_first->td_ucred)
932 				crfree(td_first->td_ucred);
933 			thread_free(td_first);
934 			td_first = td_next;
935 		}
936 		while (ke_first) {
937 			ke_next = TAILQ_NEXT(ke_first, ke_procq);
938 			kse_free(ke_first);
939 			ke_first = ke_next;
940 		}
941 		while (kg_first) {
942 			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
943 			ksegrp_free(kg_first);
944 			kg_first = kg_next;
945 		}
946 		while (ku_first) {
947 			ku_next = TAILQ_NEXT(ku_first, ku_link);
948 			upcall_free(ku_first);
949 			ku_first = ku_next;
950 		}
951 	}
952 }
953 
954 /*
955  * Allocate a ksegrp.
956  */
957 struct ksegrp *
958 ksegrp_alloc(void)
959 {
960 	return (uma_zalloc(ksegrp_zone, M_WAITOK));
961 }
962 
963 /*
964  * Allocate a kse.
965  */
966 struct kse *
967 kse_alloc(void)
968 {
969 	return (uma_zalloc(kse_zone, M_WAITOK));
970 }
971 
972 /*
973  * Allocate a thread.
974  */
975 struct thread *
976 thread_alloc(void)
977 {
978 	thread_reap(); /* check if any zombies to get */
979 	return (uma_zalloc(thread_zone, M_WAITOK));
980 }
981 
982 /*
983  * Deallocate a ksegrp.
984  */
985 void
986 ksegrp_free(struct ksegrp *td)
987 {
988 	uma_zfree(ksegrp_zone, td);
989 }
990 
991 /*
992  * Deallocate a kse.
993  */
994 void
995 kse_free(struct kse *td)
996 {
997 	uma_zfree(kse_zone, td);
998 }
999 
1000 /*
1001  * Deallocate a thread.
1002  */
1003 void
1004 thread_free(struct thread *td)
1005 {
1006 
1007 	cpu_thread_clean(td);
1008 	uma_zfree(thread_zone, td);
1009 }
1010 
1011 /*
1012  * Store the thread context in the UTS's mailbox, then add the mailbox at
1013  * the head of a list we are building in user space.
1014  * The list is anchored in the ksegrp structure.
1015  */
1016 int
1017 thread_export_context(struct thread *td, int willexit)
1018 {
1019 	struct proc *p;
1020 	struct ksegrp *kg;
1021 	uintptr_t mbx;
1022 	void *addr;
1023 	int error = 0, temp, sig;
1024 	mcontext_t mc;
1025 
1026 	p = td->td_proc;
1027 	kg = td->td_ksegrp;
1028 
1029 	/* Export the user/machine context. */
1030 	get_mcontext(td, &mc, 0);
1031 	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
1032 	error = copyout(&mc, addr, sizeof(mcontext_t));
1033 	if (error)
1034 		goto bad;
1035 
1036 	/* Export clock ticks spent in kernel mode */
1037 	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
1038 	temp = fuword32(addr) + td->td_usticks;
1039 	if (suword32(addr, temp)) {
1040 		error = EFAULT;
1041 		goto bad;
1042 	}
1043 
1044 	/*
1045 	 * Post a sync signal, or process SIGKILL and SIGSTOP.
1046 	 * A sync signal is only possible when the signal is not
1047 	 * caught by userland or the process is being debugged.
1048 	 */
1049 	PROC_LOCK(p);
1050 	if (td->td_flags & TDF_NEEDSIGCHK) {
1051 		mtx_lock_spin(&sched_lock);
1052 		td->td_flags &= ~TDF_NEEDSIGCHK;
1053 		mtx_unlock_spin(&sched_lock);
1054 		mtx_lock(&p->p_sigacts->ps_mtx);
1055 		while ((sig = cursig(td)) != 0)
1056 			postsig(sig);
1057 		mtx_unlock(&p->p_sigacts->ps_mtx);
1058 	}
1059 	if (willexit)
1060 		SIGFILLSET(td->td_sigmask);
1061 	PROC_UNLOCK(p);
1062 
1063 	/* Get the address of the list pointer in the latest mbox */
1064 	addr = (void *)(&td->td_mailbox->tm_next);
1065 	/*
1066 	 * Put the saved address of the previous first
1067 	 * entry into this one
1068 	 */
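	/*
	 * Cheap lock-free push: write the current list head into the
	 * user-space tm_next field, then commit only if the head is
	 * still unchanged under the proc lock; otherwise retry.
	 */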
1069 	for (;;) {
1070 		mbx = (uintptr_t)kg->kg_completed;
1071 		if (suword(addr, mbx)) {
1072 			error = EFAULT;
1073 			goto bad;
1074 		}
1075 		PROC_LOCK(p);
1076 		if (mbx == (uintptr_t)kg->kg_completed) {
1077 			kg->kg_completed = td->td_mailbox;
1078 			/*
1079 			 * The thread context may be taken away by
1080 			 * other upcall threads when we unlock the
1081 			 * process lock. It's no longer valid to
1082 			 * use it again anywhere else.
1083 			 */
1084 			td->td_mailbox = NULL;
1085 			PROC_UNLOCK(p);
1086 			break;
1087 		}
1088 		PROC_UNLOCK(p);
1089 	}
1090 	td->td_usticks = 0;
1091 	return (0);
1092 
1093 bad:
1094 	PROC_LOCK(p);
1095 	sigexit(td, SIGILL);
1096 	return (error);
1097 }
1098 
1099 /*
1100  * Take the list of completed mailboxes for this KSEGRP and put them on this
1101  * upcall's mailbox as it's the next one going up.
1102  */
1103 static int
1104 thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1105 {
1106 	struct proc *p = kg->kg_proc;
1107 	void *addr;
1108 	uintptr_t mbx;
1109 
1110 	addr = (void *)(&ku->ku_mailbox->km_completed);
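	/*
	 * Same retry scheme as thread_export_context(): only detach the
	 * completed list once the user-visible copy matches the head we saw.
	 */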
1111 	for (;;) {
1112 		mbx = (uintptr_t)kg->kg_completed;
1113 		if (suword(addr, mbx)) {
1114 			PROC_LOCK(p);
1115 			psignal(p, SIGSEGV);
1116 			PROC_UNLOCK(p);
1117 			return (EFAULT);
1118 		}
1119 		PROC_LOCK(p);
1120 		if (mbx == (uintptr_t)kg->kg_completed) {
1121 			kg->kg_completed = NULL;
1122 			PROC_UNLOCK(p);
1123 			break;
1124 		}
1125 		PROC_UNLOCK(p);
1126 	}
1127 	return (0);
1128 }
1129 
1130 /*
1131  * This function should be called at statclock interrupt time
1132  */
1133 int
1134 thread_statclock(int user)
1135 {
1136 	struct thread *td = curthread;
1137 	struct ksegrp *kg = td->td_ksegrp;
1138 
1139 	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
1140 		return (0);
1141 	if (user) {
1142 		/* Currently always done via ast() */
1143 		mtx_lock_spin(&sched_lock);
1144 		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
1145 		mtx_unlock_spin(&sched_lock);
1146 		td->td_uuticks++;
1147 	} else {
1148 		if (td->td_mailbox != NULL)
1149 			td->td_usticks++;
1150 		else {
1151 			/* XXXKSE
1152 			 * We will call thread_user_enter() for every
1153 			 * kernel entry in the future, so if the thread mailbox
1154 			 * is NULL, it must be the UTS running in the kernel;
1155 			 * don't account clock ticks for it.
1156 			 */
1157 		}
1158 	}
1159 	return (0);
1160 }
1161 
1162 /*
1163  * Export stat clock ticks for userland
1164  */
1165 static int
1166 thread_update_usr_ticks(struct thread *td, int user)
1167 {
1168 	struct proc *p = td->td_proc;
1169 	struct kse_thr_mailbox *tmbx;
1170 	struct kse_upcall *ku;
1171 	struct ksegrp *kg;
1172 	caddr_t addr;
1173 	u_int uticks;
1174 
1175 	if ((ku = td->td_upcall) == NULL)
1176 		return (-1);
1177 
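	/*
	 * Find the current thread mailbox via the upcall's kse mailbox;
	 * if it can't be read or isn't set there is nowhere to put the ticks.
	 */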
1178 	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1179 	if ((tmbx == NULL) || (tmbx == (void *)-1))
1180 		return (-1);
1181 	if (user) {
1182 		uticks = td->td_uuticks;
1183 		td->td_uuticks = 0;
1184 		addr = (caddr_t)&tmbx->tm_uticks;
1185 	} else {
1186 		uticks = td->td_usticks;
1187 		td->td_usticks = 0;
1188 		addr = (caddr_t)&tmbx->tm_sticks;
1189 	}
1190 	if (uticks) {
1191 		if (suword32(addr, uticks+fuword32(addr))) {
1192 			PROC_LOCK(p);
1193 			psignal(p, SIGSEGV);
1194 			PROC_UNLOCK(p);
1195 			return (-2);
1196 		}
1197 	}
1198 	kg = td->td_ksegrp;
1199 	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
1200 		mtx_lock_spin(&sched_lock);
1201 		td->td_upcall->ku_flags |= KUF_DOUPCALL;
1202 		mtx_unlock_spin(&sched_lock);
1203 	}
1204 	return (0);
1205 }
1206 
1207 /*
1208  * Discard the current thread and exit from its context.
1209  *
1210  * Because we can't free a thread while we're operating under its context,
1211  * push the current thread into our CPU's deadthread holder. This means
1212  * we needn't worry about someone else grabbing our context before we
1213  * do a cpu_throw().
1214  */
1215 void
1216 thread_exit(void)
1217 {
1218 	struct thread *td;
1219 	struct kse *ke;
1220 	struct proc *p;
1221 	struct ksegrp	*kg;
1222 
1223 	td = curthread;
1224 	kg = td->td_ksegrp;
1225 	p = td->td_proc;
1226 	ke = td->td_kse;
1227 
1228 	mtx_assert(&sched_lock, MA_OWNED);
1229 	KASSERT(p != NULL, ("thread exiting without a process"));
1230 	KASSERT(ke != NULL, ("thread exiting without a kse"));
1231 	KASSERT(kg != NULL, ("thread exiting without a kse group"));
1232 	PROC_LOCK_ASSERT(p, MA_OWNED);
1233 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
1234 	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
1235 
1236 	if (td->td_standin != NULL) {
1237 		thread_stash(td->td_standin);
1238 		td->td_standin = NULL;
1239 	}
1240 
1241 	cpu_thread_exit(td);	/* XXXSMP */
1242 
1243 	/*
1244 	 * The last thread is left attached to the process
1245 	 * so that the whole bundle gets recycled. Skip
1246 	 * all this stuff.
1247 	 */
1248 	if (p->p_numthreads > 1) {
1249 		thread_unlink(td);
1250 		if (p->p_maxthrwaits)
1251 			wakeup(&p->p_numthreads);
1252 		/*
1253 		 * The test below is NOT true if we are the
1254 		 * sole exiting thread. P_STOPPED_SINGLE is unset
1255 		 * in exit1() after it is the only survivor.
1256 		 */
1257 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1258 			if (p->p_numthreads == p->p_suspcount) {
1259 				thread_unsuspend_one(p->p_singlethread);
1260 			}
1261 		}
1262 
1263 		/*
1264 		 * Because each upcall structure has an owner thread,
1265 		 * and the owner thread exits only when the process is
1266 		 * exiting, an upcall to userland is no longer needed and
1267 		 * deleting the upcall structure is safe here.
1268 		 * So when all threads in a group have exited, all upcalls
1269 		 * in the group should be automatically freed.
1270 		 */
1271 		if (td->td_upcall)
1272 			upcall_remove(td);
1273 
1274 		sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
1275 		sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
1276 		ke->ke_state = KES_UNQUEUED;
1277 		ke->ke_thread = NULL;
1278 		/*
1279 		 * Decide what to do with the KSE attached to this thread.
1280 		 */
1281 		if (ke->ke_flags & KEF_EXIT) {
1282 			kse_unlink(ke);
1283 			if (kg->kg_kses == 0) {
1284 				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
1285 				ksegrp_unlink(kg);
1286 			}
1287 		}
1288 		else
1289 			kse_reassign(ke);
1290 		PROC_UNLOCK(p);
1291 		td->td_kse	= NULL;
1292 		td->td_state	= TDS_INACTIVE;
1293 #if 0
1294 		td->td_proc	= NULL;
1295 #endif
1296 		td->td_ksegrp	= NULL;
1297 		td->td_last_kse	= NULL;
1298 		PCPU_SET(deadthread, td);
1299 	} else {
1300 		PROC_UNLOCK(p);
1301 	}
1302 	/* XXX Shouldn't cpu_throw() here. */
1303 	mtx_assert(&sched_lock, MA_OWNED);
1304 	cpu_throw(td, choosethread());
1305 	panic("I'm a teapot!");
1306 	/* NOTREACHED */
1307 }
1308 
1309 /*
1310  * Do any thread-specific cleanups that may be needed in wait().
1311  * Called with Giant held; the proc and sched locks are not held.
1312  */
1313 void
1314 thread_wait(struct proc *p)
1315 {
1316 	struct thread *td;
1317 
1318 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
1319 	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
1320 	FOREACH_THREAD_IN_PROC(p, td) {
1321 		if (td->td_standin != NULL) {
1322 			thread_free(td->td_standin);
1323 			td->td_standin = NULL;
1324 		}
1325 		cpu_thread_clean(td);
1326 	}
1327 	thread_reap();	/* check for zombie threads etc. */
1328 }
1329 
1330 /*
1331  * Link a thread to a process.
1332  * Set up anything that needs to be initialized for it to
1333  * be used by the process.
1334  *
1335  * Note that we do not link to the proc's ucred here.
1336  * The thread is linked as if running but with no KSE assigned.
1337  */
1338 void
1339 thread_link(struct thread *td, struct ksegrp *kg)
1340 {
1341 	struct proc *p;
1342 
1343 	p = kg->kg_proc;
1344 	td->td_state    = TDS_INACTIVE;
1345 	td->td_proc     = p;
1346 	td->td_ksegrp   = kg;
1347 	td->td_last_kse = NULL;
1348 	td->td_flags    = 0;
1349 	td->td_kse      = NULL;
1350 
1351 	LIST_INIT(&td->td_contested);
1352 	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
1353 	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1354 	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1355 	p->p_numthreads++;
1356 	kg->kg_numthreads++;
1357 }
1358 
1359 void
1360 thread_unlink(struct thread *td)
1361 {
1362 	struct proc *p = td->td_proc;
1363 	struct ksegrp *kg = td->td_ksegrp;
1364 
1365 	mtx_assert(&sched_lock, MA_OWNED);
1366 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
1367 	p->p_numthreads--;
1368 	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1369 	kg->kg_numthreads--;
1370 	/* could clear a few other things here */
1371 }
1372 
1373 /*
1374  * Purge a ksegrp resource. When a ksegrp is preparing to
1375  * exit, it calls this function.
1376  */
1377 static void
1378 kse_purge_group(struct thread *td)
1379 {
1380 	struct ksegrp *kg;
1381 	struct kse *ke;
1382 
1383 	kg = td->td_ksegrp;
1384  	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1385 	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1386 		KASSERT(ke->ke_state == KES_IDLE,
1387 			("%s: wrong idle KSE state", __func__));
1388 		kse_unlink(ke);
1389 	}
1390 	KASSERT((kg->kg_kses == 1),
1391 		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1392 	KASSERT((kg->kg_numupcalls == 0),
1393 	        ("%s: ksegrp still has %d upcall data structures",
1394 		__func__, kg->kg_numupcalls));
1395 }
1396 
1397 /*
1398  * Purge a process's KSE resource. When a process is preparing to
1399  * exit, it calls kse_purge to release any extra KSE resources in
1400  * the process.
1401  */
1402 static void
1403 kse_purge(struct proc *p, struct thread *td)
1404 {
1405 	struct ksegrp *kg;
1406 	struct kse *ke;
1407 
1408  	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1409 	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1410 		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1411 		p->p_numksegrps--;
1412 		/*
1413 		 * There is no ownership for a KSE; after all threads
1414 		 * in the group have exited, it is possible that some KSEs
1415 		 * were left in the idle queue, so gc them now.
1416 		 */
1417 		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1418 			KASSERT(ke->ke_state == KES_IDLE,
1419 			   ("%s: wrong idle KSE state", __func__));
1420 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1421 			kg->kg_idle_kses--;
1422 			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1423 			kg->kg_kses--;
1424 			kse_stash(ke);
1425 		}
1426 		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1427 		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1428 		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1429 		KASSERT((kg->kg_numupcalls == 0),
1430 		        ("%s: ksegrp still has %d upcall data structures",
1431 			__func__, kg->kg_numupcalls));
1432 
1433 		if (kg != td->td_ksegrp)
1434 			ksegrp_stash(kg);
1435 	}
1436 	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1437 	p->p_numksegrps++;
1438 }
1439 
1440 /*
1441  * This function is intended to be used to initialize a spare thread
1442  * for an upcall. Initialize the thread's large data area outside sched_lock
1443  * for thread_schedule_upcall().
1444  */
1445 void
1446 thread_alloc_spare(struct thread *td, struct thread *spare)
1447 {
1448 	if (td->td_standin)
1449 		return;
1450 	if (spare == NULL)
1451 		spare = thread_alloc();
1452 	td->td_standin = spare;
1453 	bzero(&spare->td_startzero,
1454 	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1455 	spare->td_proc = td->td_proc;
1456 	spare->td_ucred = crhold(td->td_ucred);
1457 }
1458 
1459 /*
1460  * Create a thread and schedule it for upcall on the KSE given.
1461  * Use our thread's standin so that we don't have to allocate one.
1462  */
1463 struct thread *
1464 thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1465 {
1466 	struct thread *td2;
1467 
1468 	mtx_assert(&sched_lock, MA_OWNED);
1469 
1470 	/*
1471 	 * Schedule an upcall thread on the specified kse_upcall;
1472 	 * the kse_upcall must be free.
1473 	 * td must have a spare thread.
1474 	 */
1475 	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1476 	if ((td2 = td->td_standin) != NULL) {
1477 		td->td_standin = NULL;
1478 	} else {
1479 		panic("no reserve thread when scheduling an upcall");
1480 		return (NULL);
1481 	}
1482 	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1483 	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1484 	bcopy(&td->td_startcopy, &td2->td_startcopy,
1485 	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1486 	thread_link(td2, ku->ku_ksegrp);
1487 	/* inherit blocked thread's context */
1488 	cpu_set_upcall(td2, td);
1489 	/* Let the new thread become owner of the upcall */
1490 	ku->ku_owner   = td2;
1491 	td2->td_upcall = ku;
1492 	td2->td_flags  = TDF_SA;
1493 	td2->td_pflags = TDP_UPCALLING;
1494 	td2->td_kse    = NULL;
1495 	td2->td_state  = TDS_CAN_RUN;
1496 	td2->td_inhibitors = 0;
1497 	SIGFILLSET(td2->td_sigmask);
1498 	SIG_CANTMASK(td2->td_sigmask);
1499 	sched_fork_thread(td, td2);
1500 	return (td2);	/* bogus.. should be a void function */
1501 }
1502 
1503 /*
1504  * It is only used when a thread has generated a trap and the process is
1505  * being debugged.
1506  */
1507 void
1508 thread_signal_add(struct thread *td, int sig)
1509 {
1510 	struct proc *p;
1511 	siginfo_t siginfo;
1512 	struct sigacts *ps;
1513 	int error;
1514 
1515 	p = td->td_proc;
1516 	PROC_LOCK_ASSERT(p, MA_OWNED);
1517 	ps = p->p_sigacts;
1518 	mtx_assert(&ps->ps_mtx, MA_OWNED);
1519 
1520 	cpu_thread_siginfo(sig, 0, &siginfo);
1521 	mtx_unlock(&ps->ps_mtx);
1522 	PROC_UNLOCK(p);
1523 	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
1524 	if (error) {
1525 		PROC_LOCK(p);
1526 		sigexit(td, SIGILL);
1527 	}
1528 	PROC_LOCK(p);
1529 	SIGADDSET(td->td_sigmask, sig);
1530 	mtx_lock(&ps->ps_mtx);
1531 }
1532 
1533 void
1534 thread_switchout(struct thread *td)
1535 {
1536 	struct kse_upcall *ku;
1537 	struct thread *td2;
1538 
1539 	mtx_assert(&sched_lock, MA_OWNED);
1540 
1541 	/*
1542 	 * If the outgoing thread is in a threaded group and has never
1543 	 * scheduled an upcall, decide whether this is a short
1544 	 * or long term event and thus whether or not to schedule
1545 	 * an upcall.
1546 	 * If it is a short term event, just suspend it in
1547 	 * a way that takes its KSE with it.
1548 	 * Select the events for which we want to schedule upcalls.
1549 	 * For now it's just sleep.
1550 	 * XXXKSE eventually almost any inhibition could do.
1551 	 */
1552 	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
1553 		/*
1554 		 * Release ownership of upcall, and schedule an upcall
1555 		 * thread, this new upcall thread becomes the owner of
1556 		 * the upcall structure.
1557 		 */
1558 		ku = td->td_upcall;
1559 		ku->ku_owner = NULL;
1560 		td->td_upcall = NULL;
1561 		td->td_flags &= ~TDF_CAN_UNBIND;
1562 		td2 = thread_schedule_upcall(td, ku);
1563 		setrunqueue(td2);
1564 	}
1565 }
1566 
1567 /*
1568  * Setup done on the thread when it enters the kernel.
1569  * XXXKSE Presently only for syscalls but eventually all kernel entries.
1570  */
1571 void
1572 thread_user_enter(struct proc *p, struct thread *td)
1573 {
1574 	struct ksegrp *kg;
1575 	struct kse_upcall *ku;
1576 	struct kse_thr_mailbox *tmbx;
1577 	uint32_t tflags;
1578 
1579 	kg = td->td_ksegrp;
1580 
1581 	/*
1582 	 * First check that we shouldn't just abort.
1583 	 * But check if we are the single thread first!
1584 	 */
1585 	if (p->p_flag & P_SINGLE_EXIT) {
1586 		PROC_LOCK(p);
1587 		mtx_lock_spin(&sched_lock);
1588 		thread_stopped(p);
1589 		thread_exit();
1590 		/* NOTREACHED */
1591 	}
1592 
1593 	/*
1594 	 * If we are doing a syscall in a KSE environment,
1595 	 * note where our mailbox is. There is always the
1596 	 * possibility that we could do this lazily (in kse_reassign()),
1597 	 * but for now do it every time.
1598 	 */
1599 	kg = td->td_ksegrp;
1600 	if (td->td_flags & TDF_SA) {
1601 		ku = td->td_upcall;
1602 		KASSERT(ku, ("%s: no upcall owned", __func__));
1603 		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1604 		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
1605 		ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
1606 		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1607 		if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
1608 		    (ku->ku_mflags & KMF_NOUPCALL)) {
1609 			td->td_mailbox = NULL;
1610 		} else {
1611 			if (td->td_standin == NULL)
1612 				thread_alloc_spare(td, NULL);
1613 			tflags = fuword32(&tmbx->tm_flags);
1614 			/*
1615 			 * On some architectures, the TP register points to the
1616 			 * thread mailbox but not to the kse mailbox, and userland
1617 			 * cannot atomically clear km_curthread; instead it can
1618 			 * use the TP register and set TMF_NOUPCALL in the thread
1619 			 * flags to indicate a critical region.
1620 			 */
1621 			if (tflags & TMF_NOUPCALL) {
1622 				td->td_mailbox = NULL;
1623 			} else {
1624 				td->td_mailbox = tmbx;
1625 				mtx_lock_spin(&sched_lock);
1626 				td->td_flags |= TDF_CAN_UNBIND;
1627 				mtx_unlock_spin(&sched_lock);
1628 			}
1629 		}
1630 	}
1631 }
1632 
1633 /*
1634  * The extra work we go through if we are a threaded process when we
1635  * return to userland.
1636  *
1637  * If we are a KSE process and returning to user mode, check for
1638  * extra work to do before we return (e.g. for more syscalls
1639  * to complete first).  If we were in a critical section, we should
1640  * just return to let it finish. Same if we were in the UTS (in
1641  * which case the mailbox's context's busy indicator will be set).
1642  * The only traps we support will have set the mailbox.
1643  * We will clear it here.
1644  */
1645 int
1646 thread_userret(struct thread *td, struct trapframe *frame)
1647 {
1648 	int error = 0, upcalls, uts_crit;
1649 	struct kse_upcall *ku;
1650 	struct ksegrp *kg, *kg2;
1651 	struct proc *p;
1652 	struct timespec ts;
1653 
1654 	p = td->td_proc;
1655 	kg = td->td_ksegrp;
1656 	ku = td->td_upcall;
1657 
1658 	/* Nothing to do with bound thread */
1659 	if (!(td->td_flags & TDF_SA))
1660 		return (0);
1661 
1662 	/*
1663 	 * A stat clock interrupt hit in userland and we are
1664 	 * returning from the interrupt; charge the thread's
1665 	 * userland time for the UTS.
1666 	 */
1667 	if (td->td_flags & TDF_USTATCLOCK) {
1668 		thread_update_usr_ticks(td, 1);
1669 		mtx_lock_spin(&sched_lock);
1670 		td->td_flags &= ~TDF_USTATCLOCK;
1671 		mtx_unlock_spin(&sched_lock);
1672 		if (kg->kg_completed ||
1673 		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
1674 			thread_user_enter(p, td);
1675 	}
1676 
1677 	uts_crit = (td->td_mailbox == NULL);
1678 	/*
1679 	 * Optimisation:
1680 	 * This thread has not started any upcall.
1681 	 * If there is no work to report other than ourselves,
1682 	 * then it can return directly to userland.
1683 	 */
1684 	if (TD_CAN_UNBIND(td)) {
1685 		mtx_lock_spin(&sched_lock);
1686 		td->td_flags &= ~TDF_CAN_UNBIND;
1687 		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
1688 		    (kg->kg_completed == NULL) &&
1689 		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1690 		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
1691 			mtx_unlock_spin(&sched_lock);
1692 			thread_update_usr_ticks(td, 0);
1693 			nanotime(&ts);
1694 			error = copyout(&ts,
1695 				(caddr_t)&ku->ku_mailbox->km_timeofday,
1696 				sizeof(ts));
1697 			td->td_mailbox = 0;
1698 			ku->ku_mflags = 0;
1699 			if (error)
1700 				goto out;
1701 			return (0);
1702 		}
1703 		mtx_unlock_spin(&sched_lock);
1704 		thread_export_context(td, 0);
1705 		/*
1706 		 * structure, so we can go to userland.
1707 		 * Turn ourselves into an upcall thread.
1708 		 * Turn ourself into an upcall thread.
1709 		 */
1710 		td->td_pflags |= TDP_UPCALLING;
1711 	} else if (td->td_mailbox && (ku == NULL)) {
1712 		thread_export_context(td, 1);
1713 		PROC_LOCK(p);
1714 		/*
1715 		 * There are upcall threads waiting for
1716 		 * work to do, wake one of them up.
1717 		 * XXXKSE Maybe wake all of them up.
1718 		 */
1719 		if (kg->kg_upsleeps)
1720 			wakeup_one(&kg->kg_completed);
1721 		mtx_lock_spin(&sched_lock);
1722 		thread_stopped(p);
1723 		thread_exit();
1724 		/* NOTREACHED */
1725 	}
1726 
1727 	KASSERT(ku != NULL, ("upcall is NULL\n"));
1728 	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1729 
1730 	if (p->p_numthreads > max_threads_per_proc) {
1731 		max_threads_hits++;
1732 		PROC_LOCK(p);
1733 		mtx_lock_spin(&sched_lock);
1734 		p->p_maxthrwaits++;
1735 		while (p->p_numthreads > max_threads_per_proc) {
1736 			upcalls = 0;
1737 			FOREACH_KSEGRP_IN_PROC(p, kg2) {
1738 				if (kg2->kg_numupcalls == 0)
1739 					upcalls++;
1740 				else
1741 					upcalls += kg2->kg_numupcalls;
1742 			}
1743 			if (upcalls >= max_threads_per_proc)
1744 				break;
1745 			mtx_unlock_spin(&sched_lock);
1746 			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1747 			    "maxthreads", NULL)) {
1748 				mtx_lock_spin(&sched_lock);
1749 				break;
1750 			} else {
1751 				mtx_lock_spin(&sched_lock);
1752 			}
1753 		}
1754 		p->p_maxthrwaits--;
1755 		mtx_unlock_spin(&sched_lock);
1756 		PROC_UNLOCK(p);
1757 	}
1758 
1759 	if (td->td_pflags & TDP_UPCALLING) {
1760 		uts_crit = 0;
1761 		kg->kg_nextupcall = ticks+kg->kg_upquantum;
1762 		/*
1763 		 * There is no more work to do and we are going to ride
1764 		 * this thread up to userland as an upcall.
1765 		 * Do the last parts of the setup needed for the upcall.
1766 		 */
1767 		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1768 		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1769 
1770 		td->td_pflags &= ~TDP_UPCALLING;
1771 		if (ku->ku_flags & KUF_DOUPCALL) {
1772 			mtx_lock_spin(&sched_lock);
1773 			ku->ku_flags &= ~KUF_DOUPCALL;
1774 			mtx_unlock_spin(&sched_lock);
1775 		}
1776 		/*
1777 		 * Set user context to the UTS
1778 		 */
1779 		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
1780 			cpu_set_upcall_kse(td, ku);
1781 			error = suword(&ku->ku_mailbox->km_curthread, 0);
1782 			if (error)
1783 				goto out;
1784 		}
1785 
1786 		/*
1787 		 * Unhook the list of completed threads.
1788 		 * Anything that completes after this gets to
1789 		 * come in next time.
1790 		 * Put the list of completed thread mailboxes on
1791 		 * this KSE's mailbox.
1792 		 */
1793 		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
1794 		    (error = thread_link_mboxes(kg, ku)) != 0)
1795 			goto out;
1796 	}
1797 	if (!uts_crit) {
1798 		nanotime(&ts);
1799 		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
1800 	}
1801 
1802 out:
1803 	if (error) {
1804 		/*
1805 		 * Things are going to be so screwed we should just kill
1806 		 * the process.
1807 		 * How do we do that?
1808 		 */
1809 		PROC_LOCK(td->td_proc);
1810 		psignal(td->td_proc, SIGSEGV);
1811 		PROC_UNLOCK(td->td_proc);
1812 	} else {
1813 		/*
1814 		 * Optimisation:
1815 		 * Ensure that we have a spare thread available,
1816 		 * for when we re-enter the kernel.
1817 		 */
1818 		if (td->td_standin == NULL)
1819 			thread_alloc_spare(td, NULL);
1820 	}
1821 
1822 	ku->ku_mflags = 0;
1823 	/*
1824 	 * Clear thread mailbox first, then clear system tick count.
1825 	 * The order is important because thread_statclock() uses the
1826 	 * mailbox pointer to see if it is a userland thread or
1827 	 * a UTS kernel thread.
1828 	 */
1829 	td->td_mailbox = NULL;
1830 	td->td_usticks = 0;
1831 	return (error);	/* go sync */
1832 }
1833 
1834 /*
1835  * Enforce single-threading.
1836  *
1837  * Returns 1 if the caller must abort (another thread is waiting to
1838  * exit the process or similar). Process is locked!
1839  * Returns 0 when you are successfully the only thread running.
1840  * A process has successfully single threaded in the suspend mode when
1841  * there are no threads in user mode. Threads in the kernel must be
1842  * allowed to continue until they get to the user boundary. They may even
1843  * copy out their return values and data before suspending. They may however
1844  * be accelerated in reaching the user boundary as we will wake up
1845  * any sleeping threads that are interruptible (PCATCH).
1846  */
1847 int
1848 thread_single(int force_exit)
1849 {
1850 	struct thread *td;
1851 	struct thread *td2;
1852 	struct proc *p;
1853 
1854 	td = curthread;
1855 	p = td->td_proc;
1856 	mtx_assert(&Giant, MA_OWNED);
1857 	PROC_LOCK_ASSERT(p, MA_OWNED);
1858 	KASSERT((td != NULL), ("curthread is NULL"));
1859 
1860 	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
1861 		return (0);
1862 
1863 	/* Is someone already single threading? */
1864 	if (p->p_singlethread)
1865 		return (1);
1866 
1867 	if (force_exit == SINGLE_EXIT) {
1868 		p->p_flag |= P_SINGLE_EXIT;
1869 	} else
1870 		p->p_flag &= ~P_SINGLE_EXIT;
1871 	p->p_flag |= P_STOPPED_SINGLE;
1872 	mtx_lock_spin(&sched_lock);
1873 	p->p_singlethread = td;
1874 	while ((p->p_numthreads - p->p_suspcount) != 1) {
1875 		FOREACH_THREAD_IN_PROC(p, td2) {
1876 			if (td2 == td)
1877 				continue;
1878 			td2->td_flags |= TDF_ASTPENDING;
1879 			if (TD_IS_INHIBITED(td2)) {
1880 				if (force_exit == SINGLE_EXIT) {
1881 					if (TD_IS_SUSPENDED(td2)) {
1882 						thread_unsuspend_one(td2);
1883 					}
1884 					if (TD_ON_SLEEPQ(td2) &&
1885 					    (td2->td_flags & TDF_SINTR)) {
1886 						if (td2->td_flags & TDF_CVWAITQ)
1887 							cv_abort(td2);
1888 						else
1889 							abortsleep(td2);
1890 					}
1891 				} else {
1892 					if (TD_IS_SUSPENDED(td2))
1893 						continue;
1894 					/*
1895 					 * Maybe other inhibited states too?
1896 					 * XXXKSE Is it totally safe to
1897 					 * suspend a non-interruptible thread?
1898 					 */
1899 					if (td2->td_inhibitors &
1900 					    (TDI_SLEEPING | TDI_SWAPPED))
1901 						thread_suspend_one(td2);
1902 				}
1903 			}
1904 		}
1905 		/*
1906 		 * Maybe we suspended some threads.. was it enough?
1907 		 */
1908 		if ((p->p_numthreads - p->p_suspcount) == 1)
1909 			break;
1910 
1911 		/*
1912 		 * Wake us up when everyone else has suspended.
1913 		 * In the meantime we suspend as well.
1914 		 */
1915 		thread_suspend_one(td);
1916 		DROP_GIANT();
1917 		PROC_UNLOCK(p);
1918 		p->p_stats->p_ru.ru_nvcsw++;
1919 		mi_switch();
1920 		mtx_unlock_spin(&sched_lock);
1921 		PICKUP_GIANT();
1922 		PROC_LOCK(p);
1923 		mtx_lock_spin(&sched_lock);
1924 	}
1925 	if (force_exit == SINGLE_EXIT) {
1926 		if (td->td_upcall)
1927 			upcall_remove(td);
1928 		kse_purge(p, td);
1929 	}
1930 	mtx_unlock_spin(&sched_lock);
1931 	return (0);
1932 }
1933 
1934 /*
1935  * Called in from locations that can safely check to see
1936  * whether we have to suspend or at least throttle for a
1937  * single-thread event (e.g. fork).
1938  *
1939  * Such locations include userret().
1940  * If the "return_instead" argument is non-zero, the thread must be able to
1941  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1942  *
1943  * The 'return_instead' argument tells the function if it may do a
1944  * thread_exit() or suspend, or whether the caller must abort and back
1945  * out instead.
1946  *
1947  * If the thread that set the single_threading request has set the
1948  * P_SINGLE_EXIT bit in the process flags then this call will never return
1949  * if 'return_instead' is false, but will exit.
1950  *
1951  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1952  *---------------+--------------------+---------------------
1953  *       0       | returns 0          |   returns 0 or 1
1954  *               | when ST ends       |   immediately
1955  *---------------+--------------------+---------------------
1956  *       1       | thread exits       |   returns 1
1957  *               |                    |  immediately
1958  * 0 = thread_exit() or suspension ok,
1959  * other = return error instead of stopping the thread.
1960  *
1961  * While a full suspension is under effect, even a single threading
1962  * thread would be suspended if it made this call (but it shouldn't).
1963  * This call should only be made from places where
1964  * thread_exit() would be safe as that may be the outcome unless
1965  * return_instead is set.
1966  */
1967 int
1968 thread_suspend_check(int return_instead)
1969 {
1970 	struct thread *td;
1971 	struct proc *p;
1972 
1973 	td = curthread;
1974 	p = td->td_proc;
1975 	PROC_LOCK_ASSERT(p, MA_OWNED);
1976 	while (P_SHOULDSTOP(p)) {
1977 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1978 			KASSERT(p->p_singlethread != NULL,
1979 			    ("singlethread not set"));
1980 			/*
1981 			 * The only suspension in action is a
1982 			 * single-threading. Single threader need not stop.
1983 			 * XXX Should be safe to access unlocked
1984 			 * as it can only be set to be true by us.
1985 			 */
1986 			if (p->p_singlethread == td)
1987 				return (0);	/* Exempt from stopping. */
1988 		}
1989 		if (return_instead)
1990 			return (1);
1991 
1992 		mtx_lock_spin(&sched_lock);
1993 		thread_stopped(p);
1994 		/*
1995 		 * If the process is waiting for us to exit,
1996 		 * this thread should just suicide.
1997 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1998 		 */
1999 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
2000 			while (mtx_owned(&Giant))
2001 				mtx_unlock(&Giant);
2002 			if (p->p_flag & P_SA)
2003 				thread_exit();
2004 			else
2005 				thr_exit1();
2006 		}
2007 
2008 		/*
2009 		 * When a thread suspends, it just
2010 		 * moves to the process's suspend queue
2011 		 * and stays there.
2012 		 */
2013 		thread_suspend_one(td);
2014 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
2015 			if (p->p_numthreads == p->p_suspcount) {
2016 				thread_unsuspend_one(p->p_singlethread);
2017 			}
2018 		}
2019 		DROP_GIANT();
2020 		PROC_UNLOCK(p);
2021 		p->p_stats->p_ru.ru_nivcsw++;
2022 		mi_switch();
2023 		mtx_unlock_spin(&sched_lock);
2024 		PICKUP_GIANT();
2025 		PROC_LOCK(p);
2026 	}
2027 	return (0);
2028 }
2029 
2030 void
2031 thread_suspend_one(struct thread *td)
2032 {
2033 	struct proc *p = td->td_proc;
2034 
2035 	mtx_assert(&sched_lock, MA_OWNED);
2036 	PROC_LOCK_ASSERT(p, MA_OWNED);
2037 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
2038 	p->p_suspcount++;
2039 	TD_SET_SUSPENDED(td);
2040 	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
2041 	/*
2042 	 * Hack: If we are suspending but are on the sleep queue
2043 	 * then we are in msleep or the cv equivalent. We
2044 	 * want to look like we have two Inhibitors.
2045 	 * May already be set.. doesn't matter.
2046 	 */
2047 	if (TD_ON_SLEEPQ(td))
2048 		TD_SET_SLEEPING(td);
2049 }
2050 
2051 void
2052 thread_unsuspend_one(struct thread *td)
2053 {
2054 	struct proc *p = td->td_proc;
2055 
2056 	mtx_assert(&sched_lock, MA_OWNED);
2057 	PROC_LOCK_ASSERT(p, MA_OWNED);
2058 	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
2059 	TD_CLR_SUSPENDED(td);
2060 	p->p_suspcount--;
2061 	setrunnable(td);
2062 }
2063 
2064 /*
2065  * Allow all threads blocked by single threading to continue running.
2066  */
2067 void
2068 thread_unsuspend(struct proc *p)
2069 {
2070 	struct thread *td;
2071 
2072 	mtx_assert(&sched_lock, MA_OWNED);
2073 	PROC_LOCK_ASSERT(p, MA_OWNED);
2074 	if (!P_SHOULDSTOP(p)) {
2075 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2076 			thread_unsuspend_one(td);
2077 		}
2078 	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
2079 	    (p->p_numthreads == p->p_suspcount)) {
2080 		/*
2081 		 * Stopping everything also did the job for the single
2082 		 * threading request. Now that we've downgraded to single-threaded,
2083 		 * let it continue.
2084 		 */
2085 		thread_unsuspend_one(p->p_singlethread);
2086 	}
2087 }
2088 
2089 void
2090 thread_single_end(void)
2091 {
2092 	struct thread *td;
2093 	struct proc *p;
2094 
2095 	td = curthread;
2096 	p = td->td_proc;
2097 	PROC_LOCK_ASSERT(p, MA_OWNED);
2098 	p->p_flag &= ~P_STOPPED_SINGLE;
2099 	mtx_lock_spin(&sched_lock);
2100 	p->p_singlethread = NULL;
2101 	/*
2102 	 * If there are other threads they may now run,
2103 	 * unless of course there is a blanket 'stop order'
2104 	 * on the process. The single threader must be allowed
2105 	 * to continue however as this is a bad place to stop.
2106 	 */
2107 	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
2108 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2109 			thread_unsuspend_one(td);
2110 		}
2111 	}
2112 	mtx_unlock_spin(&sched_lock);
2113 }
2114 
2115 
2116