xref: /freebsd/sys/kern/kern_thread.c (revision 2b035cbe5a817f7c86a12d30997ab134c2cf4e3f)
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 150;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 50;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
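
/*
 * Illustrative note (added; not in the original source): RANGEOF() yields
 * the byte span between two members of a struct.  This file uses it to
 * zero or copy only the "startzero..endzero" and "startcopy..endcopy"
 * regions that proc.h lays out contiguously, e.g.:
 *
 *	bzero(&td->td_startzero,
 *	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
 *
 * This relies on the marker members being declared in order, which C
 * guarantees for struct member layout.
 */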

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");
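
/*
 * Illustrative usage (added; not in the original source): the debug knob
 * above can be read and set from userland with sysctl(8), e.g.:
 *
 *	sysctl kern.threads.virtual_cpu		(read the current value)
 *	sysctl kern.threads.virtual_cpu=4	(pretend there are 4 cpus)
 *
 * Writing 0 reverts to the real cpu count (mp_ncpus, or 1 on UP kernels),
 * and the override only takes effect in kse_create() when
 * kern.threads.debug is nonzero.
 */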

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread	*td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu	= NOCPU;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread	*td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread	*td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread	*td;

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse	*ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp	*kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state	= KES_UNQUEUED;
	ke->ke_proc	= p;
	ke->ke_ksegrp	= kg;
	ke->ke_thread	= NULL;
	ke->ke_oncpu	= NOCPU;
	ke->ke_flags	= 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable   = 0;
	kg->kg_kses       = 0;
	kg->kg_runq_kses  = 0; /* XXXKSE change name */
	kg->kg_idle_kses  = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the ksegrp
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}
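
/*
 * Illustrative picture (added; not in the original source) of the linkage
 * proc_linkup() establishes for a newly created process:
 *
 *	proc
 *	 `-- ksegrp		(on p_ksegrps)
 *	      |-- kse		(on kg_kseq)
 *	      `-- thread	(on kg_threads and p_threads)
 */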

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct kse_upcall *ku, *ku2;
	int    error, count;

	p = td->td_proc;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	kg = td->td_ksegrp;
	count = 0;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((kg->kg_numupcalls - count) == 1 &&
	    (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	ke = td->td_kse;
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_THREADED;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) { /* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct timespec ts, ts2, ts3, timeout;
	struct timeval tv;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		getnanouptime(&ts);
		timespecadd(&ts, &timeout);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	mtx_lock_spin(&sched_lock);
	/* Change OURSELF to become an upcall. */
	td->td_flags = TDF_UPCALLING;
#if 0	/* XXX This shouldn't be necessary */
	if (p->p_sflag & PS_NEEDSIGCHK)
		td->td_flags |= TDF_ASTPENDING;
#endif
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
	       (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
			"kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
		kg->kg_upsleeps--;
		PROC_UNLOCK(p);
		if (uap->timeout == NULL || error != EWOULDBLOCK)
			return (0);
		getnanouptime(&ts2);
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		PROC_LOCK(p);
	}
	PROC_UNLOCK(p);
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_THREADED))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		           (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't have an always-present cpu counter. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (thread_debug && virtual_cpu != 0)
		ncpus = virtual_cpu;

	/* Easier to just set it than to test and set */
	PROC_LOCK(p);
	p->p_flag |= P_THREADED;
	PROC_UNLOCK(p);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* There is a race here, but it is cheap; we recheck below. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		      kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize KSE group, optimized for MP.
		 * Create as many KSEs as there are physical cpus; this
		 * increases concurrency even if userland is not MP safe
		 * and can only run on a single CPU (true for early versions
		 * of libpthread).
		 * In an ideal world, every physical cpu should execute a
		 * thread.  If there are enough KSEs, threads in the kernel
		 * can be executed in parallel on different cpus at full
		 * speed; concurrency in the kernel shouldn't be restricted
		 * by the number of upcalls userland provides.
		 * Adding more upcall structures only increases concurrency
		 * in userland.
		 * The highest-performance configuration is:
		 * N kses = N upcalls = N physical cpus
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			      ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		upcall_free(newku);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread,
		 * create an initial upcall thread to own it.
		 */
		thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread has no upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
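
#if 0
/*
 * Illustrative sketch (added; not in the original source) of how a userland
 * threading library might enter KSE mode with the syscall above.  The
 * mailbox setup is simplified; uts_entry() and UTS_STACK_SIZE are
 * hypothetical names, not part of any real API.
 */
extern void	uts_entry(struct kse_mailbox *km);	/* hypothetical UTS */
#define	UTS_STACK_SIZE	(64 * 1024)			/* assumed size */

static struct kse_mailbox km;

static void
start_kse_mode(void)
{
	bzero(&km, sizeof(km));
	km.km_func = uts_entry;			/* upcall entry point */
	km.km_stack.ss_sp = malloc(UTS_STACK_SIZE);
	km.km_stack.ss_size = UTS_STACK_SIZE;
	kse_create(&km, 0);	/* stay in the current ksegrp */
}
#endif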

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie thread, kse, ksegrp and upcall resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error, temp;
	mcontext_t mc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	/* Export clock ticks spent in kernel mode. */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp)) {
		error = EFAULT;
		goto bad;
	}

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock; it is no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}
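
#if 0
/*
 * Illustrative sketch (added; not in the original source): once mailboxes
 * are chained through tm_next and handed up via km_completed (see
 * thread_link_mboxes() below), a UTS would walk the list roughly like
 * this; uts_enqueue() is a hypothetical helper.
 */
extern void	uts_enqueue(struct kse_thr_mailbox *tmbx);

static void
uts_collect_completed(struct kse_mailbox *km)
{
	struct kse_thr_mailbox *tmbx, *next;

	for (tmbx = km->km_completed; tmbx != NULL; tmbx = next) {
		next = tmbx->tm_next;
		uts_enqueue(tmbx);	/* make the saved context runnable */
	}
	km->km_completed = NULL;
}
#endif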

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (td->td_ksegrp->kg_numupcalls == 0)
		return (-1);
	if (user) {
		/* Currently always done via ast() */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in the future, so if the thread
			 * mailbox is NULL, it must be the UTS kernel;
			 * don't account clock ticks for it.
			 */
		}
	}
	return (0);
}
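
/*
 * Added note (not in the original source): user-mode ticks accumulate in
 * td_uuticks and kernel-mode ticks in td_usticks; they are flushed to the
 * thread mailbox later by thread_update_usr_ticks() and
 * thread_export_context(), so statclock itself never touches user memory.
 */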

/*
 * Export stat clock ticks to userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	struct ksegrp *kg;
	caddr_t addr;
	uint uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	if (user) {
		uticks = td->td_uuticks;
		td->td_uuticks = 0;
		addr = (caddr_t)&tmbx->tm_uticks;
	} else {
		uticks = td->td_usticks;
		td->td_usticks = 0;
		addr = (caddr_t)&tmbx->tm_sticks;
	}
	if (uticks) {
		if (suword(addr, uticks+fuword(addr))) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	kg = td->td_ksegrp;
	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
		mtx_lock_spin(&sched_lock);
		td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp	*kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		thread_unlink(td);
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and the owner thread exits only when the process is
		 * in the exiting state, an upcall to userland is no
		 * longer needed and deleting the upcall structure is
		 * safe here.
		 * So when all threads in a group have exited, all the
		 * upcalls in the group should be automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse	= NULL;
		td->td_state	= TDS_INACTIVE;
#if 0
		td->td_proc	= NULL;
#endif
		td->td_ksegrp	= NULL;
		td->td_last_kse	= NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if !defined(__alpha__) && !defined(__powerpc__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and sched locks not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_ksegrp   = kg;
	td->td_last_kse = NULL;
	td->td_flags    = 0;
	td->td_kse      = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp's resources. When a ksegrp is preparing to
 * exit, it calls this function.
 */
static void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
			("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	        ("%s: ksegrp still has %d upcall structures",
		__func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
static void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership for KSEs; after all the threads
		 * in a group have exited, it is possible that some KSEs
		 * were left on the idle queue. Garbage-collect them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			   ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		        ("%s: ksegrp still has %d upcall structures",
			__func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on the specified kse_upcall;
	 * the kse_upcall must be free and td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* inherit blocked thread's context */
	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
	cpu_set_upcall(td2, td->td_pcb);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner   = td2;
	td2->td_upcall = ku;
	td2->td_flags  = TDF_UPCALLING;
#if 0	/* XXX This shouldn't be necessary */
	if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
		td2->td_flags |= TDF_ASTPENDING;
#endif
	td2->td_kse    = NULL;
	td2->td_state  = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	setrunqueue(td2);
	return (td2);	/* bogus.. should be a void function */
}

void
thread_signal_add(struct thread *td, int sig)
{
	struct kse_upcall *ku;
	struct proc *p;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	td = curthread;
	ku = td->td_upcall;
	p = td->td_proc;

	PROC_UNLOCK(p);
	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	if (error)
		goto error;

	SIGADDSET(ss, sig);

	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
	if (error)
		goto error;

	PROC_LOCK(p);
	return;
error:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
}

/*
 * Schedule an upcall to notify a KSE process that it received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);

	return;
}

void
thread_switchout(struct thread *td)
{
	struct kse_upcall *ku;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in a threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep.
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
		/*
		 * Release ownership of upcall, and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_flags &= ~TDF_CAN_UNBIND;
		thread_schedule_upcall(td, ku);
	}
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;

	kg = td->td_ksegrp;

	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	PROC_UNLOCK(p);

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (kg->kg_numupcalls) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
		ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags);
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((tmbx == NULL) || (tmbx == (void *)-1)) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			mtx_lock_spin(&sched_lock);
			if (ku->ku_mflags & KMF_NOUPCALL)
				td->td_flags &= ~TDF_CAN_UNBIND;
			else
				td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		}
	}
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error = 0, upcalls, uts_crit;
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Nothing to do with non-threaded group/process */
	if (td->td_ksegrp->kg_numupcalls == 0)
		return (0);

	/*
	 * Stat clock interrupt hit in userland, it
	 * is returning from interrupt, charge thread's
	 * userland time for UTS.
	 */
	if (td->td_flags & TDF_USTATCLOCK) {
		thread_update_usr_ticks(td, 1);
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_USTATCLOCK;
		mtx_unlock_spin(&sched_lock);
		if (kg->kg_completed ||
		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
			thread_user_enter(p, td);
	}

	uts_crit = (td->td_mailbox == NULL);
	ku = td->td_upcall;
	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (kg->kg_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
			mtx_unlock_spin(&sched_lock);
			thread_update_usr_ticks(td, 0);
			nanotime(&ts);
			error = copyout(&ts,
				(caddr_t)&ku->ku_mailbox->km_timeofday,
				sizeof(ts));
			td->td_mailbox = NULL;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		mtx_unlock_spin(&sched_lock);
		error = thread_export_context(td);
		if (error) {
			/*
			 * Failing to do the KSE operation just defaults
			 * back to synchronous operation, so just return from
			 * the syscall.
			 */
			goto out;
		}
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_UPCALLING;
		mtx_unlock_spin(&sched_lock);
	} else if (td->td_mailbox && (ku == NULL)) {
		error = thread_export_context(td);
		/* possibly upcall with error? */
		PROC_LOCK(p);
		/*
		 * There are upcall threads waiting for
		 * work to do, wake one of them up.
		 * XXXKSE Maybe wake all of them up.
		 */
		if (!error && kg->kg_upsleeps)
			wakeup_one(&kg->kg_completed);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		while (p->p_numthreads > max_threads_per_proc) {
			if (P_SHOULDSTOP(p))
				break;
			upcalls = 0;
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			if (upcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			p->p_maxthrwaits++;
			msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", NULL);
			p->p_maxthrwaits--;
			mtx_lock_spin(&sched_lock);
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}

	if (td->td_flags & TDF_UPCALLING) {
		uts_crit = 0;
		kg->kg_nextupcall = ticks+kg->kg_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL)
			ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);

		/*
		 * Set user context to the UTS
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku);
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(kg, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGSEGV);
		PROC_UNLOCK(td->td_proc);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td, NULL);
	}

	ku->ku_mflags = 0;
	/*
	 * Clear the thread mailbox first, then clear the system tick count.
	 * The order is important because thread_statclock() uses the
	 * mailbox pointer to see if it is a userland thread or
	 * a UTS kernel thread.
	 */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible. (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			if (p->p_flag & P_THREADED)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}