xref: /freebsd/sys/kern/kern_thread.c (revision 729362425c09cf6b362366aabc6fb547eee8035a)
1 /*
2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3  *  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 #include <sys/sysctl.h>
40 #include <sys/sysproto.h>
41 #include <sys/filedesc.h>
42 #include <sys/sched.h>
43 #include <sys/signalvar.h>
44 #include <sys/sx.h>
45 #include <sys/tty.h>
46 #include <sys/user.h>
47 #include <sys/jail.h>
48 #include <sys/kse.h>
49 #include <sys/ktr.h>
50 #include <sys/ucontext.h>
51 
52 #include <vm/vm.h>
53 #include <vm/vm_object.h>
54 #include <vm/pmap.h>
55 #include <vm/uma.h>
56 #include <vm/vm_map.h>
57 
58 #include <machine/frame.h>
59 
60 /*
61  * KSEGRP related storage.
62  */
63 static uma_zone_t ksegrp_zone;
64 static uma_zone_t kse_zone;
65 static uma_zone_t thread_zone;
66 static uma_zone_t upcall_zone;
67 
68 /* DEBUG ONLY */
69 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
70 static int thread_debug = 0;
71 SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
72 	&thread_debug, 0, "thread debug");
73 
74 static int max_threads_per_proc = 30;
75 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
76 	&max_threads_per_proc, 0, "Limit on threads per proc");
77 
78 static int max_groups_per_proc = 5;
79 SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
80 	&max_groups_per_proc, 0, "Limit on thread groups per proc");
81 
82 static int max_threads_hits;
83 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
84 	&max_threads_hits, 0, "Number of times the thread limit was hit");
85 
86 static int virtual_cpu;
87 
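/*
 * RANGEOF() gives the size in bytes of the span of struct members from
 * 'start' up to (but not including) 'end', so that a contiguous section
 * of fields can be cleared or copied in one call, e.g.:
 *
 *	bzero(&newkg->kg_startzero,
 *	    RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
 */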
88 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
89 
90 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
91 TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
92 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
93 TAILQ_HEAD(, kse_upcall) zombie_upcalls =
94 	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
95 struct mtx kse_zombie_lock;
96 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
97 
98 static void kse_purge(struct proc *p, struct thread *td);
99 static void kse_purge_group(struct thread *td);
100 static int thread_update_usr_ticks(struct thread *td, int user);
101 static void thread_alloc_spare(struct thread *td, struct thread *spare);
102 
103 static int
104 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
105 {
106 	int error, new_val;
107 	int def_val;
108 
109 #ifdef SMP
110 	def_val = mp_ncpus;
111 #else
112 	def_val = 1;
113 #endif
114 	if (virtual_cpu == 0)
115 		new_val = def_val;
116 	else
117 		new_val = virtual_cpu;
118 	error = sysctl_handle_int(oidp, &new_val, 0, req);
119 	if (error != 0 || req->newptr == NULL)
120 		return (error);
121 	if (new_val < 0)
122 		return (EINVAL);
123 	virtual_cpu = new_val;
124 	return (0);
125 }
126 
127 /* DEBUG ONLY */
128 SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
129 	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
130 	"debug virtual cpus");
131 
132 /*
133  * Prepare a thread for use.
134  */
135 static void
136 thread_ctor(void *mem, int size, void *arg)
137 {
138 	struct thread	*td;
139 
140 	td = (struct thread *)mem;
141 	td->td_state = TDS_INACTIVE;
142 }
143 
144 /*
145  * Reclaim a thread after use.
146  */
147 static void
148 thread_dtor(void *mem, int size, void *arg)
149 {
150 	struct thread	*td;
151 
152 	td = (struct thread *)mem;
153 
154 #ifdef INVARIANTS
155 	/* Verify that this thread is in a safe state to free. */
156 	switch (td->td_state) {
157 	case TDS_INHIBITED:
158 	case TDS_RUNNING:
159 	case TDS_CAN_RUN:
160 	case TDS_RUNQ:
161 		/*
162 		 * We must never unlink a thread that is in one of
163 		 * these states, because it is currently active.
164 		 */
165 		panic("bad state for thread unlinking");
166 		/* NOTREACHED */
167 	case TDS_INACTIVE:
168 		break;
169 	default:
170 		panic("bad thread state");
171 		/* NOTREACHED */
172 	}
173 #endif
174 }
175 
176 /*
177  * Initialize type-stable parts of a thread (when newly created).
178  */
179 static void
180 thread_init(void *mem, int size)
181 {
182 	struct thread	*td;
183 
184 	td = (struct thread *)mem;
185 	mtx_lock(&Giant);
186 	pmap_new_thread(td, 0);
187 	mtx_unlock(&Giant);
188 	cpu_thread_setup(td);
189 	td->td_sched = (struct td_sched *)&td[1];
190 }
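
/*
 * Note: the scheduler-private data lives in the same allocation, directly
 * after the thread structure; the zone is created with sched_sizeof_thread()
 * in threadinit() to leave room for it.  kse_init() and ksegrp_init() below
 * use the same layout trick.
 */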
191 
192 /*
193  * Tear down type-stable parts of a thread (just before being discarded).
194  */
195 static void
196 thread_fini(void *mem, int size)
197 {
198 	struct thread	*td;
199 
200 	td = (struct thread *)mem;
201 	pmap_dispose_thread(td);
202 }
203 
204 /*
205  * Initialize type-stable parts of a kse (when newly created).
206  */
207 static void
208 kse_init(void *mem, int size)
209 {
210 	struct kse	*ke;
211 
212 	ke = (struct kse *)mem;
213 	ke->ke_sched = (struct ke_sched *)&ke[1];
214 }
215 
216 /*
217  * Initialize type-stable parts of a ksegrp (when newly created).
218  */
219 static void
220 ksegrp_init(void *mem, int size)
221 {
222 	struct ksegrp	*kg;
223 
224 	kg = (struct ksegrp *)mem;
225 	kg->kg_sched = (struct kg_sched *)&kg[1];
226 }
227 
228 /*
229  * Link a KSE into its KSE group.
230  */
231 void
232 kse_link(struct kse *ke, struct ksegrp *kg)
233 {
234 	struct proc *p = kg->kg_proc;
235 
236 	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
237 	kg->kg_kses++;
238 	ke->ke_state	= KES_UNQUEUED;
239 	ke->ke_proc	= p;
240 	ke->ke_ksegrp	= kg;
241 	ke->ke_thread	= NULL;
242 	ke->ke_oncpu	= NOCPU;
243 	ke->ke_flags	= 0;
244 }
245 
246 void
247 kse_unlink(struct kse *ke)
248 {
249 	struct ksegrp *kg;
250 
251 	mtx_assert(&sched_lock, MA_OWNED);
252 	kg = ke->ke_ksegrp;
253 	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
254 	if (ke->ke_state == KES_IDLE) {
255 		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
256 		kg->kg_idle_kses--;
257 	}
258 	if (--kg->kg_kses == 0)
259 		ksegrp_unlink(kg);
260 	/*
261 	 * Aggregate stats from the KSE
262 	 */
263 	kse_stash(ke);
264 }
265 
266 void
267 ksegrp_link(struct ksegrp *kg, struct proc *p)
268 {
269 
270 	TAILQ_INIT(&kg->kg_threads);
271 	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
272 	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
273 	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
274 	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
275 	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
276 	kg->kg_proc = p;
277 	/*
278 	 * The following counters are in the -zero- section
279 	 * and may not need clearing.
280 	 */
281 	kg->kg_numthreads = 0;
282 	kg->kg_runnable   = 0;
283 	kg->kg_kses       = 0;
284 	kg->kg_runq_kses  = 0; /* XXXKSE change name */
285 	kg->kg_idle_kses  = 0;
286 	kg->kg_numupcalls = 0;
287 	/* link it in now that it's consistent */
288 	p->p_numksegrps++;
289 	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
290 }
291 
292 void
293 ksegrp_unlink(struct ksegrp *kg)
294 {
295 	struct proc *p;
296 
297 	mtx_assert(&sched_lock, MA_OWNED);
298 	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
299 	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
300 	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
301 
302 	p = kg->kg_proc;
303 	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
304 	p->p_numksegrps--;
305 	/*
306 	 * Aggregate stats from the ksegrp
307 	 */
308 	ksegrp_stash(kg);
309 }
310 
311 struct kse_upcall *
312 upcall_alloc(void)
313 {
314 	struct kse_upcall *ku;
315 
316 	ku = uma_zalloc(upcall_zone, M_WAITOK);
317 	bzero(ku, sizeof(*ku));
318 	return (ku);
319 }
320 
321 void
322 upcall_free(struct kse_upcall *ku)
323 {
324 
325 	uma_zfree(upcall_zone, ku);
326 }
327 
328 void
329 upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
330 {
331 
332 	mtx_assert(&sched_lock, MA_OWNED);
333 	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
334 	ku->ku_ksegrp = kg;
335 	kg->kg_numupcalls++;
336 }
337 
338 void
339 upcall_unlink(struct kse_upcall *ku)
340 {
341 	struct ksegrp *kg = ku->ku_ksegrp;
342 
343 	mtx_assert(&sched_lock, MA_OWNED);
344 	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
345 	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
346 	kg->kg_numupcalls--;
347 	upcall_stash(ku);
348 }
349 
350 void
351 upcall_remove(struct thread *td)
352 {
353 
354 	if (td->td_upcall) {
355 		td->td_upcall->ku_owner = NULL;
356 		upcall_unlink(td->td_upcall);
357 		td->td_upcall = NULL;
358 	}
359 }
360 
361 /*
362  * For a newly created process,
363  * link up all the structures and its initial threads etc.
364  */
365 void
366 proc_linkup(struct proc *p, struct ksegrp *kg,
367 	    struct kse *ke, struct thread *td)
368 {
369 
370 	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
371 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
372 	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
373 	p->p_numksegrps = 0;
374 	p->p_numthreads = 0;
375 
376 	ksegrp_link(kg, p);
377 	kse_link(ke, kg);
378 	thread_link(td, kg);
379 }
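
/*
 * For reference, the linkage built above for a new process (one ksegrp,
 * one kse and one thread):
 *
 *	proc
 *	  +--p_ksegrps--> ksegrp
 *	  |                 +--kg_kseq-----> kse
 *	  |                 +--kg_threads--> thread
 *	  +--p_threads---------------------> (the same thread)
 */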
380 
381 /*
382 struct kse_thr_interrupt_args {
383 	struct kse_thr_mailbox * tmbx;
384 };
385 */
386 int
387 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
388 {
389 	struct proc *p;
390 	struct thread *td2;
391 
392 	p = td->td_proc;
393 	if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
394 		return (EINVAL);
395 	mtx_lock_spin(&sched_lock);
396 	FOREACH_THREAD_IN_PROC(p, td2) {
397 		if (td2->td_mailbox == uap->tmbx) {
398 			td2->td_flags |= TDF_INTERRUPT;
399 			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
400 				if (td2->td_flags & TDF_CVWAITQ)
401 					cv_abort(td2);
402 				else
403 					abortsleep(td2);
404 			}
405 			mtx_unlock_spin(&sched_lock);
406 			return (0);
407 		}
408 	}
409 	mtx_unlock_spin(&sched_lock);
410 	return (ESRCH);
411 }
412 
413 /*
414 struct kse_exit_args {
415 	register_t dummy;
416 };
417 */
418 int
419 kse_exit(struct thread *td, struct kse_exit_args *uap)
420 {
421 	struct proc *p;
422 	struct ksegrp *kg;
423 	struct kse *ke;
424 
425 	p = td->td_proc;
426 	/*
427 	 * Only the UTS can call this syscall, and the current group
428 	 * must be a threaded group.
429 	 */
430 	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
431 		return (EINVAL);
432 	KASSERT((td->td_upcall != NULL), ("%s: does not own an upcall", __func__));
433 
434 	kg = td->td_ksegrp;
435 	/* Serialize removing upcall */
436 	PROC_LOCK(p);
437 	mtx_lock_spin(&sched_lock);
438 	if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
439 		mtx_unlock_spin(&sched_lock);
440 		PROC_UNLOCK(p);
441 		return (EDEADLK);
442 	}
443 	ke = td->td_kse;
444 	upcall_remove(td);
445 	if (p->p_numthreads == 1) {
446 		kse_purge(p, td);
447 		p->p_flag &= ~P_THREADED;
448 		mtx_unlock_spin(&sched_lock);
449 		PROC_UNLOCK(p);
450 	} else {
451 		if (kg->kg_numthreads == 1) { /* Shutdown a group */
452 			kse_purge_group(td);
453 			ke->ke_flags |= KEF_EXIT;
454 		}
455 		thread_stopped(p);
456 		thread_exit();
457 		/* NOTREACHED */
458 	}
459 	return (0);
460 }
461 
462 /*
463  * Either becomes an upcall or waits for an awakening event and
464  * then becomes an upcall. Only error cases return.
465  */
466 /*
467 struct kse_release_args {
468 	struct timespec *timeout;
469 };
470 */
471 int
472 kse_release(struct thread *td, struct kse_release_args *uap)
473 {
474 	struct proc *p;
475 	struct ksegrp *kg;
476 	struct timespec ts, ts2, ts3, timeout;
477 	struct timeval tv;
478 	int error;
479 
480 	p = td->td_proc;
481 	kg = td->td_ksegrp;
482 	/*
483 	 * Only the UTS can call this syscall, and the current group
484 	 * must be a threaded group.
485 	 */
486 	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
487 		return (EINVAL);
488 	KASSERT((td->td_upcall != NULL), ("%s: does not own an upcall", __func__));
489 	if (uap->timeout != NULL) {
490 		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
491 			return (error);
492 		getnanouptime(&ts);
493 		timespecadd(&ts, &timeout);
494 		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
495 	}
496 	mtx_lock_spin(&sched_lock);
497 	/* Change OURSELF to become an upcall. */
498 	td->td_flags = TDF_UPCALLING;
499 	if (p->p_sflag & PS_NEEDSIGCHK)
500 		td->td_flags |= TDF_ASTPENDING;
501 	mtx_unlock_spin(&sched_lock);
502 	PROC_LOCK(p);
503 	while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
504 	       (kg->kg_completed == NULL)) {
505 		kg->kg_upsleeps++;
506 		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
507 			"kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
508 		kg->kg_upsleeps--;
509 		PROC_UNLOCK(p);
510 		if (uap->timeout == NULL || error != EWOULDBLOCK)
511 			return (0);
512 		getnanouptime(&ts2);
513 		if (timespeccmp(&ts2, &ts, >=))
514 			return (0);
515 		ts3 = ts;
516 		timespecsub(&ts3, &ts2);
517 		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
518 		PROC_LOCK(p);
519 	}
520 	PROC_UNLOCK(p);
521 	return (0);
522 }
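
/*
 * Note that the msleep() loop above recomputes the remaining sleep time
 * from the absolute deadline (ts) on each pass, so the timeout is measured
 * against a fixed deadline rather than restarted by every wakeup.
 */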
523 
524 /* struct kse_wakeup_args {
525 	struct kse_mailbox *mbx;
526 }; */
527 int
528 kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
529 {
530 	struct proc *p;
531 	struct ksegrp *kg;
532 	struct kse_upcall *ku;
533 	struct thread *td2;
534 
535 	p = td->td_proc;
536 	td2 = NULL;
537 	ku = NULL;
538 	/* KSE-enabled processes only, please. */
539 	if (!(p->p_flag & P_THREADED))
540 		return (EINVAL);
541 	PROC_LOCK(p);
542 	mtx_lock_spin(&sched_lock);
543 	if (uap->mbx) {
544 		FOREACH_KSEGRP_IN_PROC(p, kg) {
545 			FOREACH_UPCALL_IN_GROUP(kg, ku) {
546 				if (ku->ku_mailbox == uap->mbx)
547 					break;
548 			}
549 			if (ku)
550 				break;
551 		}
552 	} else {
553 		kg = td->td_ksegrp;
554 		if (kg->kg_upsleeps) {
555 			wakeup_one(&kg->kg_completed);
556 			mtx_unlock_spin(&sched_lock);
557 			PROC_UNLOCK(p);
558 			return (0);
559 		}
560 		ku = TAILQ_FIRST(&kg->kg_upcalls);
561 	}
562 	if (ku) {
563 		if ((td2 = ku->ku_owner) == NULL) {
564 			panic("%s: no owner", __func__);
565 		} else if (TD_ON_SLEEPQ(td2) &&
566 		           (td2->td_wchan == &kg->kg_completed)) {
567 			abortsleep(td2);
568 		} else {
569 			ku->ku_flags |= KUF_DOUPCALL;
570 		}
571 		mtx_unlock_spin(&sched_lock);
572 		PROC_UNLOCK(p);
573 		return (0);
574 	}
575 	mtx_unlock_spin(&sched_lock);
576 	PROC_UNLOCK(p);
577 	return (ESRCH);
578 }
579 
580 /*
581  * No new KSEG: on the first call, use the current KSE and don't schedule an upcall.
582  * In all other situations, allocate the maximum number of new KSEs and schedule an upcall.
583  */
584 /* struct kse_create_args {
585 	struct kse_mailbox *mbx;
586 	int newgroup;
587 }; */
588 int
589 kse_create(struct thread *td, struct kse_create_args *uap)
590 {
591 	struct kse *newke;
592 	struct ksegrp *newkg;
593 	struct ksegrp *kg;
594 	struct proc *p;
595 	struct kse_mailbox mbx;
596 	struct kse_upcall *newku;
597 	int err, ncpus;
598 
599 	p = td->td_proc;
600 	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
601 		return (err);
602 
603 	/* Too bad the kernel doesn't always have a cpu counter. */
604 #ifdef SMP
605 	ncpus = mp_ncpus;
606 #else
607 	ncpus = 1;
608 #endif
609 	if (thread_debug && virtual_cpu != 0)
610 		ncpus = virtual_cpu;
611 
612 	/* Easier to just set it than to test and set */
613 	PROC_LOCK(p);
614 	p->p_flag |= P_THREADED;
615 	PROC_UNLOCK(p);
616 	kg = td->td_ksegrp;
617 	if (uap->newgroup) {
618 		/* Racy check, but it is cheap; it is redone under sched_lock below. */
619 		if (p->p_numksegrps >= max_groups_per_proc)
620 			return (EPROCLIM);
621 		/*
622 		 * If we want a new KSEGRP it doesn't matter whether
623 		 * we have already fired up KSE mode before or not.
624 		 * We put the process in KSE mode and create a new KSEGRP.
625 		 */
626 		newkg = ksegrp_alloc();
627 		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
628 		      kg_startzero, kg_endzero));
629 		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
630 		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
631 		mtx_lock_spin(&sched_lock);
632 		if (p->p_numksegrps >= max_groups_per_proc) {
633 			mtx_unlock_spin(&sched_lock);
634 			ksegrp_free(newkg);
635 			return (EPROCLIM);
636 		}
637 		ksegrp_link(newkg, p);
638 		mtx_unlock_spin(&sched_lock);
639 	} else {
640 		newkg = kg;
641 	}
642 
643 	/*
644 	 * Creating more upcalls than the number of physical cpus does
645 	 * not help performance.
646 	 */
647 	if (newkg->kg_numupcalls >= ncpus)
648 		return (EPROCLIM);
649 
650 	if (newkg->kg_numupcalls == 0) {
651 		/*
652 		 * Initialize the KSE group, optimized for MP.
653 		 * Create as many KSEs as there are physical cpus; this increases
654 		 * concurrency even if userland is not MP safe and can only run
655 		 * on a single CPU (true for early versions of libpthread).
656 		 * In an ideal world, every physical cpu should execute a thread.
657 		 * If there are enough KSEs, threads in the kernel can be
658 		 * executed in parallel on different cpus at full speed;
659 		 * concurrency in the kernel shouldn't be restricted by the
660 		 * number of upcalls userland provides.
661 		 * Adding more upcall structures only increases concurrency
662 		 * in userland.
663 		 * The highest performance configuration is:
664 		 * N kses = N upcalls = N physical cpus
665 		 */
666 		while (newkg->kg_kses < ncpus) {
667 			newke = kse_alloc();
668 			bzero(&newke->ke_startzero, RANGEOF(struct kse,
669 			      ke_startzero, ke_endzero));
670 #if 0
671 			mtx_lock_spin(&sched_lock);
672 			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
673 			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
674 			mtx_unlock_spin(&sched_lock);
675 #endif
676 			mtx_lock_spin(&sched_lock);
677 			kse_link(newke, newkg);
678 			/* Add engine */
679 			kse_reassign(newke);
680 			mtx_unlock_spin(&sched_lock);
681 		}
682 	}
683 	newku = upcall_alloc();
684 	newku->ku_mailbox = uap->mbx;
685 	newku->ku_func = mbx.km_func;
686 	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
687 
688 	/* For the first call this may not have been set */
689 	if (td->td_standin == NULL)
690 		thread_alloc_spare(td, NULL);
691 
692 	mtx_lock_spin(&sched_lock);
693 	if (newkg->kg_numupcalls >= ncpus) {
694 		mtx_unlock_spin(&sched_lock);
695 		upcall_free(newku);
696 		return (EPROCLIM);
697 	}
698 	upcall_link(newku, newkg);
699 	if (mbx.km_quantum)
700 		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
701 
702 	/*
703 	 * Each upcall structure has an owner thread, find which
704 	 * one owns it.
705 	 */
706 	if (uap->newgroup) {
707 		/*
708 		 * Because the new ksegrp has no thread yet,
709 		 * create an initial upcall thread to own it.
710 		 */
711 		thread_schedule_upcall(td, newku);
712 	} else {
713 		/*
714 		 * If the current thread doesn't have an upcall structure,
715 		 * just assign the upcall to it.
716 		 */
717 		if (td->td_upcall == NULL) {
718 			newku->ku_owner = td;
719 			td->td_upcall = newku;
720 		} else {
721 			/*
722 			 * Create a new upcall thread to own it.
723 			 */
724 			thread_schedule_upcall(td, newku);
725 		}
726 	}
727 	mtx_unlock_spin(&sched_lock);
728 	return (0);
729 }
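
/*
 * A minimal userland sketch (hypothetical UTS names, no error handling)
 * of how a thread library might drive kse_create().  km_func is the UTS
 * upcall entry point and km_stack is the stack upcalls will run on:
 *
 *	struct kse_mailbox mbx;
 *
 *	bzero(&mbx, sizeof(mbx));
 *	mbx.km_func = uts_scheduler;
 *	mbx.km_stack.ss_sp = uts_stack;
 *	mbx.km_stack.ss_size = sizeof(uts_stack);
 *	kse_create(&mbx, 0);
 */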
730 
731 /*
732  * Fill a ucontext_t with a thread's context information.
733  *
734  * This is an analogue to getcontext(3).
735  */
736 void
737 thread_getcontext(struct thread *td, ucontext_t *uc)
738 {
739 
740 /*
741  * XXX this is declared in an MD include file, i386/include/ucontext.h, but
742  * is used in MI code.
743  */
744 #ifdef __i386__
745 	get_mcontext(td, &uc->uc_mcontext);
746 #endif
747 	uc->uc_sigmask = td->td_proc->p_sigmask;
748 }
749 
750 /*
751  * Set a thread's context from a ucontext_t.
752  *
753  * This is an analogue to setcontext(3).
754  */
755 int
756 thread_setcontext(struct thread *td, ucontext_t *uc)
757 {
758 	int ret;
759 
760 /*
761  * XXX this is declared in an MD include file, i386/include/ucontext.h, but
762  * is used in MI code.
763  */
764 #ifdef __i386__
765 	ret = set_mcontext(td, &uc->uc_mcontext);
766 #else
767 	ret = ENOSYS;
768 #endif
769 	if (ret == 0) {
770 		SIG_CANTMASK(uc->uc_sigmask);
771 		PROC_LOCK(td->td_proc);
772 		td->td_proc->p_sigmask = uc->uc_sigmask;
773 		PROC_UNLOCK(td->td_proc);
774 	}
775 	return (ret);
776 }
777 
778 /*
779  * Initialize global thread allocation resources.
780  */
781 void
782 threadinit(void)
783 {
784 
785 #ifndef __ia64__
786 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
787 	    thread_ctor, thread_dtor, thread_init, thread_fini,
788 	    UMA_ALIGN_CACHE, 0);
789 #else
790 	/*
791 	 * XXX the ia64 kstack allocator is really lame and is at the mercy
792 	 * of contigmalloc().  This hackery is to pre-construct a whole
793 	 * pile of thread structures with associated kernel stacks early
794 	 * in the system startup while contigmalloc() still works. Once we
795 	 * have them, keep them.  Sigh.
796 	 */
797 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
798 	    thread_ctor, thread_dtor, thread_init, thread_fini,
799 	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
800 	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
801 #endif
802 	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
803 	    NULL, NULL, ksegrp_init, NULL,
804 	    UMA_ALIGN_CACHE, 0);
805 	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
806 	    NULL, NULL, kse_init, NULL,
807 	    UMA_ALIGN_CACHE, 0);
808 	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
809 	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
810 }
811 
812 /*
813  * Stash an embarrassingly extra thread into the zombie thread queue.
814  */
815 void
816 thread_stash(struct thread *td)
817 {
818 	mtx_lock_spin(&kse_zombie_lock);
819 	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
820 	mtx_unlock_spin(&kse_zombie_lock);
821 }
822 
823 /*
824  * Stash an embarrassingly extra kse into the zombie kse queue.
825  */
826 void
827 kse_stash(struct kse *ke)
828 {
829 	mtx_lock_spin(&kse_zombie_lock);
830 	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
831 	mtx_unlock_spin(&kse_zombie_lock);
832 }
833 
834 /*
835  * Stash an embarrassingly extra upcall into the zombie upcall queue.
836  */
837 
838 void
839 upcall_stash(struct kse_upcall *ku)
840 {
841 	mtx_lock_spin(&kse_zombie_lock);
842 	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
843 	mtx_unlock_spin(&kse_zombie_lock);
844 }
845 
846 /*
847  * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
848  */
849 void
850 ksegrp_stash(struct ksegrp *kg)
851 {
852 	mtx_lock_spin(&kse_zombie_lock);
853 	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
854 	mtx_unlock_spin(&kse_zombie_lock);
855 }
856 
857 /*
858  * Reap zombie thread, kse, ksegrp and upcall resources.
859  */
860 void
861 thread_reap(void)
862 {
863 	struct thread *td_first, *td_next;
864 	struct kse *ke_first, *ke_next;
865 	struct ksegrp *kg_first, *kg_next;
866 	struct kse_upcall *ku_first, *ku_next;
867 
868 	/*
869 	 * Don't even bother to lock if none at this instant,
870 	 * we really don't care about the next instant.
871 	 */
872 	if ((!TAILQ_EMPTY(&zombie_threads))
873 	    || (!TAILQ_EMPTY(&zombie_kses))
874 	    || (!TAILQ_EMPTY(&zombie_ksegrps))
875 	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
876 		mtx_lock_spin(&kse_zombie_lock);
877 		td_first = TAILQ_FIRST(&zombie_threads);
878 		ke_first = TAILQ_FIRST(&zombie_kses);
879 		kg_first = TAILQ_FIRST(&zombie_ksegrps);
880 		ku_first = TAILQ_FIRST(&zombie_upcalls);
881 		if (td_first)
882 			TAILQ_INIT(&zombie_threads);
883 		if (ke_first)
884 			TAILQ_INIT(&zombie_kses);
885 		if (kg_first)
886 			TAILQ_INIT(&zombie_ksegrps);
887 		if (ku_first)
888 			TAILQ_INIT(&zombie_upcalls);
889 		mtx_unlock_spin(&kse_zombie_lock);
890 		while (td_first) {
891 			td_next = TAILQ_NEXT(td_first, td_runq);
892 			if (td_first->td_ucred)
893 				crfree(td_first->td_ucred);
894 			thread_free(td_first);
895 			td_first = td_next;
896 		}
897 		while (ke_first) {
898 			ke_next = TAILQ_NEXT(ke_first, ke_procq);
899 			kse_free(ke_first);
900 			ke_first = ke_next;
901 		}
902 		while (kg_first) {
903 			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
904 			ksegrp_free(kg_first);
905 			kg_first = kg_next;
906 		}
907 		while (ku_first) {
908 			ku_next = TAILQ_NEXT(ku_first, ku_link);
909 			upcall_free(ku_first);
910 			ku_first = ku_next;
911 		}
912 	}
913 }
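
/*
 * Note the pattern used above: the zombie lists are snapshotted and
 * re-initialized in one short pass under kse_zombie_lock, and the actual
 * freeing is then done with no locks held.
 */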
914 
915 /*
916  * Allocate a ksegrp.
917  */
918 struct ksegrp *
919 ksegrp_alloc(void)
920 {
921 	return (uma_zalloc(ksegrp_zone, M_WAITOK));
922 }
923 
924 /*
925  * Allocate a kse.
926  */
927 struct kse *
928 kse_alloc(void)
929 {
930 	return (uma_zalloc(kse_zone, M_WAITOK));
931 }
932 
933 /*
934  * Allocate a thread.
935  */
936 struct thread *
937 thread_alloc(void)
938 {
939 	thread_reap(); /* check if any zombies to get */
940 	return (uma_zalloc(thread_zone, M_WAITOK));
941 }
942 
943 /*
944  * Deallocate a ksegrp.
945  */
946 void
947 ksegrp_free(struct ksegrp *td)
948 {
949 	uma_zfree(ksegrp_zone, td);
950 }
951 
952 /*
953  * Deallocate a kse.
954  */
955 void
956 kse_free(struct kse *td)
957 {
958 	uma_zfree(kse_zone, td);
959 }
960 
961 /*
962  * Deallocate a thread.
963  */
964 void
965 thread_free(struct thread *td)
966 {
967 
968 	cpu_thread_clean(td);
969 	uma_zfree(thread_zone, td);
970 }
971 
972 /*
973  * Store the thread context in the UTS's mailbox,
974  * then add the mailbox at the head of a list we are building in user space.
975  * The list is anchored in the ksegrp structure.
976  */
977 int
978 thread_export_context(struct thread *td)
979 {
980 	struct proc *p;
981 	struct ksegrp *kg;
982 	uintptr_t mbx;
983 	void *addr;
984 	int error, temp;
985 	ucontext_t uc;
986 
987 	p = td->td_proc;
988 	kg = td->td_ksegrp;
989 
990 	/* Export the user/machine context. */
991 	addr = (void *)(&td->td_mailbox->tm_context);
992 	error = copyin(addr, &uc, sizeof(ucontext_t));
993 	if (error)
994 		goto bad;
995 
996 	thread_getcontext(td, &uc);
997 	error = copyout(&uc, addr, sizeof(ucontext_t));
998 	if (error)
999 		goto bad;
1000 
1001 	/* Export clock ticks spent in kernel mode. */
1002 	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
1003 	temp = fuword(addr) + td->td_usticks;
1004 	if (suword(addr, temp))
1005 		goto bad;
1006 
1007 	/* Get address in latest mbox of list pointer */
1008 	addr = (void *)(&td->td_mailbox->tm_next);
1009 	/*
1010 	 * Put the saved address of the previous first
1011 	 * entry into this one
1012 	 */
1013 	for (;;) {
1014 		mbx = (uintptr_t)kg->kg_completed;
1015 		if (suword(addr, mbx)) {
1016 			error = EFAULT;
1017 			goto bad;
1018 		}
1019 		PROC_LOCK(p);
1020 		if (mbx == (uintptr_t)kg->kg_completed) {
1021 			kg->kg_completed = td->td_mailbox;
1022 			/*
1023 			 * The thread context may be taken away by
1024 			 * other upcall threads when we unlock the
1025 			 * process lock; it's no longer valid to
1026 			 * use it again anywhere else.
1027 			 */
1028 			td->td_mailbox = NULL;
1029 			PROC_UNLOCK(p);
1030 			break;
1031 		}
1032 		PROC_UNLOCK(p);
1033 	}
1034 	td->td_usticks = 0;
1035 	return (0);
1036 
1037 bad:
1038 	PROC_LOCK(p);
1039 	psignal(p, SIGSEGV);
1040 	PROC_UNLOCK(p);
1041 	/* The mailbox is bad, don't use it */
1042 	td->td_mailbox = NULL;
1043 	td->td_usticks = 0;
1044 	return (error);
1045 }
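
/*
 * The for (;;) loop above is an optimistic update: the user-visible list
 * link is written with suword() while unlocked, then kg_completed is
 * re-checked under the proc lock and the store is retried if another
 * thread changed the list head in the meantime.  thread_link_mboxes()
 * below detaches the list with the same pattern.
 */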
1046 
1047 /*
1048  * Take the list of completed mailboxes for this KSEGRP and put them on this
1049  * upcall's mailbox as it's the next one going up.
1050  */
1051 static int
1052 thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1053 {
1054 	struct proc *p = kg->kg_proc;
1055 	void *addr;
1056 	uintptr_t mbx;
1057 
1058 	addr = (void *)(&ku->ku_mailbox->km_completed);
1059 	for (;;) {
1060 		mbx = (uintptr_t)kg->kg_completed;
1061 		if (suword(addr, mbx)) {
1062 			PROC_LOCK(p);
1063 			psignal(p, SIGSEGV);
1064 			PROC_UNLOCK(p);
1065 			return (EFAULT);
1066 		}
1067 		PROC_LOCK(p);
1068 		if (mbx == (uintptr_t)kg->kg_completed) {
1069 			kg->kg_completed = NULL;
1070 			PROC_UNLOCK(p);
1071 			break;
1072 		}
1073 		PROC_UNLOCK(p);
1074 	}
1075 	return (0);
1076 }
1077 
1078 /*
1079  * This function should be called at statclock interrupt time
1080  */
1081 int
1082 thread_statclock(int user)
1083 {
1084 	struct thread *td = curthread;
1085 
1086 	if (td->td_ksegrp->kg_numupcalls == 0)
1087 		return (-1);
1088 	if (user) {
1089 		/* Currently this is always done via ast(). */
1090 		mtx_lock_spin(&sched_lock);
1091 		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
1092 		mtx_unlock_spin(&sched_lock);
1093 		td->td_uuticks++;
1094 	} else {
1095 		if (td->td_mailbox != NULL)
1096 			td->td_usticks++;
1097 		else {
1098 			/* XXXKSE
1099 			 * We will call thread_user_enter() for every
1100 			 * kernel entry in the future, so if the thread mailbox
1101 			 * is NULL, it must be the UTS kernel; don't account
1102 			 * clock ticks for it.
1103 			 */
1104 		}
1105 	}
1106 	return (0);
1107 }
1108 
1109 /*
1110  * Export stat clock ticks to userland.
1111  */
1112 static int
1113 thread_update_usr_ticks(struct thread *td, int user)
1114 {
1115 	struct proc *p = td->td_proc;
1116 	struct kse_thr_mailbox *tmbx;
1117 	struct kse_upcall *ku;
1118 	struct ksegrp *kg;
1119 	caddr_t addr;
1120 	uint uticks;
1121 
1122 	if ((ku = td->td_upcall) == NULL)
1123 		return (-1);
1124 
1125 	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1126 	if ((tmbx == NULL) || (tmbx == (void *)-1))
1127 		return (-1);
1128 	if (user) {
1129 		uticks = td->td_uuticks;
1130 		td->td_uuticks = 0;
1131 		addr = (caddr_t)&tmbx->tm_uticks;
1132 	} else {
1133 		uticks = td->td_usticks;
1134 		td->td_usticks = 0;
1135 		addr = (caddr_t)&tmbx->tm_sticks;
1136 	}
1137 	if (uticks) {
1138 		if (suword(addr, uticks+fuword(addr))) {
1139 			PROC_LOCK(p);
1140 			psignal(p, SIGSEGV);
1141 			PROC_UNLOCK(p);
1142 			return (-2);
1143 		}
1144 	}
1145 	kg = td->td_ksegrp;
1146 	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
1147 		mtx_lock_spin(&sched_lock);
1148 		td->td_upcall->ku_flags |= KUF_DOUPCALL;
1149 		mtx_unlock_spin(&sched_lock);
1150 	}
1151 	return (0);
1152 }
1153 
1154 /*
1155  * Discard the current thread and exit from its context.
1156  *
1157  * Because we can't free a thread while we're operating under its context,
1158  * push the current thread into our CPU's deadthread holder. This means
1159  * we needn't worry about someone else grabbing our context before we
1160  * do a cpu_throw().
1161  */
1162 void
1163 thread_exit(void)
1164 {
1165 	struct thread *td;
1166 	struct kse *ke;
1167 	struct proc *p;
1168 	struct ksegrp	*kg;
1169 
1170 	td = curthread;
1171 	kg = td->td_ksegrp;
1172 	p = td->td_proc;
1173 	ke = td->td_kse;
1174 
1175 	mtx_assert(&sched_lock, MA_OWNED);
1176 	KASSERT(p != NULL, ("thread exiting without a process"));
1177 	KASSERT(ke != NULL, ("thread exiting without a kse"));
1178 	KASSERT(kg != NULL, ("thread exiting without a kse group"));
1179 	PROC_LOCK_ASSERT(p, MA_OWNED);
1180 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
1181 	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
1182 
1183 	if (td->td_standin != NULL) {
1184 		thread_stash(td->td_standin);
1185 		td->td_standin = NULL;
1186 	}
1187 
1188 	cpu_thread_exit(td);	/* XXXSMP */
1189 
1190 	/*
1191 	 * The last thread is left attached to the process
1192 	 * so that the whole bundle gets recycled. Skip
1193 	 * all this stuff.
1194 	 */
1195 	if (p->p_numthreads > 1) {
1196 		/*
1197 		 * Unlink this thread from its proc and the kseg.
1198 		 * In keeping with the other structs we probably should
1199 		 * have a thread_unlink() that does some of this but it
1200 		 * would only be called from here (I think) so it would
1201 		 * be a waste. (might be useful for proc_fini() as well.)
1202  		 */
1203 		TAILQ_REMOVE(&p->p_threads, td, td_plist);
1204 		p->p_numthreads--;
1205 		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1206 		kg->kg_numthreads--;
1207 		if (p->p_maxthrwaits)
1208 			wakeup(&p->p_numthreads);
1209 		/*
1210 		 * The test below is NOT true if we are the
1211 		 * sole exiting thread. P_STOPPED_SINGLE is unset
1212 		 * in exit1() after it is the only survivor.
1213 		 */
1214 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1215 			if (p->p_numthreads == p->p_suspcount) {
1216 				thread_unsuspend_one(p->p_singlethread);
1217 			}
1218 		}
1219 
1220 		/*
1221 		 * Because each upcall structure has an owner thread,
1222 		 * and the owner thread exits only when the process is in
1223 		 * the exiting state, an upcall to userland is no longer needed
1224 		 * and deleting the upcall structure is safe here.
1225 		 * So when all threads in a group have exited, all upcalls
1226 		 * in the group are automatically freed.
1227 		 */
1228 		if (td->td_upcall)
1229 			upcall_remove(td);
1230 
1231 		ke->ke_state = KES_UNQUEUED;
1232 		ke->ke_thread = NULL;
1233 		/*
1234 		 * Decide what to do with the KSE attached to this thread.
1235 		 */
1236 		if (ke->ke_flags & KEF_EXIT)
1237 			kse_unlink(ke);
1238 		else
1239 			kse_reassign(ke);
1240 		PROC_UNLOCK(p);
1241 		td->td_kse	= NULL;
1242 		td->td_state	= TDS_INACTIVE;
1243 		td->td_proc	= NULL;
1244 		td->td_ksegrp	= NULL;
1245 		td->td_last_kse	= NULL;
1246 		PCPU_SET(deadthread, td);
1247 	} else {
1248 		PROC_UNLOCK(p);
1249 	}
1250 	cpu_throw();
1251 	/* NOTREACHED */
1252 }
1253 
1254 /*
1255  * Do any thread-specific cleanups that may be needed in wait().
1256  * Called with Giant held; the proc lock and sched_lock are not held.
1257  */
1258 void
1259 thread_wait(struct proc *p)
1260 {
1261 	struct thread *td;
1262 
1263 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
1264 	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
1265 	FOREACH_THREAD_IN_PROC(p, td) {
1266 		if (td->td_standin != NULL) {
1267 			thread_free(td->td_standin);
1268 			td->td_standin = NULL;
1269 		}
1270 		cpu_thread_clean(td);
1271 	}
1272 	thread_reap();	/* check for zombie threads etc. */
1273 }
1274 
1275 /*
1276  * Link a thread to a process.
1277  * Set up anything that needs to be initialized for it to
1278  * be used by the process.
1279  *
1280  * Note that we do not link to the proc's ucred here.
1281  * The thread is linked as if running but no KSE assigned.
1282  */
1283 void
1284 thread_link(struct thread *td, struct ksegrp *kg)
1285 {
1286 	struct proc *p;
1287 
1288 	p = kg->kg_proc;
1289 	td->td_state    = TDS_INACTIVE;
1290 	td->td_proc     = p;
1291 	td->td_ksegrp   = kg;
1292 	td->td_last_kse = NULL;
1293 	td->td_flags    = 0;
1294 	td->td_kse      = NULL;
1295 
1296 	LIST_INIT(&td->td_contested);
1297 	callout_init(&td->td_slpcallout, 1);
1298 	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1299 	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1300 	p->p_numthreads++;
1301 	kg->kg_numthreads++;
1302 }
1303 
1304 /*
1305  * Purge a ksegrp resource. When a ksegrp is preparing to
1306  * exit, it calls this function.
1307  */
1308 void
1309 kse_purge_group(struct thread *td)
1310 {
1311 	struct ksegrp *kg;
1312 	struct kse *ke;
1313 
1314 	kg = td->td_ksegrp;
1315  	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1316 	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1317 		KASSERT(ke->ke_state == KES_IDLE,
1318 			("%s: wrong idle KSE state", __func__));
1319 		kse_unlink(ke);
1320 	}
1321 	KASSERT((kg->kg_kses == 1),
1322 		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1323 	KASSERT((kg->kg_numupcalls == 0),
1324 	        ("%s: ksegrp still has %d upcall structures",
1325 		__func__, kg->kg_numupcalls));
1326 }
1327 
1328 /*
1329  * Purge a process's KSE resource. When a process is preparing to
1330  * exit, it calls kse_purge to release any extra KSE resources in
1331  * the process.
1332  */
1333 void
1334 kse_purge(struct proc *p, struct thread *td)
1335 {
1336 	struct ksegrp *kg;
1337 	struct kse *ke;
1338 
1339  	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1340 	mtx_lock_spin(&sched_lock);
1341 	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1342 		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1343 		p->p_numksegrps--;
1344 		/*
1345 		 * There is no ownership for KSEs; after all threads
1346 		 * in the group have exited, it is possible that some KSEs
1347 		 * were left on the idle queue, so gc them now.
1348 		 */
1349 		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1350 			KASSERT(ke->ke_state == KES_IDLE,
1351 			   ("%s: wrong idle KSE state", __func__));
1352 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1353 			kg->kg_idle_kses--;
1354 			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1355 			kg->kg_kses--;
1356 			kse_stash(ke);
1357 		}
1358 		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1359 		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1360 		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1361 		KASSERT((kg->kg_numupcalls == 0),
1362 		        ("%s: ksegrp still has %d upcall structures",
1363 			__func__, kg->kg_numupcalls));
1364 
1365 		if (kg != td->td_ksegrp)
1366 			ksegrp_stash(kg);
1367 	}
1368 	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1369 	p->p_numksegrps++;
1370 	mtx_unlock_spin(&sched_lock);
1371 }
1372 
1373 /*
1374  * This function is intended to be used to initialize a spare thread
1375  * for upcall. Initialize thread's large data area outside sched_lock
1376  * for thread_schedule_upcall().
1377  */
1378 void
1379 thread_alloc_spare(struct thread *td, struct thread *spare)
1380 {
1381 	if (td->td_standin)
1382 		return;
1383 	if (spare == NULL)
1384 		spare = thread_alloc();
1385 	td->td_standin = spare;
1386 	bzero(&spare->td_startzero,
1387 	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1388 	spare->td_proc = td->td_proc;
1389 	/* Setup PCB and fork address */
1390 	cpu_set_upcall(spare, td->td_pcb);
1391 	/*
1392 	 * XXXKSE do we really need this? (default values for the
1393 	 * frame).
1394 	 */
1395 	bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
1396 	spare->td_ucred = crhold(td->td_ucred);
1397 }
1398 
1399 /*
1400  * Create a thread and schedule it for upcall on the KSE given.
1401  * Use our thread's standin so that we don't have to allocate one.
1402  */
1403 struct thread *
1404 thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1405 {
1406 	struct thread *td2;
1407 
1408 	mtx_assert(&sched_lock, MA_OWNED);
1409 
1410 	/*
1411 	 * Schedule an upcall thread on the specified kse_upcall;
1412 	 * the kse_upcall must be free.
1413 	 * td must have a spare thread.
1414 	 */
1415 	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1416 	if ((td2 = td->td_standin) != NULL) {
1417 		td->td_standin = NULL;
1418 	} else {
1419 		panic("no reserve thread when scheduling an upcall");
1420 		return (NULL);
1421 	}
1422 	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1423 	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1424 	bcopy(&td->td_startcopy, &td2->td_startcopy,
1425 	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1426 	thread_link(td2, ku->ku_ksegrp);
1427 	/* Let the new thread become owner of the upcall */
1428 	ku->ku_owner   = td2;
1429 	td2->td_upcall = ku;
1430 	td2->td_flags  = TDF_UPCALLING;
1431 	if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
1432 		td2->td_flags |= TDF_ASTPENDING;
1433 	td2->td_kse    = NULL;
1434 	td2->td_state  = TDS_CAN_RUN;
1435 	td2->td_inhibitors = 0;
1436 	setrunqueue(td2);
1437 	return (td2);	/* bogus.. should be a void function */
1438 }
1439 
1440 void
1441 thread_signal_add(struct thread *td, int sig)
1442 {
1443 	struct kse_upcall *ku;
1444 	struct proc *p;
1445 	sigset_t ss;
1446 	int error;
1447 
1448 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
1449 	td = curthread;
1450 	ku = td->td_upcall;
1451 	p = td->td_proc;
1452 
1453 	PROC_UNLOCK(p);
1454 	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
1455 	if (error)
1456 		goto error;
1457 
1458 	SIGADDSET(ss, sig);
1459 
1460 	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
1461 	if (error)
1462 		goto error;
1463 
1464 	PROC_LOCK(p);
1465 	return;
1466 error:
1467 	PROC_LOCK(p);
1468 	sigexit(td, SIGILL);
1469 }
1470 
1471 
1472 /*
1473  * Schedule an upcall to notify a KSE process that it received signals.
1474  *
1475  */
1476 void
1477 thread_signal_upcall(struct thread *td)
1478 {
1479 	mtx_lock_spin(&sched_lock);
1480 	td->td_flags |= TDF_UPCALLING;
1481 	mtx_unlock_spin(&sched_lock);
1482 
1483 	return;
1484 }
1485 
1486 void
1487 thread_switchout(struct thread *td)
1488 {
1489 	struct kse_upcall *ku;
1490 
1491 	mtx_assert(&sched_lock, MA_OWNED);
1492 
1493 	/*
1494 	 * If the outgoing thread is in a threaded group and has never
1495 	 * scheduled an upcall, decide whether this is a short
1496 	 * or long term event and thus whether or not to schedule
1497 	 * an upcall.
1498 	 * If it is a short term event, just suspend it in
1499 	 * a way that takes its KSE with it.
1500 	 * Select the events for which we want to schedule upcalls.
1501 	 * For now it's just sleep.
1502 	 * XXXKSE eventually almost any inhibition could do.
1503 	 */
1504 	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
1505 		/*
1506 		 * Release ownership of upcall, and schedule an upcall
1507 		 * thread, this new upcall thread becomes the owner of
1508 		 * the upcall structure.
1509 		 */
1510 		ku = td->td_upcall;
1511 		ku->ku_owner = NULL;
1512 		td->td_upcall = NULL;
1513 		td->td_flags &= ~TDF_CAN_UNBIND;
1514 		thread_schedule_upcall(td, ku);
1515 	}
1516 }
1517 
1518 /*
1519  * Setup done on the thread when it enters the kernel.
1520  * XXXKSE Presently only for syscalls but eventually all kernel entries.
1521  */
1522 void
1523 thread_user_enter(struct proc *p, struct thread *td)
1524 {
1525 	struct ksegrp *kg;
1526 	struct kse_upcall *ku;
1527 
1528 	kg = td->td_ksegrp;
1529 	/*
1530 	 * First check that we shouldn't just abort.
1531 	 * But check if we are the single thread first!
1532 	 * XXX p_singlethread not locked, but should be safe.
1533 	 */
1534 	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1535 		PROC_LOCK(p);
1536 		mtx_lock_spin(&sched_lock);
1537 		thread_stopped(p);
1538 		thread_exit();
1539 		/* NOTREACHED */
1540 	}
1541 
1542 	/*
1543 	 * If we are doing a syscall in a KSE environment,
1544 	 * note where our mailbox is. There is always the
1545 	 * possibility that we could do this lazily (in kse_reassign()),
1546 	 * but for now do it every time.
1547 	 */
1548 	kg = td->td_ksegrp;
1549 	if (kg->kg_numupcalls) {
1550 		ku = td->td_upcall;
1551 		KASSERT(ku, ("%s: no upcall owned", __func__));
1552 		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1553 		td->td_mailbox =
1554 		    (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1555 		if ((td->td_mailbox == NULL) ||
1556 		    (td->td_mailbox == (void *)-1)) {
1557 		    	/* Don't schedule upcall when blocked */
1558 			td->td_mailbox = NULL;
1559 			mtx_lock_spin(&sched_lock);
1560 			td->td_flags &= ~TDF_CAN_UNBIND;
1561 			mtx_unlock_spin(&sched_lock);
1562 		} else {
1563 			if (td->td_standin == NULL)
1564 				thread_alloc_spare(td, NULL);
1565 			mtx_lock_spin(&sched_lock);
1566 			td->td_flags |= TDF_CAN_UNBIND;
1567 			mtx_unlock_spin(&sched_lock);
1568 		}
1569 	}
1570 }
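
/*
 * The TDF_CAN_UNBIND flag set above is what later lets thread_switchout()
 * and thread_userret() detach this thread from its upcall; when the
 * mailbox cannot be read, the flag is cleared instead so that no upcall
 * is scheduled if the thread blocks.
 */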
1571 
1572 /*
1573  * The extra work we go through if we are a threaded process when we
1574  * return to userland.
1575  *
1576  * If we are a KSE process and returning to user mode, check for
1577  * extra work to do before we return (e.g. for more syscalls
1578  * to complete first).  If we were in a critical section, we should
1579  * just return to let it finish. Same if we were in the UTS (in
1580  * which case the mailbox's context's busy indicator will be set).
1581  * The only traps we support will have set the mailbox.
1582  * We will clear it here.
1583  */
1584 int
1585 thread_userret(struct thread *td, struct trapframe *frame)
1586 {
1587 	int error = 0, upcalls;
1588 	struct kse_upcall *ku;
1589 	struct ksegrp *kg, *kg2;
1590 	struct proc *p;
1591 	struct timespec ts;
1592 
1593 	p = td->td_proc;
1594 	kg = td->td_ksegrp;
1595 
1596 	/* Nothing to do with non-threaded group/process */
1597 	if (td->td_ksegrp->kg_numupcalls == 0)
1598 		return (0);
1599 
1600 	/*
1601 	 * A stat clock interrupt hit in userland and we are
1602 	 * returning from the interrupt; charge the thread's
1603 	 * userland time to the UTS.
1604 	 */
1605 	if (td->td_flags & TDF_USTATCLOCK) {
1606 		thread_update_usr_ticks(td, 1);
1607 		mtx_lock_spin(&sched_lock);
1608 		td->td_flags &= ~TDF_USTATCLOCK;
1609 		mtx_unlock_spin(&sched_lock);
1610 		if (kg->kg_completed ||
1611 		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
1612 			thread_user_enter(p, td);
1613 	}
1614 
1615 	/*
1616 	 * Optimisation:
1617 	 * This thread has not started any upcall.
1618 	 * If there is no work to report other than ourself,
1619 	 * then it can return directly to userland.
1620 	 */
1621 	if (TD_CAN_UNBIND(td)) {
1622 		mtx_lock_spin(&sched_lock);
1623 		td->td_flags &= ~TDF_CAN_UNBIND;
1624 		mtx_unlock_spin(&sched_lock);
1625 		ku = td->td_upcall;
1626 		if ((p->p_sflag & PS_NEEDSIGCHK) == 0 &&
1627 		    (kg->kg_completed == NULL) &&
1628 		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1629 		    (kg->kg_upquantum && ticks >= kg->kg_nextupcall)) {
1630 			thread_update_usr_ticks(td, 0);
1631 			nanotime(&ts);
1632 			error = copyout(&ts,
1633 				(caddr_t)&ku->ku_mailbox->km_timeofday,
1634 				sizeof(ts));
1635 			td->td_mailbox = 0;
1636 			if (error)
1637 				goto out;
1638 			return (0);
1639 		}
1640 		error = thread_export_context(td);
1641 		if (error) {
1642 			/*
1643 			 * Failing to do the KSE operation just defaults
1644 			 * back to synchronous operation, so just return from
1645 			 * the syscall.
1646 			 */
1647 			return (0);
1648 		}
1649 		/*
1650 		 * There is something to report, and we own an upcall
1651 		 * structure, so we can go to userland.
1652 		 * Turn ourself into an upcall thread.
1653 		 */
1654 		mtx_lock_spin(&sched_lock);
1655 		td->td_flags |= TDF_UPCALLING;
1656 		mtx_unlock_spin(&sched_lock);
1657 	} else if (td->td_mailbox) {
1658 		error = thread_export_context(td);
1659 		/* possibly upcall with error? */
1660 		PROC_LOCK(p);
1661 		/*
1662 		 * There are upcall threads waiting for
1663 		 * work to do, wake one of them up.
1664 		 * XXXKSE Maybe wake all of them up.
1665 		 */
1666 		if (!error && kg->kg_upsleeps)
1667 			wakeup_one(&kg->kg_completed);
1668 		mtx_lock_spin(&sched_lock);
1669 		thread_stopped(p);
1670 		thread_exit();
1671 		/* NOTREACHED */
1672 	}
1673 
1674 	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1675 
1676 	if (p->p_numthreads > max_threads_per_proc) {
1677 		max_threads_hits++;
1678 		PROC_LOCK(p);
1679 		while (p->p_numthreads > max_threads_per_proc) {
1680 			if (P_SHOULDSTOP(p))
1681 				break;
1682 			upcalls = 0;
1683 			mtx_lock_spin(&sched_lock);
1684 			FOREACH_KSEGRP_IN_PROC(p, kg2) {
1685 				if (kg2->kg_numupcalls == 0)
1686 					upcalls++;
1687 				else
1688 					upcalls += kg2->kg_numupcalls;
1689 			}
1690 			mtx_unlock_spin(&sched_lock);
1691 			if (upcalls >= max_threads_per_proc)
1692 				break;
1693 			p->p_maxthrwaits++;
1694 			msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1695 			    "maxthreads", NULL);
1696 			p->p_maxthrwaits--;
1697 		}
1698 		PROC_UNLOCK(p);
1699 	}
1700 
1701 	if (td->td_flags & TDF_UPCALLING) {
1702 		kg->kg_nextupcall = ticks+kg->kg_upquantum;
1703 		ku = td->td_upcall;
1704 		/*
1705 		 * There is no more work to do and we are going to ride
1706 		 * this thread up to userland as an upcall.
1707 		 * Do the last parts of the setup needed for the upcall.
1708 		 */
1709 		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1710 		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1711 
1712 		/*
1713 		 * Set user context to the UTS.
1714 		 * Will use Giant in cpu_thread_clean() because it uses
1715 		 * kmem_free(kernel_map, ...)
1716 		 */
1717 		cpu_set_upcall_kse(td, ku);
1718 		mtx_lock_spin(&sched_lock);
1719 		td->td_flags &= ~TDF_UPCALLING;
1720 		if (ku->ku_flags & KUF_DOUPCALL)
1721 			ku->ku_flags &= ~KUF_DOUPCALL;
1722 		mtx_unlock_spin(&sched_lock);
1723 
1724 		/*
1725 		 * Unhook the list of completed threads.
1726 		 * anything that completes after this gets to
1727 		 * come in next time.
1728 		 * Put the list of completed thread mailboxes on
1729 		 * this KSE's mailbox.
1730 		 */
1731 		error = thread_link_mboxes(kg, ku);
1732 		if (error)
1733 			goto out;
1734 
1735 		/*
1736 		 * Set state and clear the thread mailbox pointer.
1737 		 * From now on we are just a bound outgoing process.
1738 		 * **Problem** userret is often called several times;
1739 		 * it would be nice if this all happened only on the first
1740 		 * time through (the scan for extra work etc.).
1741 		 */
1742 		error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
1743 		if (error)
1744 			goto out;
1745 
1746 		/* Export current system time */
1747 		nanotime(&ts);
1748 		error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday,
1749 			sizeof(ts));
1750 	}
1751 
1752 out:
1753 	if (error) {
1754 		/*
1755 		 * Things are going to be so screwed we should just kill
1756 		 * the process.
1757 		 * How do we do that?
1758 		 */
1759 		PROC_LOCK(td->td_proc);
1760 		psignal(td->td_proc, SIGSEGV);
1761 		PROC_UNLOCK(td->td_proc);
1762 	} else {
1763 		/*
1764 		 * Optimisation:
1765 		 * Ensure that we have a spare thread available,
1766 		 * for when we re-enter the kernel.
1767 		 */
1768 		if (td->td_standin == NULL)
1769 			thread_alloc_spare(td, NULL);
1770 	}
1771 
1772 	/*
1773 	 * Clear thread mailbox first, then clear system tick count.
1774 	 * The order is important because thread_statclock() uses the
1775 	 * mailbox pointer to see if it is a userland thread or
1776 	 * a UTS kernel thread.
1777 	 */
1778 	td->td_mailbox = NULL;
1779 	td->td_usticks = 0;
1780 	return (error);	/* go sync */
1781 }
1782 
1783 /*
1784  * Enforce single-threading.
1785  *
1786  * Returns 1 if the caller must abort (another thread is waiting to
1787  * exit the process or similar). Process is locked!
1788  * Returns 0 when you are successfully the only thread running.
1789  * A process has successfully single threaded in the suspend mode when
1790  * there are no threads in user mode. Threads in the kernel must be
1791  * allowed to continue until they get to the user boundary. They may even
1792  * copy out their return values and data before suspending. They may however be
1793  * accelerated in reaching the user boundary as we will wake up
1794  * any sleeping threads that are interruptible (PCATCH).
1795  */
1796 int
1797 thread_single(int force_exit)
1798 {
1799 	struct thread *td;
1800 	struct thread *td2;
1801 	struct proc *p;
1802 
1803 	td = curthread;
1804 	p = td->td_proc;
1805 	mtx_assert(&Giant, MA_OWNED);
1806 	PROC_LOCK_ASSERT(p, MA_OWNED);
1807 	KASSERT((td != NULL), ("curthread is NULL"));
1808 
1809 	if ((p->p_flag & P_THREADED) == 0)
1810 		return (0);
1811 
1812 	/* Is someone already single threading? */
1813 	if (p->p_singlethread)
1814 		return (1);
1815 
1816 	if (force_exit == SINGLE_EXIT) {
1817 		p->p_flag |= P_SINGLE_EXIT;
1818 	} else
1819 		p->p_flag &= ~P_SINGLE_EXIT;
1820 	p->p_flag |= P_STOPPED_SINGLE;
1821 	p->p_singlethread = td;
1822 	/* XXXKSE Which lock protects the below values? */
1823 	while ((p->p_numthreads - p->p_suspcount) != 1) {
1824 		mtx_lock_spin(&sched_lock);
1825 		FOREACH_THREAD_IN_PROC(p, td2) {
1826 			if (td2 == td)
1827 				continue;
1828 			td2->td_flags |= TDF_ASTPENDING;
1829 			if (TD_IS_INHIBITED(td2)) {
1830 				if (force_exit == SINGLE_EXIT) {
1831 					if (TD_IS_SUSPENDED(td2)) {
1832 						thread_unsuspend_one(td2);
1833 					}
1834 					if (TD_ON_SLEEPQ(td2) &&
1835 					    (td2->td_flags & TDF_SINTR)) {
1836 						if (td2->td_flags & TDF_CVWAITQ)
1837 							cv_abort(td2);
1838 						else
1839 							abortsleep(td2);
1840 					}
1841 				} else {
1842 					if (TD_IS_SUSPENDED(td2))
1843 						continue;
1844 					/*
1845 					 * maybe other inhibited states too?
1846 					 * XXXKSE Is it totally safe to
1847 					 * suspend a non-interruptible thread?
1848 					 */
1849 					if (td2->td_inhibitors &
1850 					    (TDI_SLEEPING | TDI_SWAPPED))
1851 						thread_suspend_one(td2);
1852 				}
1853 			}
1854 		}
1855 		/*
1856 		 * Maybe we suspended some threads.. was it enough?
1857 		 */
1858 		if ((p->p_numthreads - p->p_suspcount) == 1) {
1859 			mtx_unlock_spin(&sched_lock);
1860 			break;
1861 		}
1862 
1863 		/*
1864 		 * Wake us up when everyone else has suspended.
1865 		 * In the meantime we suspend as well.
1866 		 */
1867 		thread_suspend_one(td);
1868 		mtx_unlock(&Giant);
1869 		PROC_UNLOCK(p);
1870 		p->p_stats->p_ru.ru_nvcsw++;
1871 		mi_switch();
1872 		mtx_unlock_spin(&sched_lock);
1873 		mtx_lock(&Giant);
1874 		PROC_LOCK(p);
1875 	}
1876 	if (force_exit == SINGLE_EXIT) {
1877 		if (td->td_upcall) {
1878 			mtx_lock_spin(&sched_lock);
1879 			upcall_remove(td);
1880 			mtx_unlock_spin(&sched_lock);
1881 		}
1882 		kse_purge(p, td);
1883 	}
1884 	return (0);
1885 }
1886 
1887 /*
1888  * Called in from locations that can safely check to see
1889  * whether we have to suspend or at least throttle for a
1890  * single-thread event (e.g. fork).
1891  *
1892  * Such locations include userret().
1893  * If the "return_instead" argument is non-zero, the thread must be able to
1894  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1895  *
1896  * The 'return_instead' argument tells the function if it may do a
1897  * thread_exit() or suspend, or whether the caller must abort and back
1898  * out instead.
1899  *
1900  * If the thread that set the single_threading request has set the
1901  * P_SINGLE_EXIT bit in the process flags then this call will never return
1902  * if 'return_instead' is false, but will exit.
1903  *
1904  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1905  *---------------+--------------------+---------------------
1906  *       0       | returns 0          |   returns 0 or 1
1907  *               | when ST ends       |   immediately
1908  *---------------+--------------------+---------------------
1909  *       1       | thread exits       |   returns 1
1910  *               |                    |  immediately
1911  * 0 = thread_exit() or suspension ok,
1912  * other = return error instead of stopping the thread.
1913  *
1914  * While a full suspension is under effect, even a single threading
1915  * thread would be suspended if it made this call (but it shouldn't).
1916  * This call should only be made from places where
1917  * thread_exit() would be safe as that may be the outcome unless
1918  * return_instead is set.
1919  */
1920 int
1921 thread_suspend_check(int return_instead)
1922 {
1923 	struct thread *td;
1924 	struct proc *p;
1925 	struct ksegrp *kg;
1926 
1927 	td = curthread;
1928 	p = td->td_proc;
1929 	kg = td->td_ksegrp;
1930 	PROC_LOCK_ASSERT(p, MA_OWNED);
1931 	while (P_SHOULDSTOP(p)) {
1932 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1933 			KASSERT(p->p_singlethread != NULL,
1934 			    ("singlethread not set"));
1935 			/*
1936 			 * The only suspension in action is a
1937 			 * single-threading. The single threader need not stop.
1938 			 * XXX Should be safe to access unlocked
1939 			 * as it can only be set to be true by us.
1940 			 */
1941 			if (p->p_singlethread == td)
1942 				return (0);	/* Exempt from stopping. */
1943 		}
1944 		if (return_instead)
1945 			return (1);
1946 
1947 		mtx_lock_spin(&sched_lock);
1948 		thread_stopped(p);
1949 		/*
1950 		 * If the process is waiting for us to exit,
1951 		 * this thread should just suicide.
1952 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1953 		 */
1954 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1955 			while (mtx_owned(&Giant))
1956 				mtx_unlock(&Giant);
1957 			thread_exit();
1958 		}
1959 
1960 		/*
1961 		 * When a thread suspends, it just
1962 		 * moves to the process's suspend queue
1963 		 * and stays there.
1964 		 */
1965 		mtx_assert(&Giant, MA_NOTOWNED);
1966 		thread_suspend_one(td);
1967 		PROC_UNLOCK(p);
1968 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1969 			if (p->p_numthreads == p->p_suspcount) {
1970 				thread_unsuspend_one(p->p_singlethread);
1971 			}
1972 		}
1973 		p->p_stats->p_ru.ru_nivcsw++;
1974 		mi_switch();
1975 		mtx_unlock_spin(&sched_lock);
1976 		PROC_LOCK(p);
1977 	}
1978 	return (0);
1979 }
1980 
1981 void
1982 thread_suspend_one(struct thread *td)
1983 {
1984 	struct proc *p = td->td_proc;
1985 
1986 	mtx_assert(&sched_lock, MA_OWNED);
1987 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1988 	p->p_suspcount++;
1989 	TD_SET_SUSPENDED(td);
1990 	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
1991 	/*
1992 	 * Hack: If we are suspending but are on the sleep queue
1993 	 * then we are in msleep or the cv equivalent. We
1994 	 * want to look like we have two inhibitors.
1995 	 * It may already be set; that doesn't matter.
1996 	 */
1997 	if (TD_ON_SLEEPQ(td))
1998 		TD_SET_SLEEPING(td);
1999 }
2000 
2001 void
2002 thread_unsuspend_one(struct thread *td)
2003 {
2004 	struct proc *p = td->td_proc;
2005 
2006 	mtx_assert(&sched_lock, MA_OWNED);
2007 	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
2008 	TD_CLR_SUSPENDED(td);
2009 	p->p_suspcount--;
2010 	setrunnable(td);
2011 }
2012 
2013 /*
2014  * Allow all threads blocked by single threading to continue running.
2015  */
2016 void
2017 thread_unsuspend(struct proc *p)
2018 {
2019 	struct thread *td;
2020 
2021 	mtx_assert(&sched_lock, MA_OWNED);
2022 	PROC_LOCK_ASSERT(p, MA_OWNED);
2023 	if (!P_SHOULDSTOP(p)) {
2024 		while ((td = TAILQ_FIRST(&p->p_suspended))) {
2025 			thread_unsuspend_one(td);
2026 		}
2027 	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
2028 	    (p->p_numthreads == p->p_suspcount)) {
2029 		/*
2030 		 * Stopping everything also did the job for the single
2031 		 * threading request. Now we've downgraded to single-threaded,
2032 		 * let it continue.
2033 		 */
2034 		thread_unsuspend_one(p->p_singlethread);
2035 	}
2036 }
2037 
2038 void
2039 thread_single_end(void)
2040 {
2041 	struct thread *td;
2042 	struct proc *p;
2043 
2044 	td = curthread;
2045 	p = td->td_proc;
2046 	PROC_LOCK_ASSERT(p, MA_OWNED);
2047 	p->p_flag &= ~P_STOPPED_SINGLE;
2048 	p->p_singlethread = NULL;
2049 	/*
2050 	 * If there are other threads they may now run,
2051 	 * unless of course there is a blanket 'stop order'
2052 	 * on the process. The single threader must be allowed
2053 	 * to continue however as this is a bad place to stop.
2054 	 */
2055 	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
2056 		mtx_lock_spin(&sched_lock);
2057 		while ((td = TAILQ_FIRST(&p->p_suspended))) {
2058 			thread_unsuspend_one(td);
2059 		}
2060 		mtx_unlock_spin(&sched_lock);
2061 	}
2062 }
2063 
2064 
2065