xref: /freebsd/sys/kern/kern_thread.c (revision f6a4109212fd8fbabc731f07b2dd5c7e07fbec33)
1 /*
2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3  *  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/proc.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/sysproto.h>
42 #include <sys/filedesc.h>
43 #include <sys/sched.h>
44 #include <sys/signalvar.h>
45 #include <sys/sx.h>
46 #include <sys/tty.h>
47 #include <sys/turnstile.h>
48 #include <sys/user.h>
49 #include <sys/kse.h>
50 #include <sys/ktr.h>
51 #include <sys/ucontext.h>
52 
53 #include <vm/vm.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_object.h>
56 #include <vm/pmap.h>
57 #include <vm/uma.h>
58 #include <vm/vm_map.h>
59 
60 #include <machine/frame.h>
61 
62 /*
63  * KSEGRP related storage.
64  */
65 static uma_zone_t ksegrp_zone;
66 static uma_zone_t kse_zone;
67 static uma_zone_t thread_zone;
68 static uma_zone_t upcall_zone;
69 
70 /* DEBUG ONLY */
71 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
72 static int thread_debug = 0;
73 SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
74 	&thread_debug, 0, "thread debug");
75 
76 static int max_threads_per_proc = 150;
77 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
78 	&max_threads_per_proc, 0, "Limit on threads per proc");
79 
80 static int max_groups_per_proc = 50;
81 SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
82 	&max_groups_per_proc, 0, "Limit on thread groups per proc");
83 
84 static int max_threads_hits;
85 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
86 	&max_threads_hits, 0, "");
87 
88 static int virtual_cpu;
89 
90 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
91 
92 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
93 TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
94 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
95 TAILQ_HEAD(, kse_upcall) zombie_upcalls =
96 	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
97 struct mtx kse_zombie_lock;
98 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
99 
100 static void kse_purge(struct proc *p, struct thread *td);
101 static void kse_purge_group(struct thread *td);
102 static int thread_update_usr_ticks(struct thread *td, int user);
103 static void thread_alloc_spare(struct thread *td, struct thread *spare);
104 
105 static int
106 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
107 {
108 	int error, new_val;
109 	int def_val;
110 
111 #ifdef SMP
112 	def_val = mp_ncpus;
113 #else
114 	def_val = 1;
115 #endif
116 	if (virtual_cpu == 0)
117 		new_val = def_val;
118 	else
119 		new_val = virtual_cpu;
120 	error = sysctl_handle_int(oidp, &new_val, 0, req);
121         if (error != 0 || req->newptr == NULL)
122 		return (error);
123 	if (new_val < 0)
124 		return (EINVAL);
125 	virtual_cpu = new_val;
126 	return (0);
127 }
128 
129 /* DEBUG ONLY */
130 SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
131 	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
132 	"debug virtual cpus");
133 
134 /*
135  * Prepare a thread for use.
136  */
137 static void
138 thread_ctor(void *mem, int size, void *arg)
139 {
140 	struct thread	*td;
141 
142 	td = (struct thread *)mem;
143 	td->td_state = TDS_INACTIVE;
144 	td->td_oncpu	= NOCPU;
145 	td->td_critnest = 1;
146 }
147 
148 /*
149  * Reclaim a thread after use.
150  */
151 static void
152 thread_dtor(void *mem, int size, void *arg)
153 {
154 	struct thread	*td;
155 
156 	td = (struct thread *)mem;
157 
158 #ifdef INVARIANTS
159 	/* Verify that this thread is in a safe state to free. */
160 	switch (td->td_state) {
161 	case TDS_INHIBITED:
162 	case TDS_RUNNING:
163 	case TDS_CAN_RUN:
164 	case TDS_RUNQ:
165 		/*
166 		 * We must never unlink a thread that is in one of
167 		 * these states, because it is currently active.
168 		 */
169 		panic("bad state for thread unlinking");
170 		/* NOTREACHED */
171 	case TDS_INACTIVE:
172 		break;
173 	default:
174 		panic("bad thread state");
175 		/* NOTREACHED */
176 	}
177 #endif
178 }
179 
180 /*
181  * Initialize type-stable parts of a thread (when newly created).
182  */
183 static void
184 thread_init(void *mem, int size)
185 {
186 	struct thread	*td;
187 
188 	td = (struct thread *)mem;
189 	vm_thread_new(td, 0);
190 	cpu_thread_setup(td);
191 	td->td_turnstile = turnstile_alloc();
192 	td->td_sched = (struct td_sched *)&td[1];
193 }
194 
195 /*
196  * Tear down type-stable parts of a thread (just before being discarded).
197  */
198 static void
199 thread_fini(void *mem, int size)
200 {
201 	struct thread	*td;
202 
203 	td = (struct thread *)mem;
204 	turnstile_free(td->td_turnstile);
205 	vm_thread_dispose(td);
206 }
207 
208 /*
209  * Initialize type-stable parts of a kse (when newly created).
210  */
211 static void
212 kse_init(void *mem, int size)
213 {
214 	struct kse	*ke;
215 
216 	ke = (struct kse *)mem;
217 	ke->ke_sched = (struct ke_sched *)&ke[1];
218 }
219 
220 /*
221  * Initialize type-stable parts of a ksegrp (when newly created).
222  */
223 static void
224 ksegrp_init(void *mem, int size)
225 {
226 	struct ksegrp	*kg;
227 
228 	kg = (struct ksegrp *)mem;
229 	kg->kg_sched = (struct kg_sched *)&kg[1];
230 }
231 
232 /*
233  * KSE is linked into kse group.
234  */
235 void
236 kse_link(struct kse *ke, struct ksegrp *kg)
237 {
238 	struct proc *p = kg->kg_proc;
239 
240 	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
241 	kg->kg_kses++;
242 	ke->ke_state	= KES_UNQUEUED;
243 	ke->ke_proc	= p;
244 	ke->ke_ksegrp	= kg;
245 	ke->ke_thread	= NULL;
246 	ke->ke_oncpu	= NOCPU;
247 	ke->ke_flags	= 0;
248 }
249 
250 void
251 kse_unlink(struct kse *ke)
252 {
253 	struct ksegrp *kg;
254 
255 	mtx_assert(&sched_lock, MA_OWNED);
256 	kg = ke->ke_ksegrp;
257 	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
258 	if (ke->ke_state == KES_IDLE) {
259 		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
260 		kg->kg_idle_kses--;
261 	}
262 	--kg->kg_kses;
263 	/*
264 	 * Aggregate stats from the KSE
265 	 */
266 	kse_stash(ke);
267 }
268 
269 void
270 ksegrp_link(struct ksegrp *kg, struct proc *p)
271 {
272 
273 	TAILQ_INIT(&kg->kg_threads);
274 	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
275 	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
276 	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
277 	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
278 	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
279 	kg->kg_proc = p;
280 	/*
281 	 * the following counters are in the -zero- section
282 	 * and may not need clearing
283 	 */
284 	kg->kg_numthreads = 0;
285 	kg->kg_runnable   = 0;
286 	kg->kg_kses       = 0;
287 	kg->kg_runq_kses  = 0; /* XXXKSE change name */
288 	kg->kg_idle_kses  = 0;
289 	kg->kg_numupcalls = 0;
290 	/* link it in now that it's consistent */
291 	p->p_numksegrps++;
292 	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
293 }
294 
295 void
296 ksegrp_unlink(struct ksegrp *kg)
297 {
298 	struct proc *p;
299 
300 	mtx_assert(&sched_lock, MA_OWNED);
301 	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
302 	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
303 	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
304 
305 	p = kg->kg_proc;
306 	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
307 	p->p_numksegrps--;
308 	/*
309 	 * Aggregate stats from the KSE
310 	 */
311 	ksegrp_stash(kg);
312 }
313 
314 struct kse_upcall *
315 upcall_alloc(void)
316 {
317 	struct kse_upcall *ku;
318 
319 	ku = uma_zalloc(upcall_zone, M_WAITOK);
320 	bzero(ku, sizeof(*ku));
321 	return (ku);
322 }
323 
324 void
325 upcall_free(struct kse_upcall *ku)
326 {
327 
328 	uma_zfree(upcall_zone, ku);
329 }
330 
331 void
332 upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
333 {
334 
335 	mtx_assert(&sched_lock, MA_OWNED);
336 	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
337 	ku->ku_ksegrp = kg;
338 	kg->kg_numupcalls++;
339 }
340 
341 void
342 upcall_unlink(struct kse_upcall *ku)
343 {
344 	struct ksegrp *kg = ku->ku_ksegrp;
345 
346 	mtx_assert(&sched_lock, MA_OWNED);
347 	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
348 	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
349 	kg->kg_numupcalls--;
350 	upcall_stash(ku);
351 }
352 
353 void
354 upcall_remove(struct thread *td)
355 {
356 
357 	if (td->td_upcall) {
358 		td->td_upcall->ku_owner = NULL;
359 		upcall_unlink(td->td_upcall);
360 		td->td_upcall = 0;
361 	}
362 }
363 
364 /*
365  * For a newly created process,
366  * link up all the structures and its initial threads etc.
367  */
368 void
369 proc_linkup(struct proc *p, struct ksegrp *kg,
370 	    struct kse *ke, struct thread *td)
371 {
372 
373 	TAILQ_INIT(&p->p_ksegrps);	     /* all ksegrps in proc */
374 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
375 	TAILQ_INIT(&p->p_suspended);	     /* Threads suspended */
376 	p->p_numksegrps = 0;
377 	p->p_numthreads = 0;
378 
379 	ksegrp_link(kg, p);
380 	kse_link(ke, kg);
381 	thread_link(td, kg);
382 }
383 
384 #ifndef _SYS_SYSPROTO_H_
385 struct kse_switchin_args {
386 	const struct __mcontext *mcp;
387 	long val;
388 	long *loc;
389 };
390 #endif
391 
392 int
393 kse_switchin(struct thread *td, struct kse_switchin_args *uap)
394 {
395 	mcontext_t mc;
396 	int error;
397 
398 	error = (uap->mcp == NULL) ? EINVAL : 0;
399 	if (!error)
400 		error = copyin(uap->mcp, &mc, sizeof(mc));
401 	if (!error && uap->loc != NULL)
402 		error = (suword(uap->loc, uap->val) != 0) ? EINVAL : 0;
403 	if (!error)
404 		error = set_mcontext(td, &mc);
405 	return ((error == 0) ? EJUSTRETURN : error);
406 }
407 
408 /*
409 struct kse_thr_interrupt_args {
410 	struct kse_thr_mailbox * tmbx;
411 	int cmd;
412 	long data;
413 };
414 */
415 int
416 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
417 {
418 	struct proc *p;
419 	struct thread *td2;
420 
421 	p = td->td_proc;
422 
423 	if (!(p->p_flag & P_SA))
424 		return (EINVAL);
425 
426 	switch (uap->cmd) {
427 	case KSE_INTR_SENDSIG:
428 		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
429 			return (EINVAL);
430 	case KSE_INTR_INTERRUPT:
431 	case KSE_INTR_RESTART:
432 		PROC_LOCK(p);
433 		mtx_lock_spin(&sched_lock);
434 		FOREACH_THREAD_IN_PROC(p, td2) {
435 			if (td2->td_mailbox == uap->tmbx)
436 				break;
437 		}
438 		if (td2 == NULL) {
439 			mtx_unlock_spin(&sched_lock);
440 			PROC_UNLOCK(p);
441 			return (ESRCH);
442 		}
443 		if (uap->cmd == KSE_INTR_SENDSIG) {
444 			if (uap->data > 0) {
445 				td2->td_flags &= ~TDF_INTERRUPT;
446 				mtx_unlock_spin(&sched_lock);
447 				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
448 			} else {
449 				mtx_unlock_spin(&sched_lock);
450 			}
451 		} else {
452 			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
453 			if (TD_CAN_UNBIND(td2))
454 				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
455 			if (uap->cmd == KSE_INTR_INTERRUPT)
456 				td2->td_intrval = EINTR;
457 			else
458 				td2->td_intrval = ERESTART;
459 			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
460 				if (td2->td_flags & TDF_CVWAITQ)
461 					cv_abort(td2);
462 				else
463 					abortsleep(td2);
464 			}
465 			mtx_unlock_spin(&sched_lock);
466 		}
467 		PROC_UNLOCK(p);
468 		break;
469 	case KSE_INTR_SIGEXIT:
470 		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
471 			return (EINVAL);
472 		PROC_LOCK(p);
473 		sigexit(td, (int)uap->data);
474 		break;
475 	default:
476 		return (EINVAL);
477 	}
478 	return (0);
479 }
480 
481 /*
482 struct kse_exit_args {
483 	register_t dummy;
484 };
485 */
486 int
487 kse_exit(struct thread *td, struct kse_exit_args *uap)
488 {
489 	struct proc *p;
490 	struct ksegrp *kg;
491 	struct kse *ke;
492 	struct kse_upcall *ku, *ku2;
493 	int    error, count;
494 
495 	p = td->td_proc;
496 	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
497 		return (EINVAL);
498 	kg = td->td_ksegrp;
499 	count = 0;
500 	PROC_LOCK(p);
501 	mtx_lock_spin(&sched_lock);
502 	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
503 		if (ku2->ku_flags & KUF_EXITING)
504 			count++;
505 	}
506 	if ((kg->kg_numupcalls - count) == 1 &&
507 	    (kg->kg_numthreads > 1)) {
508 		mtx_unlock_spin(&sched_lock);
509 		PROC_UNLOCK(p);
510 		return (EDEADLK);
511 	}
512 	ku->ku_flags |= KUF_EXITING;
513 	mtx_unlock_spin(&sched_lock);
514 	PROC_UNLOCK(p);
515 	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
516 	PROC_LOCK(p);
517 	if (error)
518 		psignal(p, SIGSEGV);
519 	mtx_lock_spin(&sched_lock);
520 	upcall_remove(td);
521 	ke = td->td_kse;
522 	if (p->p_numthreads == 1) {
523 		kse_purge(p, td);
524 		p->p_flag &= ~P_SA;
525 		mtx_unlock_spin(&sched_lock);
526 		PROC_UNLOCK(p);
527 	} else {
528 		if (kg->kg_numthreads == 1) { /* Shutdown a group */
529 			kse_purge_group(td);
530 			ke->ke_flags |= KEF_EXIT;
531 		}
532 		thread_stopped(p);
533 		thread_exit();
534 		/* NOTREACHED */
535 	}
536 	return (0);
537 }
538 
539 /*
540  * Either becomes an upcall or waits for an awakening event and
541  * then becomes an upcall. Only error cases return.
542  */
543 /*
544 struct kse_release_args {
545 	struct timespec *timeout;
546 };
547 */
548 int
549 kse_release(struct thread *td, struct kse_release_args *uap)
550 {
551 	struct proc *p;
552 	struct ksegrp *kg;
553 	struct kse_upcall *ku;
554 	struct timespec timeout;
555 	struct timeval tv;
556 	sigset_t sigset;
557 	int error;
558 
559 	p = td->td_proc;
560 	kg = td->td_ksegrp;
561 	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
562 		return (EINVAL);
563 	if (uap->timeout != NULL) {
564 		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
565 			return (error);
566 		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
567 	}
568 	if (td->td_flags & TDF_SA)
569 		td->td_pflags |= TDP_UPCALLING;
570 	else {
571 		ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
572 		if (ku->ku_mflags == -1) {
573 			PROC_LOCK(p);
574 			sigexit(td, SIGSEGV);
575 		}
576 	}
577 	PROC_LOCK(p);
578 	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
579 		/* UTS wants to wait for signal event */
580 		if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL))
581 			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
582 			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
583 		p->p_flag &= ~P_SIGEVENT;
584 		sigset = p->p_siglist;
585 		PROC_UNLOCK(p);
586 		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
587 		    sizeof(sigset));
588 	} else {
589 		 if (! kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) {
590 			kg->kg_upsleeps++;
591 			error = msleep(&kg->kg_completed, &p->p_mtx,
592 				PPAUSE|PCATCH, "kserel",
593 				(uap->timeout ? tvtohz(&tv) : 0));
594 			kg->kg_upsleeps--;
595 		}
596 		PROC_UNLOCK(p);
597 	}
598 	if (ku->ku_flags & KUF_DOUPCALL) {
599 		mtx_lock_spin(&sched_lock);
600 		ku->ku_flags &= ~KUF_DOUPCALL;
601 		mtx_unlock_spin(&sched_lock);
602 	}
603 	return (0);
604 }
605 
606 /* struct kse_wakeup_args {
607 	struct kse_mailbox *mbx;
608 }; */
609 int
610 kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
611 {
612 	struct proc *p;
613 	struct ksegrp *kg;
614 	struct kse_upcall *ku;
615 	struct thread *td2;
616 
617 	p = td->td_proc;
618 	td2 = NULL;
619 	ku = NULL;
620 	/* KSE-enabled processes only, please. */
621 	if (!(p->p_flag & P_SA))
622 		return (EINVAL);
623 	PROC_LOCK(p);
624 	mtx_lock_spin(&sched_lock);
625 	if (uap->mbx) {
626 		FOREACH_KSEGRP_IN_PROC(p, kg) {
627 			FOREACH_UPCALL_IN_GROUP(kg, ku) {
628 				if (ku->ku_mailbox == uap->mbx)
629 					break;
630 			}
631 			if (ku)
632 				break;
633 		}
634 	} else {
635 		kg = td->td_ksegrp;
636 		if (kg->kg_upsleeps) {
637 			wakeup_one(&kg->kg_completed);
638 			mtx_unlock_spin(&sched_lock);
639 			PROC_UNLOCK(p);
640 			return (0);
641 		}
642 		ku = TAILQ_FIRST(&kg->kg_upcalls);
643 	}
644 	if (ku) {
645 		if ((td2 = ku->ku_owner) == NULL) {
646 			panic("%s: no owner", __func__);
647 		} else if (TD_ON_SLEEPQ(td2) &&
648 		           ((td2->td_wchan == &kg->kg_completed) ||
649 			    (td2->td_wchan == &p->p_siglist &&
650 			     (ku->ku_mflags & KMF_WAITSIGEVENT)))) {
651 			abortsleep(td2);
652 		} else {
653 			ku->ku_flags |= KUF_DOUPCALL;
654 		}
655 		mtx_unlock_spin(&sched_lock);
656 		PROC_UNLOCK(p);
657 		return (0);
658 	}
659 	mtx_unlock_spin(&sched_lock);
660 	PROC_UNLOCK(p);
661 	return (ESRCH);
662 }
663 
664 /*
665  * No new KSEG: first call: use current KSE, don't schedule an upcall
666  * All other situations, do allocate max new KSEs and schedule an upcall.
667  */
668 /* struct kse_create_args {
669 	struct kse_mailbox *mbx;
670 	int newgroup;
671 }; */
672 int
673 kse_create(struct thread *td, struct kse_create_args *uap)
674 {
675 	struct kse *newke;
676 	struct ksegrp *newkg;
677 	struct ksegrp *kg;
678 	struct proc *p;
679 	struct kse_mailbox mbx;
680 	struct kse_upcall *newku;
681 	int err, ncpus, sa = 0, first = 0;
682 	struct thread *newtd;
683 
684 	p = td->td_proc;
685 	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
686 		return (err);
687 
688 	/* Too bad, why hasn't kernel always a cpu counter !? */
689 #ifdef SMP
690 	ncpus = mp_ncpus;
691 #else
692 	ncpus = 1;
693 #endif
694 	if (virtual_cpu != 0)
695 		ncpus = virtual_cpu;
696 	if (!(mbx.km_flags & KMF_BOUND))
697 		sa = TDF_SA;
698 	else
699 		ncpus = 1;
700 	PROC_LOCK(p);
701 	if (!(p->p_flag & P_SA)) {
702 		first = 1;
703 		p->p_flag |= P_SA;
704 	}
705 	PROC_UNLOCK(p);
706 	if (!sa && !uap->newgroup && !first)
707 		return (EINVAL);
708 	kg = td->td_ksegrp;
709 	if (uap->newgroup) {
710 		/* Have race condition but it is cheap */
711 		if (p->p_numksegrps >= max_groups_per_proc)
712 			return (EPROCLIM);
713 		/*
714 		 * If we want a new KSEGRP it doesn't matter whether
715 		 * we have already fired up KSE mode before or not.
716 		 * We put the process in KSE mode and create a new KSEGRP.
717 		 */
718 		newkg = ksegrp_alloc();
719 		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
720 		      kg_startzero, kg_endzero));
721 		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
722 		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
723 		PROC_LOCK(p);
724 		mtx_lock_spin(&sched_lock);
725 		if (p->p_numksegrps >= max_groups_per_proc) {
726 			mtx_unlock_spin(&sched_lock);
727 			PROC_UNLOCK(p);
728 			ksegrp_free(newkg);
729 			return (EPROCLIM);
730 		}
731 		ksegrp_link(newkg, p);
732 		sched_fork_ksegrp(kg, newkg);
733 		mtx_unlock_spin(&sched_lock);
734 		PROC_UNLOCK(p);
735 	} else {
736 		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
737 			return (EINVAL);
738 		newkg = kg;
739 	}
740 
741 	/*
742 	 * Creating upcalls more than number of physical cpu does
743 	 * not help performance.
744 	 */
745 	if (newkg->kg_numupcalls >= ncpus)
746 		return (EPROCLIM);
747 
748 	if (newkg->kg_numupcalls == 0) {
749 		/*
750 		 * Initialize KSE group
751 		 *
752 		 * For multiplxed group, create KSEs as many as physical
753 		 * cpus. This increases concurrent even if userland
754 		 * is not MP safe and can only run on single CPU.
755 		 * In ideal world, every physical cpu should execute a thread.
756 		 * If there is enough KSEs, threads in kernel can be
757 		 * executed parallel on different cpus with full speed,
758 		 * Concurrent in kernel shouldn't be restricted by number of
759 		 * upcalls userland provides. Adding more upcall structures
760 		 * only increases concurrent in userland.
761 		 *
762 		 * For bound thread group, because there is only thread in the
763 		 * group, we only create one KSE for the group. Thread in this
764 		 * kind of group will never schedule an upcall when blocked,
765 		 * this intends to simulate pthread system scope thread.
766 		 */
767 		while (newkg->kg_kses < ncpus) {
768 			newke = kse_alloc();
769 			bzero(&newke->ke_startzero, RANGEOF(struct kse,
770 			      ke_startzero, ke_endzero));
771 #if 0
772 			mtx_lock_spin(&sched_lock);
773 			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
774 			      RANGEOF(struct kse, ke_startcopy, ke_endcopy));
775 			mtx_unlock_spin(&sched_lock);
776 #endif
777 			mtx_lock_spin(&sched_lock);
778 			kse_link(newke, newkg);
779 			sched_fork_kse(td->td_kse, newke);
780 			/* Add engine */
781 			kse_reassign(newke);
782 			mtx_unlock_spin(&sched_lock);
783 		}
784 	}
785 	newku = upcall_alloc();
786 	newku->ku_mailbox = uap->mbx;
787 	newku->ku_func = mbx.km_func;
788 	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
789 
790 	/* For the first call this may not have been set */
791 	if (td->td_standin == NULL)
792 		thread_alloc_spare(td, NULL);
793 
794 	PROC_LOCK(p);
795 	if (newkg->kg_numupcalls >= ncpus) {
796 		PROC_UNLOCK(p);
797 		upcall_free(newku);
798 		return (EPROCLIM);
799 	}
800 	if (first && sa) {
801 		SIGSETOR(p->p_siglist, td->td_siglist);
802 		SIGEMPTYSET(td->td_siglist);
803 		SIGFILLSET(td->td_sigmask);
804 		SIG_CANTMASK(td->td_sigmask);
805 	}
806 	mtx_lock_spin(&sched_lock);
807 	PROC_UNLOCK(p);
808 	upcall_link(newku, newkg);
809 	if (mbx.km_quantum)
810 		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
811 
812 	/*
813 	 * Each upcall structure has an owner thread, find which
814 	 * one owns it.
815 	 */
816 	if (uap->newgroup) {
817 		/*
818 		 * Because new ksegrp hasn't thread,
819 		 * create an initial upcall thread to own it.
820 		 */
821 		newtd = thread_schedule_upcall(td, newku);
822 	} else {
823 		/*
824 		 * If current thread hasn't an upcall structure,
825 		 * just assign the upcall to it.
826 		 */
827 		if (td->td_upcall == NULL) {
828 			newku->ku_owner = td;
829 			td->td_upcall = newku;
830 			newtd = td;
831 		} else {
832 			/*
833 			 * Create a new upcall thread to own it.
834 			 */
835 			newtd = thread_schedule_upcall(td, newku);
836 		}
837 	}
838 	if (!sa) {
839 		newtd->td_mailbox = mbx.km_curthread;
840 		newtd->td_flags &= ~TDF_SA;
841 		if (newtd != td) {
842 			mtx_unlock_spin(&sched_lock);
843 			cpu_set_upcall_kse(newtd, newku);
844 			mtx_lock_spin(&sched_lock);
845 		}
846 	} else {
847 		newtd->td_flags |= TDF_SA;
848 	}
849 	if (newtd != td)
850 		setrunqueue(newtd);
851 	mtx_unlock_spin(&sched_lock);
852 	return (0);
853 }
854 
855 /*
856  * Initialize global thread allocation resources.
857  */
858 void
859 threadinit(void)
860 {
861 
862 	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
863 	    thread_ctor, thread_dtor, thread_init, thread_fini,
864 	    UMA_ALIGN_CACHE, 0);
865 	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
866 	    NULL, NULL, ksegrp_init, NULL,
867 	    UMA_ALIGN_CACHE, 0);
868 	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
869 	    NULL, NULL, kse_init, NULL,
870 	    UMA_ALIGN_CACHE, 0);
871 	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
872 	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
873 }
874 
875 /*
876  * Stash an embarasingly extra thread into the zombie thread queue.
877  */
878 void
879 thread_stash(struct thread *td)
880 {
881 	mtx_lock_spin(&kse_zombie_lock);
882 	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
883 	mtx_unlock_spin(&kse_zombie_lock);
884 }
885 
886 /*
887  * Stash an embarasingly extra kse into the zombie kse queue.
888  */
889 void
890 kse_stash(struct kse *ke)
891 {
892 	mtx_lock_spin(&kse_zombie_lock);
893 	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
894 	mtx_unlock_spin(&kse_zombie_lock);
895 }
896 
897 /*
898  * Stash an embarasingly extra upcall into the zombie upcall queue.
899  */
900 
901 void
902 upcall_stash(struct kse_upcall *ku)
903 {
904 	mtx_lock_spin(&kse_zombie_lock);
905 	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
906 	mtx_unlock_spin(&kse_zombie_lock);
907 }
908 
909 /*
910  * Stash an embarasingly extra ksegrp into the zombie ksegrp queue.
911  */
912 void
913 ksegrp_stash(struct ksegrp *kg)
914 {
915 	mtx_lock_spin(&kse_zombie_lock);
916 	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
917 	mtx_unlock_spin(&kse_zombie_lock);
918 }
919 
920 /*
921  * Reap zombie kse resource.
922  */
923 void
924 thread_reap(void)
925 {
926 	struct thread *td_first, *td_next;
927 	struct kse *ke_first, *ke_next;
928 	struct ksegrp *kg_first, * kg_next;
929 	struct kse_upcall *ku_first, *ku_next;
930 
931 	/*
932 	 * Don't even bother to lock if none at this instant,
933 	 * we really don't care about the next instant..
934 	 */
935 	if ((!TAILQ_EMPTY(&zombie_threads))
936 	    || (!TAILQ_EMPTY(&zombie_kses))
937 	    || (!TAILQ_EMPTY(&zombie_ksegrps))
938 	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
939 		mtx_lock_spin(&kse_zombie_lock);
940 		td_first = TAILQ_FIRST(&zombie_threads);
941 		ke_first = TAILQ_FIRST(&zombie_kses);
942 		kg_first = TAILQ_FIRST(&zombie_ksegrps);
943 		ku_first = TAILQ_FIRST(&zombie_upcalls);
944 		if (td_first)
945 			TAILQ_INIT(&zombie_threads);
946 		if (ke_first)
947 			TAILQ_INIT(&zombie_kses);
948 		if (kg_first)
949 			TAILQ_INIT(&zombie_ksegrps);
950 		if (ku_first)
951 			TAILQ_INIT(&zombie_upcalls);
952 		mtx_unlock_spin(&kse_zombie_lock);
953 		while (td_first) {
954 			td_next = TAILQ_NEXT(td_first, td_runq);
955 			if (td_first->td_ucred)
956 				crfree(td_first->td_ucred);
957 			thread_free(td_first);
958 			td_first = td_next;
959 		}
960 		while (ke_first) {
961 			ke_next = TAILQ_NEXT(ke_first, ke_procq);
962 			kse_free(ke_first);
963 			ke_first = ke_next;
964 		}
965 		while (kg_first) {
966 			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
967 			ksegrp_free(kg_first);
968 			kg_first = kg_next;
969 		}
970 		while (ku_first) {
971 			ku_next = TAILQ_NEXT(ku_first, ku_link);
972 			upcall_free(ku_first);
973 			ku_first = ku_next;
974 		}
975 	}
976 }
977 
978 /*
979  * Allocate a ksegrp.
980  */
981 struct ksegrp *
982 ksegrp_alloc(void)
983 {
984 	return (uma_zalloc(ksegrp_zone, M_WAITOK));
985 }
986 
987 /*
988  * Allocate a kse.
989  */
990 struct kse *
991 kse_alloc(void)
992 {
993 	return (uma_zalloc(kse_zone, M_WAITOK));
994 }
995 
996 /*
997  * Allocate a thread.
998  */
999 struct thread *
1000 thread_alloc(void)
1001 {
1002 	thread_reap(); /* check if any zombies to get */
1003 	return (uma_zalloc(thread_zone, M_WAITOK));
1004 }
1005 
1006 /*
1007  * Deallocate a ksegrp.
1008  */
1009 void
1010 ksegrp_free(struct ksegrp *td)
1011 {
1012 	uma_zfree(ksegrp_zone, td);
1013 }
1014 
1015 /*
1016  * Deallocate a kse.
1017  */
1018 void
1019 kse_free(struct kse *td)
1020 {
1021 	uma_zfree(kse_zone, td);
1022 }
1023 
1024 /*
1025  * Deallocate a thread.
1026  */
1027 void
1028 thread_free(struct thread *td)
1029 {
1030 
1031 	cpu_thread_clean(td);
1032 	uma_zfree(thread_zone, td);
1033 }
1034 
1035 /*
1036  * Store the thread context in the UTS's mailbox.
1037  * then add the mailbox at the head of a list we are building in user space.
1038  * The list is anchored in the ksegrp structure.
1039  */
1040 int
1041 thread_export_context(struct thread *td, int willexit)
1042 {
1043 	struct proc *p;
1044 	struct ksegrp *kg;
1045 	uintptr_t mbx;
1046 	void *addr;
1047 	int error = 0, temp, sig;
1048 	mcontext_t mc;
1049 
1050 	p = td->td_proc;
1051 	kg = td->td_ksegrp;
1052 
1053 	/* Export the user/machine context. */
1054 	get_mcontext(td, &mc, 0);
1055 	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
1056 	error = copyout(&mc, addr, sizeof(mcontext_t));
1057 	if (error)
1058 		goto bad;
1059 
1060 	/* Exports clock ticks in kernel mode */
1061 	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
1062 	temp = fuword32(addr) + td->td_usticks;
1063 	if (suword32(addr, temp)) {
1064 		error = EFAULT;
1065 		goto bad;
1066 	}
1067 
1068 	/*
1069 	 * Post sync signal, or process SIGKILL and SIGSTOP.
1070 	 * For sync signal, it is only possible when the signal is not
1071 	 * caught by userland or process is being debugged.
1072 	 */
1073 	PROC_LOCK(p);
1074 	if (td->td_flags & TDF_NEEDSIGCHK) {
1075 		mtx_lock_spin(&sched_lock);
1076 		td->td_flags &= ~TDF_NEEDSIGCHK;
1077 		mtx_unlock_spin(&sched_lock);
1078 		mtx_lock(&p->p_sigacts->ps_mtx);
1079 		while ((sig = cursig(td)) != 0)
1080 			postsig(sig);
1081 		mtx_unlock(&p->p_sigacts->ps_mtx);
1082 	}
1083 	if (willexit)
1084 		SIGFILLSET(td->td_sigmask);
1085 	PROC_UNLOCK(p);
1086 
1087 	/* Get address in latest mbox of list pointer */
1088 	addr = (void *)(&td->td_mailbox->tm_next);
1089 	/*
1090 	 * Put the saved address of the previous first
1091 	 * entry into this one
1092 	 */
1093 	for (;;) {
1094 		mbx = (uintptr_t)kg->kg_completed;
1095 		if (suword(addr, mbx)) {
1096 			error = EFAULT;
1097 			goto bad;
1098 		}
1099 		PROC_LOCK(p);
1100 		if (mbx == (uintptr_t)kg->kg_completed) {
1101 			kg->kg_completed = td->td_mailbox;
1102 			/*
1103 			 * The thread context may be taken away by
1104 			 * other upcall threads when we unlock
1105 			 * process lock. it's no longer valid to
1106 			 * use it again in any other places.
1107 			 */
1108 			td->td_mailbox = NULL;
1109 			PROC_UNLOCK(p);
1110 			break;
1111 		}
1112 		PROC_UNLOCK(p);
1113 	}
1114 	td->td_usticks = 0;
1115 	return (0);
1116 
1117 bad:
1118 	PROC_LOCK(p);
1119 	sigexit(td, SIGILL);
1120 	return (error);
1121 }
1122 
1123 /*
1124  * Take the list of completed mailboxes for this KSEGRP and put them on this
1125  * upcall's mailbox as it's the next one going up.
1126  */
1127 static int
1128 thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1129 {
1130 	struct proc *p = kg->kg_proc;
1131 	void *addr;
1132 	uintptr_t mbx;
1133 
1134 	addr = (void *)(&ku->ku_mailbox->km_completed);
1135 	for (;;) {
1136 		mbx = (uintptr_t)kg->kg_completed;
1137 		if (suword(addr, mbx)) {
1138 			PROC_LOCK(p);
1139 			psignal(p, SIGSEGV);
1140 			PROC_UNLOCK(p);
1141 			return (EFAULT);
1142 		}
1143 		PROC_LOCK(p);
1144 		if (mbx == (uintptr_t)kg->kg_completed) {
1145 			kg->kg_completed = NULL;
1146 			PROC_UNLOCK(p);
1147 			break;
1148 		}
1149 		PROC_UNLOCK(p);
1150 	}
1151 	return (0);
1152 }
1153 
1154 /*
1155  * This function should be called at statclock interrupt time
1156  */
1157 int
1158 thread_statclock(int user)
1159 {
1160 	struct thread *td = curthread;
1161 	struct ksegrp *kg = td->td_ksegrp;
1162 
1163 	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
1164 		return (0);
1165 	if (user) {
1166 		/* Current always do via ast() */
1167 		mtx_lock_spin(&sched_lock);
1168 		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
1169 		mtx_unlock_spin(&sched_lock);
1170 		td->td_uuticks++;
1171 	} else {
1172 		if (td->td_mailbox != NULL)
1173 			td->td_usticks++;
1174 		else {
1175 			/* XXXKSE
1176 		 	 * We will call thread_user_enter() for every
1177 			 * kernel entry in future, so if the thread mailbox
1178 			 * is NULL, it must be a UTS kernel, don't account
1179 			 * clock ticks for it.
1180 			 */
1181 		}
1182 	}
1183 	return (0);
1184 }
1185 
1186 /*
1187  * Export state clock ticks for userland
1188  */
1189 static int
1190 thread_update_usr_ticks(struct thread *td, int user)
1191 {
1192 	struct proc *p = td->td_proc;
1193 	struct kse_thr_mailbox *tmbx;
1194 	struct kse_upcall *ku;
1195 	struct ksegrp *kg;
1196 	caddr_t addr;
1197 	u_int uticks;
1198 
1199 	if ((ku = td->td_upcall) == NULL)
1200 		return (-1);
1201 
1202 	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1203 	if ((tmbx == NULL) || (tmbx == (void *)-1))
1204 		return (-1);
1205 	if (user) {
1206 		uticks = td->td_uuticks;
1207 		td->td_uuticks = 0;
1208 		addr = (caddr_t)&tmbx->tm_uticks;
1209 	} else {
1210 		uticks = td->td_usticks;
1211 		td->td_usticks = 0;
1212 		addr = (caddr_t)&tmbx->tm_sticks;
1213 	}
1214 	if (uticks) {
1215 		if (suword32(addr, uticks+fuword32(addr))) {
1216 			PROC_LOCK(p);
1217 			psignal(p, SIGSEGV);
1218 			PROC_UNLOCK(p);
1219 			return (-2);
1220 		}
1221 	}
1222 	kg = td->td_ksegrp;
1223 	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
1224 		mtx_lock_spin(&sched_lock);
1225 		td->td_upcall->ku_flags |= KUF_DOUPCALL;
1226 		mtx_unlock_spin(&sched_lock);
1227 	}
1228 	return (0);
1229 }
1230 
1231 /*
1232  * Discard the current thread and exit from its context.
1233  *
1234  * Because we can't free a thread while we're operating under its context,
1235  * push the current thread into our CPU's deadthread holder. This means
1236  * we needn't worry about someone else grabbing our context before we
1237  * do a cpu_throw().
1238  */
1239 void
1240 thread_exit(void)
1241 {
1242 	struct thread *td;
1243 	struct kse *ke;
1244 	struct proc *p;
1245 	struct ksegrp	*kg;
1246 
1247 	td = curthread;
1248 	kg = td->td_ksegrp;
1249 	p = td->td_proc;
1250 	ke = td->td_kse;
1251 
1252 	mtx_assert(&sched_lock, MA_OWNED);
1253 	KASSERT(p != NULL, ("thread exiting without a process"));
1254 	KASSERT(ke != NULL, ("thread exiting without a kse"));
1255 	KASSERT(kg != NULL, ("thread exiting without a kse group"));
1256 	PROC_LOCK_ASSERT(p, MA_OWNED);
1257 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
1258 	mtx_assert(&Giant, MA_NOTOWNED);
1259 
1260 	if (td->td_standin != NULL) {
1261 		thread_stash(td->td_standin);
1262 		td->td_standin = NULL;
1263 	}
1264 
1265 	cpu_thread_exit(td);	/* XXXSMP */
1266 
1267 	/*
1268 	 * The last thread is left attached to the process
1269 	 * So that the whole bundle gets recycled. Skip
1270 	 * all this stuff.
1271 	 */
1272 	if (p->p_numthreads > 1) {
1273 		thread_unlink(td);
1274 		if (p->p_maxthrwaits)
1275 			wakeup(&p->p_numthreads);
1276 		/*
1277 		 * The test below is NOT true if we are the
1278 		 * sole exiting thread. P_STOPPED_SNGL is unset
1279 		 * in exit1() after it is the only survivor.
1280 		 */
1281 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1282 			if (p->p_numthreads == p->p_suspcount) {
1283 				thread_unsuspend_one(p->p_singlethread);
1284 			}
1285 		}
1286 
1287 		/*
1288 		 * Because each upcall structure has an owner thread,
1289 		 * owner thread exits only when process is in exiting
1290 		 * state, so upcall to userland is no longer needed,
1291 		 * deleting upcall structure is safe here.
1292 		 * So when all threads in a group is exited, all upcalls
1293 		 * in the group should be automatically freed.
1294 		 */
1295 		if (td->td_upcall)
1296 			upcall_remove(td);
1297 
1298 		sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
1299 		sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
1300 		ke->ke_state = KES_UNQUEUED;
1301 		ke->ke_thread = NULL;
1302 		/*
1303 		 * Decide what to do with the KSE attached to this thread.
1304 		 */
1305 		if (ke->ke_flags & KEF_EXIT) {
1306 			kse_unlink(ke);
1307 			if (kg->kg_kses == 0) {
1308 				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
1309 				ksegrp_unlink(kg);
1310 			}
1311 		}
1312 		else
1313 			kse_reassign(ke);
1314 		PROC_UNLOCK(p);
1315 		td->td_kse	= NULL;
1316 		td->td_state	= TDS_INACTIVE;
1317 #if 0
1318 		td->td_proc	= NULL;
1319 #endif
1320 		td->td_ksegrp	= NULL;
1321 		td->td_last_kse	= NULL;
1322 		PCPU_SET(deadthread, td);
1323 	} else {
1324 		PROC_UNLOCK(p);
1325 	}
1326 	/* XXX Shouldn't cpu_throw() here. */
1327 	mtx_assert(&sched_lock, MA_OWNED);
1328 	cpu_throw(td, choosethread());
1329 	panic("I'm a teapot!");
1330 	/* NOTREACHED */
1331 }
1332 
1333 /*
1334  * Do any thread specific cleanups that may be needed in wait()
1335  * called with Giant held, proc and schedlock not held.
1336  */
1337 void
1338 thread_wait(struct proc *p)
1339 {
1340 	struct thread *td;
1341 
1342 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
1343 	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
1344 	FOREACH_THREAD_IN_PROC(p, td) {
1345 		if (td->td_standin != NULL) {
1346 			thread_free(td->td_standin);
1347 			td->td_standin = NULL;
1348 		}
1349 		cpu_thread_clean(td);
1350 	}
1351 	thread_reap();	/* check for zombie threads etc. */
1352 }
1353 
1354 /*
1355  * Link a thread to a process.
1356  * set up anything that needs to be initialized for it to
1357  * be used by the process.
1358  *
1359  * Note that we do not link to the proc's ucred here.
1360  * The thread is linked as if running but no KSE assigned.
1361  */
1362 void
1363 thread_link(struct thread *td, struct ksegrp *kg)
1364 {
1365 	struct proc *p;
1366 
1367 	p = kg->kg_proc;
1368 	td->td_state    = TDS_INACTIVE;
1369 	td->td_proc     = p;
1370 	td->td_ksegrp   = kg;
1371 	td->td_last_kse = NULL;
1372 	td->td_flags    = 0;
1373 	td->td_kse      = NULL;
1374 
1375 	LIST_INIT(&td->td_contested);
1376 	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
1377 	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1378 	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1379 	p->p_numthreads++;
1380 	kg->kg_numthreads++;
1381 }
1382 
1383 void
1384 thread_unlink(struct thread *td)
1385 {
1386 	struct proc *p = td->td_proc;
1387 	struct ksegrp *kg = td->td_ksegrp;
1388 
1389 	mtx_assert(&sched_lock, MA_OWNED);
1390 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
1391 	p->p_numthreads--;
1392 	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1393 	kg->kg_numthreads--;
1394 	/* could clear a few other things here */
1395 }
1396 
1397 /*
1398  * Purge a ksegrp resource. When a ksegrp is preparing to
1399  * exit, it calls this function.
1400  */
1401 static void
1402 kse_purge_group(struct thread *td)
1403 {
1404 	struct ksegrp *kg;
1405 	struct kse *ke;
1406 
1407 	kg = td->td_ksegrp;
1408  	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1409 	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1410 		KASSERT(ke->ke_state == KES_IDLE,
1411 			("%s: wrong idle KSE state", __func__));
1412 		kse_unlink(ke);
1413 	}
1414 	KASSERT((kg->kg_kses == 1),
1415 		("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1416 	KASSERT((kg->kg_numupcalls == 0),
1417 	        ("%s: ksegrp still has %d upcall datas",
1418 		__func__, kg->kg_numupcalls));
1419 }
1420 
1421 /*
1422  * Purge a process's KSE resource. When a process is preparing to
1423  * exit, it calls kse_purge to release any extra KSE resources in
1424  * the process.
1425  */
1426 static void
1427 kse_purge(struct proc *p, struct thread *td)
1428 {
1429 	struct ksegrp *kg;
1430 	struct kse *ke;
1431 
1432  	KASSERT(p->p_numthreads == 1, ("bad thread number"));
1433 	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1434 		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1435 		p->p_numksegrps--;
1436 		/*
1437 		 * There is no ownership for KSE, after all threads
1438 		 * in the group exited, it is possible that some KSEs
1439 		 * were left in idle queue, gc them now.
1440 		 */
1441 		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1442 			KASSERT(ke->ke_state == KES_IDLE,
1443 			   ("%s: wrong idle KSE state", __func__));
1444 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1445 			kg->kg_idle_kses--;
1446 			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1447 			kg->kg_kses--;
1448 			kse_stash(ke);
1449 		}
1450 		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1451 		        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1452 		        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1453 		KASSERT((kg->kg_numupcalls == 0),
1454 		        ("%s: ksegrp still has %d upcall datas",
1455 			__func__, kg->kg_numupcalls));
1456 
1457 		if (kg != td->td_ksegrp)
1458 			ksegrp_stash(kg);
1459 	}
1460 	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1461 	p->p_numksegrps++;
1462 }
1463 
1464 /*
1465  * This function is intended to be used to initialize a spare thread
1466  * for upcall. Initialize thread's large data area outside sched_lock
1467  * for thread_schedule_upcall().
1468  */
1469 void
1470 thread_alloc_spare(struct thread *td, struct thread *spare)
1471 {
1472 	if (td->td_standin)
1473 		return;
1474 	if (spare == NULL)
1475 		spare = thread_alloc();
1476 	td->td_standin = spare;
1477 	bzero(&spare->td_startzero,
1478 	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1479 	spare->td_proc = td->td_proc;
1480 	spare->td_ucred = crhold(td->td_ucred);
1481 }
1482 
1483 /*
1484  * Create a thread and schedule it for upcall on the KSE given.
1485  * Use our thread's standin so that we don't have to allocate one.
1486  */
1487 struct thread *
1488 thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1489 {
1490 	struct thread *td2;
1491 
1492 	mtx_assert(&sched_lock, MA_OWNED);
1493 
1494 	/*
1495 	 * Schedule an upcall thread on specified kse_upcall,
1496 	 * the kse_upcall must be free.
1497 	 * td must have a spare thread.
1498 	 */
1499 	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1500 	if ((td2 = td->td_standin) != NULL) {
1501 		td->td_standin = NULL;
1502 	} else {
1503 		panic("no reserve thread when scheduling an upcall");
1504 		return (NULL);
1505 	}
1506 	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1507 	     td2, td->td_proc->p_pid, td->td_proc->p_comm);
1508 	bcopy(&td->td_startcopy, &td2->td_startcopy,
1509 	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1510 	thread_link(td2, ku->ku_ksegrp);
1511 	/* inherit blocked thread's context */
1512 	cpu_set_upcall(td2, td);
1513 	/* Let the new thread become owner of the upcall */
1514 	ku->ku_owner   = td2;
1515 	td2->td_upcall = ku;
1516 	td2->td_flags  = TDF_SA;
1517 	td2->td_pflags = TDP_UPCALLING;
1518 	td2->td_kse    = NULL;
1519 	td2->td_state  = TDS_CAN_RUN;
1520 	td2->td_inhibitors = 0;
1521 	SIGFILLSET(td2->td_sigmask);
1522 	SIG_CANTMASK(td2->td_sigmask);
1523 	sched_fork_thread(td, td2);
1524 	return (td2);	/* bogus.. should be a void function */
1525 }
1526 
1527 /*
1528  * It is only used when thread generated a trap and process is being
1529  * debugged.
1530  */
1531 void
1532 thread_signal_add(struct thread *td, int sig)
1533 {
1534 	struct proc *p;
1535 	siginfo_t siginfo;
1536 	struct sigacts *ps;
1537 	int error;
1538 
1539 	p = td->td_proc;
1540 	PROC_LOCK_ASSERT(p, MA_OWNED);
1541 	ps = p->p_sigacts;
1542 	mtx_assert(&ps->ps_mtx, MA_OWNED);
1543 
1544 	cpu_thread_siginfo(sig, 0, &siginfo);
1545 	mtx_unlock(&ps->ps_mtx);
1546 	PROC_UNLOCK(p);
1547 	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
1548 	if (error) {
1549 		PROC_LOCK(p);
1550 		sigexit(td, SIGILL);
1551 	}
1552 	PROC_LOCK(p);
1553 	SIGADDSET(td->td_sigmask, sig);
1554 	mtx_lock(&ps->ps_mtx);
1555 }
1556 
1557 void
1558 thread_switchout(struct thread *td)
1559 {
1560 	struct kse_upcall *ku;
1561 	struct thread *td2;
1562 
1563 	mtx_assert(&sched_lock, MA_OWNED);
1564 
1565 	/*
1566 	 * If the outgoing thread is in threaded group and has never
1567 	 * scheduled an upcall, decide whether this is a short
1568 	 * or long term event and thus whether or not to schedule
1569 	 * an upcall.
1570 	 * If it is a short term event, just suspend it in
1571 	 * a way that takes its KSE with it.
1572 	 * Select the events for which we want to schedule upcalls.
1573 	 * For now it's just sleep.
1574 	 * XXXKSE eventually almost any inhibition could do.
1575 	 */
1576 	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
1577 		/*
1578 		 * Release ownership of upcall, and schedule an upcall
1579 		 * thread, this new upcall thread becomes the owner of
1580 		 * the upcall structure.
1581 		 */
1582 		ku = td->td_upcall;
1583 		ku->ku_owner = NULL;
1584 		td->td_upcall = NULL;
1585 		td->td_flags &= ~TDF_CAN_UNBIND;
1586 		td2 = thread_schedule_upcall(td, ku);
1587 		setrunqueue(td2);
1588 	}
1589 }
1590 
1591 /*
1592  * Setup done on the thread when it enters the kernel.
1593  * XXXKSE Presently only for syscalls but eventually all kernel entries.
1594  */
1595 void
1596 thread_user_enter(struct proc *p, struct thread *td)
1597 {
1598 	struct ksegrp *kg;
1599 	struct kse_upcall *ku;
1600 	struct kse_thr_mailbox *tmbx;
1601 	uint32_t tflags;
1602 
1603 	kg = td->td_ksegrp;
1604 
1605 	/*
1606 	 * First check that we shouldn't just abort.
1607 	 * But check if we are the single thread first!
1608 	 */
1609 	if (p->p_flag & P_SINGLE_EXIT) {
1610 		PROC_LOCK(p);
1611 		mtx_lock_spin(&sched_lock);
1612 		thread_stopped(p);
1613 		thread_exit();
1614 		/* NOTREACHED */
1615 	}
1616 
1617 	/*
1618 	 * If we are doing a syscall in a KSE environment,
1619 	 * note where our mailbox is. There is always the
1620 	 * possibility that we could do this lazily (in kse_reassign()),
1621 	 * but for now do it every time.
1622 	 */
1623 	kg = td->td_ksegrp;
1624 	if (td->td_flags & TDF_SA) {
1625 		ku = td->td_upcall;
1626 		KASSERT(ku, ("%s: no upcall owned", __func__));
1627 		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1628 		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
1629 		ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
1630 		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1631 		if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
1632 		    (ku->ku_mflags & KMF_NOUPCALL)) {
1633 			td->td_mailbox = NULL;
1634 		} else {
1635 			if (td->td_standin == NULL)
1636 				thread_alloc_spare(td, NULL);
1637 			tflags = fuword32(&tmbx->tm_flags);
1638 			/*
1639 			 * On some architectures, TP register points to thread
1640 			 * mailbox but not points to kse mailbox, and userland
1641 			 * can not atomically clear km_curthread, but can
1642 			 * use TP register, and set TMF_NOUPCALL in thread
1643 			 * flag	to indicate a critical region.
1644 			 */
1645 			if (tflags & TMF_NOUPCALL) {
1646 				td->td_mailbox = NULL;
1647 			} else {
1648 				td->td_mailbox = tmbx;
1649 				mtx_lock_spin(&sched_lock);
1650 				td->td_flags |= TDF_CAN_UNBIND;
1651 				mtx_unlock_spin(&sched_lock);
1652 			}
1653 		}
1654 	}
1655 }
1656 
1657 /*
1658  * The extra work we go through if we are a threaded process when we
1659  * return to userland.
1660  *
1661  * If we are a KSE process and returning to user mode, check for
1662  * extra work to do before we return (e.g. for more syscalls
1663  * to complete first).  If we were in a critical section, we should
1664  * just return to let it finish. Same if we were in the UTS (in
1665  * which case the mailbox's context's busy indicator will be set).
1666  * The only traps we suport will have set the mailbox.
1667  * We will clear it here.
1668  */
1669 int
1670 thread_userret(struct thread *td, struct trapframe *frame)
1671 {
1672 	int error = 0, upcalls, uts_crit;
1673 	struct kse_upcall *ku;
1674 	struct ksegrp *kg, *kg2;
1675 	struct proc *p;
1676 	struct timespec ts;
1677 
1678 	p = td->td_proc;
1679 	kg = td->td_ksegrp;
1680 	ku = td->td_upcall;
1681 
1682 	/* Nothing to do with bound thread */
1683 	if (!(td->td_flags & TDF_SA))
1684 		return (0);
1685 
1686 	/*
1687 	 * Stat clock interrupt hit in userland, it
1688 	 * is returning from interrupt, charge thread's
1689 	 * userland time for UTS.
1690 	 */
1691 	if (td->td_flags & TDF_USTATCLOCK) {
1692 		thread_update_usr_ticks(td, 1);
1693 		mtx_lock_spin(&sched_lock);
1694 		td->td_flags &= ~TDF_USTATCLOCK;
1695 		mtx_unlock_spin(&sched_lock);
1696 		if (kg->kg_completed ||
1697 		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
1698 			thread_user_enter(p, td);
1699 	}
1700 
1701 	uts_crit = (td->td_mailbox == NULL);
1702 	/*
1703 	 * Optimisation:
1704 	 * This thread has not started any upcall.
1705 	 * If there is no work to report other than ourself,
1706 	 * then it can return direct to userland.
1707 	 */
1708 	if (TD_CAN_UNBIND(td)) {
1709 		mtx_lock_spin(&sched_lock);
1710 		td->td_flags &= ~TDF_CAN_UNBIND;
1711 		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
1712 		    (kg->kg_completed == NULL) &&
1713 		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1714 		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
1715 			mtx_unlock_spin(&sched_lock);
1716 			thread_update_usr_ticks(td, 0);
1717 			nanotime(&ts);
1718 			error = copyout(&ts,
1719 				(caddr_t)&ku->ku_mailbox->km_timeofday,
1720 				sizeof(ts));
1721 			td->td_mailbox = 0;
1722 			ku->ku_mflags = 0;
1723 			if (error)
1724 				goto out;
1725 			return (0);
1726 		}
1727 		mtx_unlock_spin(&sched_lock);
1728 		thread_export_context(td, 0);
1729 		/*
1730 		 * There is something to report, and we own an upcall
1731 		 * strucuture, we can go to userland.
1732 		 * Turn ourself into an upcall thread.
1733 		 */
1734 		td->td_pflags |= TDP_UPCALLING;
1735 	} else if (td->td_mailbox && (ku == NULL)) {
1736 		thread_export_context(td, 1);
1737 		PROC_LOCK(p);
1738 		/*
1739 		 * There are upcall threads waiting for
1740 		 * work to do, wake one of them up.
1741 		 * XXXKSE Maybe wake all of them up.
1742 		 */
1743 		if (kg->kg_upsleeps)
1744 			wakeup_one(&kg->kg_completed);
1745 		mtx_lock_spin(&sched_lock);
1746 		thread_stopped(p);
1747 		thread_exit();
1748 		/* NOTREACHED */
1749 	}
1750 
1751 	KASSERT(ku != NULL, ("upcall is NULL\n"));
1752 	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1753 
1754 	if (p->p_numthreads > max_threads_per_proc) {
1755 		max_threads_hits++;
1756 		PROC_LOCK(p);
1757 		mtx_lock_spin(&sched_lock);
1758 		p->p_maxthrwaits++;
1759 		while (p->p_numthreads > max_threads_per_proc) {
1760 			upcalls = 0;
1761 			FOREACH_KSEGRP_IN_PROC(p, kg2) {
1762 				if (kg2->kg_numupcalls == 0)
1763 					upcalls++;
1764 				else
1765 					upcalls += kg2->kg_numupcalls;
1766 			}
1767 			if (upcalls >= max_threads_per_proc)
1768 				break;
1769 			mtx_unlock_spin(&sched_lock);
1770 			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1771 			    "maxthreads", 0)) {
1772 				mtx_lock_spin(&sched_lock);
1773 				break;
1774 			} else {
1775 				mtx_lock_spin(&sched_lock);
1776 			}
1777 		}
1778 		p->p_maxthrwaits--;
1779 		mtx_unlock_spin(&sched_lock);
1780 		PROC_UNLOCK(p);
1781 	}
1782 
1783 	if (td->td_pflags & TDP_UPCALLING) {
1784 		uts_crit = 0;
1785 		kg->kg_nextupcall = ticks+kg->kg_upquantum;
1786 		/*
1787 		 * There is no more work to do and we are going to ride
1788 		 * this thread up to userland as an upcall.
1789 		 * Do the last parts of the setup needed for the upcall.
1790 		 */
1791 		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1792 		    td, td->td_proc->p_pid, td->td_proc->p_comm);
1793 
1794 		td->td_pflags &= ~TDP_UPCALLING;
1795 		if (ku->ku_flags & KUF_DOUPCALL) {
1796 			mtx_lock_spin(&sched_lock);
1797 			ku->ku_flags &= ~KUF_DOUPCALL;
1798 			mtx_unlock_spin(&sched_lock);
1799 		}
1800 		/*
1801 		 * Set user context to the UTS
1802 		 */
1803 		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
1804 			cpu_set_upcall_kse(td, ku);
1805 			error = suword(&ku->ku_mailbox->km_curthread, 0);
1806 			if (error)
1807 				goto out;
1808 		}
1809 
1810 		/*
1811 		 * Unhook the list of completed threads.
1812 		 * anything that completes after this gets to
1813 		 * come in next time.
1814 		 * Put the list of completed thread mailboxes on
1815 		 * this KSE's mailbox.
1816 		 */
1817 		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
1818 		    (error = thread_link_mboxes(kg, ku)) != 0)
1819 			goto out;
1820 	}
1821 	if (!uts_crit) {
1822 		nanotime(&ts);
1823 		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
1824 	}
1825 
1826 out:
1827 	if (error) {
1828 		/*
1829 		 * Things are going to be so screwed we should just kill
1830 		 * the process.
1831 		 * how do we do that?
1832 		 */
1833 		PROC_LOCK(td->td_proc);
1834 		psignal(td->td_proc, SIGSEGV);
1835 		PROC_UNLOCK(td->td_proc);
1836 	} else {
1837 		/*
1838 		 * Optimisation:
1839 		 * Ensure that we have a spare thread available,
1840 		 * for when we re-enter the kernel.
1841 		 */
1842 		if (td->td_standin == NULL)
1843 			thread_alloc_spare(td, NULL);
1844 	}
1845 
1846 	ku->ku_mflags = 0;
1847 	/*
1848 	 * Clear thread mailbox first, then clear system tick count.
1849 	 * The order is important because thread_statclock() use
1850 	 * mailbox pointer to see if it is an userland thread or
1851 	 * an UTS kernel thread.
1852 	 */
1853 	td->td_mailbox = NULL;
1854 	td->td_usticks = 0;
1855 	return (error);	/* go sync */
1856 }
1857 
1858 /*
1859  * Enforce single-threading.
1860  *
1861  * Returns 1 if the caller must abort (another thread is waiting to
1862  * exit the process or similar). Process is locked!
1863  * Returns 0 when you are successfully the only thread running.
1864  * A process has successfully single threaded in the suspend mode when
1865  * There are no threads in user mode. Threads in the kernel must be
1866  * allowed to continue until they get to the user boundary. They may even
1867  * copy out their return values and data before suspending. They may however be
1868  * accellerated in reaching the user boundary as we will wake up
1869  * any sleeping threads that are interruptable. (PCATCH).
1870  */
1871 int
1872 thread_single(int force_exit)
1873 {
1874 	struct thread *td;
1875 	struct thread *td2;
1876 	struct proc *p;
1877 
1878 	td = curthread;
1879 	p = td->td_proc;
1880 	mtx_assert(&Giant, MA_OWNED);
1881 	PROC_LOCK_ASSERT(p, MA_OWNED);
1882 	KASSERT((td != NULL), ("curthread is NULL"));
1883 
1884 	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
1885 		return (0);
1886 
1887 	/* Is someone already single threading? */
1888 	if (p->p_singlethread)
1889 		return (1);
1890 
1891 	if (force_exit == SINGLE_EXIT) {
1892 		p->p_flag |= P_SINGLE_EXIT;
1893 	} else
1894 		p->p_flag &= ~P_SINGLE_EXIT;
1895 	p->p_flag |= P_STOPPED_SINGLE;
1896 	mtx_lock_spin(&sched_lock);
1897 	p->p_singlethread = td;
1898 	while ((p->p_numthreads - p->p_suspcount) != 1) {
1899 		FOREACH_THREAD_IN_PROC(p, td2) {
1900 			if (td2 == td)
1901 				continue;
1902 			td2->td_flags |= TDF_ASTPENDING;
1903 			if (TD_IS_INHIBITED(td2)) {
1904 				if (force_exit == SINGLE_EXIT) {
1905 					if (TD_IS_SUSPENDED(td2)) {
1906 						thread_unsuspend_one(td2);
1907 					}
1908 					if (TD_ON_SLEEPQ(td2) &&
1909 					    (td2->td_flags & TDF_SINTR)) {
1910 						if (td2->td_flags & TDF_CVWAITQ)
1911 							cv_abort(td2);
1912 						else
1913 							abortsleep(td2);
1914 					}
1915 				} else {
1916 					if (TD_IS_SUSPENDED(td2))
1917 						continue;
1918 					/*
1919 					 * maybe other inhibitted states too?
1920 					 * XXXKSE Is it totally safe to
1921 					 * suspend a non-interruptable thread?
1922 					 */
1923 					if (td2->td_inhibitors &
1924 					    (TDI_SLEEPING | TDI_SWAPPED))
1925 						thread_suspend_one(td2);
1926 				}
1927 			}
1928 		}
1929 		/*
1930 		 * Maybe we suspended some threads.. was it enough?
1931 		 */
1932 		if ((p->p_numthreads - p->p_suspcount) == 1)
1933 			break;
1934 
1935 		/*
1936 		 * Wake us up when everyone else has suspended.
1937 		 * In the mean time we suspend as well.
1938 		 */
1939 		thread_suspend_one(td);
1940 		DROP_GIANT();
1941 		PROC_UNLOCK(p);
1942 		mi_switch(SW_VOL);
1943 		mtx_unlock_spin(&sched_lock);
1944 		PICKUP_GIANT();
1945 		PROC_LOCK(p);
1946 		mtx_lock_spin(&sched_lock);
1947 	}
1948 	if (force_exit == SINGLE_EXIT) {
1949 		if (td->td_upcall)
1950 			upcall_remove(td);
1951 		kse_purge(p, td);
1952 	}
1953 	mtx_unlock_spin(&sched_lock);
1954 	return (0);
1955 }
1956 
1957 /*
1958  * Called in from locations that can safely check to see
1959  * whether we have to suspend or at least throttle for a
1960  * single-thread event (e.g. fork).
1961  *
1962  * Such locations include userret().
1963  * If the "return_instead" argument is non zero, the thread must be able to
1964  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1965  *
1966  * The 'return_instead' argument tells the function if it may do a
1967  * thread_exit() or suspend, or whether the caller must abort and back
1968  * out instead.
1969  *
1970  * If the thread that set the single_threading request has set the
1971  * P_SINGLE_EXIT bit in the process flags then this call will never return
1972  * if 'return_instead' is false, but will exit.
1973  *
1974  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1975  *---------------+--------------------+---------------------
1976  *       0       | returns 0          |   returns 0 or 1
1977  *               | when ST ends       |   immediatly
1978  *---------------+--------------------+---------------------
1979  *       1       | thread exits       |   returns 1
1980  *               |                    |  immediatly
1981  * 0 = thread_exit() or suspension ok,
1982  * other = return error instead of stopping the thread.
1983  *
1984  * While a full suspension is under effect, even a single threading
1985  * thread would be suspended if it made this call (but it shouldn't).
1986  * This call should only be made from places where
1987  * thread_exit() would be safe as that may be the outcome unless
1988  * return_instead is set.
1989  */
1990 int
1991 thread_suspend_check(int return_instead)
1992 {
1993 	struct thread *td;
1994 	struct proc *p;
1995 
1996 	td = curthread;
1997 	p = td->td_proc;
1998 	PROC_LOCK_ASSERT(p, MA_OWNED);
1999 	while (P_SHOULDSTOP(p)) {
2000 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
2001 			KASSERT(p->p_singlethread != NULL,
2002 			    ("singlethread not set"));
2003 			/*
2004 			 * The only suspension in action is a
2005 			 * single-threading. Single threader need not stop.
2006 			 * XXX Should be safe to access unlocked
2007 			 * as it can only be set to be true by us.
2008 			 */
2009 			if (p->p_singlethread == td)
2010 				return (0);	/* Exempt from stopping. */
2011 		}
2012 		if (return_instead)
2013 			return (1);
2014 
2015 		mtx_lock_spin(&sched_lock);
2016 		thread_stopped(p);
2017 		/*
2018 		 * If the process is waiting for us to exit,
2019 		 * this thread should just suicide.
2020 		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
2021 		 */
2022 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
2023 			while (mtx_owned(&Giant))
2024 				mtx_unlock(&Giant);
2025 			if (p->p_flag & P_SA)
2026 				thread_exit();
2027 			else
2028 				thr_exit1();
2029 		}
2030 
2031 		/*
2032 		 * When a thread suspends, it just
2033 		 * moves to the processes's suspend queue
2034 		 * and stays there.
2035 		 */
2036 		thread_suspend_one(td);
2037 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
2038 			if (p->p_numthreads == p->p_suspcount) {
2039 				thread_unsuspend_one(p->p_singlethread);
2040 			}
2041 		}
2042 		DROP_GIANT();
2043 		PROC_UNLOCK(p);
2044 		mi_switch(SW_INVOL);
2045 		mtx_unlock_spin(&sched_lock);
2046 		PICKUP_GIANT();
2047 		PROC_LOCK(p);
2048 	}
2049 	return (0);
2050 }
2051 
2052 void
2053 thread_suspend_one(struct thread *td)
2054 {
2055 	struct proc *p = td->td_proc;
2056 
2057 	mtx_assert(&sched_lock, MA_OWNED);
2058 	PROC_LOCK_ASSERT(p, MA_OWNED);
2059 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
2060 	p->p_suspcount++;
2061 	TD_SET_SUSPENDED(td);
2062 	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
2063 	/*
2064 	 * Hack: If we are suspending but are on the sleep queue
2065 	 * then we are in msleep or the cv equivalent. We
2066 	 * want to look like we have two Inhibitors.
2067 	 * May already be set.. doesn't matter.
2068 	 */
2069 	if (TD_ON_SLEEPQ(td))
2070 		TD_SET_SLEEPING(td);
2071 }
2072 
2073 void
2074 thread_unsuspend_one(struct thread *td)
2075 {
2076 	struct proc *p = td->td_proc;
2077 
2078 	mtx_assert(&sched_lock, MA_OWNED);
2079 	PROC_LOCK_ASSERT(p, MA_OWNED);
2080 	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
2081 	TD_CLR_SUSPENDED(td);
2082 	p->p_suspcount--;
2083 	setrunnable(td);
2084 }
2085 
2086 /*
2087  * Allow all threads blocked by single threading to continue running.
2088  */
2089 void
2090 thread_unsuspend(struct proc *p)
2091 {
2092 	struct thread *td;
2093 
2094 	mtx_assert(&sched_lock, MA_OWNED);
2095 	PROC_LOCK_ASSERT(p, MA_OWNED);
2096 	if (!P_SHOULDSTOP(p)) {
2097 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2098 			thread_unsuspend_one(td);
2099 		}
2100 	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
2101 	    (p->p_numthreads == p->p_suspcount)) {
2102 		/*
2103 		 * Stopping everything also did the job for the single
2104 		 * threading request. Now we've downgraded to single-threaded,
2105 		 * let it continue.
2106 		 */
2107 		thread_unsuspend_one(p->p_singlethread);
2108 	}
2109 }
2110 
2111 void
2112 thread_single_end(void)
2113 {
2114 	struct thread *td;
2115 	struct proc *p;
2116 
2117 	td = curthread;
2118 	p = td->td_proc;
2119 	PROC_LOCK_ASSERT(p, MA_OWNED);
2120 	p->p_flag &= ~P_STOPPED_SINGLE;
2121 	mtx_lock_spin(&sched_lock);
2122 	p->p_singlethread = NULL;
2123 	/*
2124 	 * If there are other threads they mey now run,
2125 	 * unless of course there is a blanket 'stop order'
2126 	 * on the process. The single threader must be allowed
2127 	 * to continue however as this is a bad place to stop.
2128 	 */
2129 	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
2130 		while (( td = TAILQ_FIRST(&p->p_suspended))) {
2131 			thread_unsuspend_one(td);
2132 		}
2133 	}
2134 	mtx_unlock_spin(&sched_lock);
2135 }
2136