xref: /freebsd/sys/kern/kern_switch.c (revision 71fad9fdeefd5d874768802125f98ea6450cfa5c)
/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/***

Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue in order to run, we know it was
associated with the highest priority thread in the queue (at the head of
the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, the pointer must no
longer reference it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it, and therefore that the pointer must be made 'invalid'
(NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE, i.e. whether it belongs
'earlier' on the list than that thread or later. If it is earlier, the
KSE is removed from the last assigned thread (which is now not assigned
a KSE) and reassigned to the new thread, which is placed earlier in the
list. The pointer is then backed up to the previous thread (which may or
may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.

*/

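/*
 * An illustrative walkthrough (added commentary, not from the original
 * source): with N = 2 processors and X = 0 KSEs currently running,
 * M = 2 KSEs sit on the run queue. If the ksegrp queues three runnable
 * threads at priorities 10, 20 and 30, the two KSEs inherit priorities
 * 10 and 20, and kg->kg_last_assigned points at the priority-20 thread.
 * Should a new thread at priority 15 become runnable, it sorts earlier
 * than the last assigned thread, so it commandeers that thread's KSE,
 * and last_assigned ends up pointing at the priority-15 thread.
 */
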
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

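/*
 * For example, with the customary runq.h values (an assumption here, as
 * they are not defined in this file): RQ_NQS = 64 run queues, RQB_BPW =
 * 32 status bits per word and RQB_LEN = 2 words, so 32 * 2 == 64 and the
 * assertion holds; it fails at compile time if the status bitmap no
 * longer covers every queue.
 */
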
/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static void runq_readjust(struct runq *rq, struct kse *ke);
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/

/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

retry:
	if ((ke = runq_choose(&runq))) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_flags & TDF_UNBOUND) {
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			if (kg->kg_last_assigned == td) {
				KASSERT(TAILQ_PREV(td, threadqueue, td_runq)
				    == NULL,
				    ("choosethread: last_assigned not at head"));
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			/*
			 * If we have started running an upcall,
			 * then TDF_UNBOUND was set because the thread was
			 * created without a KSE. Now that we have one,
			 * and it is our time to run, we make sure
			 * that BOUND semantics apply for the rest of
			 * the journey to userland, and into the UTS.
			 */
#ifdef	NOTYET
			if (td->td_flags & TDF_UPCALLING)
				td->td_flags &= ~TDF_UNBOUND;
#endif
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0))
		goto retry;
	TD_SET_RUNNING(td);
	return (td);
}

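/*
 * A minimal sketch of the consumer side (hypothetical, for illustration
 * only; the real call sites live in the context-switch path, which is
 * not part of this file):
 */
#if 0
static void
switch_sketch(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);	/* run queues need sched_lock */
	td = choosethread();	/* never NULL: falls back to idlethread */
	/* ... load td's register context and resume it ... */
}
#endif
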
/*
 * Given a KSE (now surplus), either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes the kse is not linked to any threads any more (has been cleaned).
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;

	kg = ke->ke_ksegrp;

	/*
	 * Find the first unassigned thread:
	 * if there is a 'last assigned' then see what's next,
	 * otherwise look at what is first.
	 */
	if ((td = kg->kg_last_assigned)) {
		td = TAILQ_NEXT(td, td_runq);
	} else {
		td = TAILQ_FIRST(&kg->kg_runq);
	}

	/*
	 * If we found one, assign it the kse; otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		runq_add(&runq, ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
	} else {
		ke->ke_state = KES_IDLE;
		ke->ke_thread = NULL;
		TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses++;
		CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
	}
}

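/*
 * Illustration (added commentary, not from the original source): if the
 * ksegrp queue holds threads at priorities 10, 20 and 30 with
 * kg_last_assigned on the 20, a KSE handed to kse_reassign() goes to the
 * priority-30 thread and last_assigned advances to it; if every queued
 * thread already has a KSE, or none are queued, the KSE parks on kg_iq
 * in KES_IDLE state instead.
 */
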
int
kserunnable(void)
{
	return (runq_check(&runq));
}

/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
 */
void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_ON_RUNQ(td), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	/*
	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
	 * threads are BOUND.
	 */
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if ((td->td_flags & TDF_UNBOUND) == 0) {
		/* Bring its kse with it, leave the thread attached */
		runq_remove(&runq, ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		td->td_kse = NULL;
		if ((td3 = TAILQ_NEXT(td2, td_runq))) {
			KASSERT(td3 != td, ("td3 somehow matched td"));
			/*
			 * Give the next unassigned thread to the KSE
			 * so the number of runnable KSEs remains
			 * constant.
			 */
			td3->td_kse = ke;
			ke->ke_thread = td3;
			kg->kg_last_assigned = td3;
			runq_readjust(&runq, ke);
		} else {
			/*
			 * There is no unassigned thread.
			 * If we were the last assigned one,
			 * adjust the last assigned pointer back
			 * one, which may result in NULL.
			 */
			if (td == td2) {
				kg->kg_last_assigned =
				    TAILQ_PREV(td, threadqueue, td_runq);
			}
			runq_remove(&runq, ke);
			KASSERT((ke->ke_state != KES_IDLE),
			    ("kse already idle"));
			ke->ke_state = KES_IDLE;
			ke->ke_thread = NULL;
			TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses++;
		}
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_flags & TDF_UNBOUND) == 0) {
		KASSERT((td->td_kse != NULL),
		    ("queueing BAD thread to run queue"));
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		runq_add(&runq, td->td_kse);
		return;
	}
	/*
	 * Ok, so we are threading with this thread.
	 * We don't have a KSE, see if we can get one.
	 */
	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		/*
		 * We will need a KSE: see if there is one.
		 * First look for a free one, before getting desperate.
		 * If we can't get one, our priority is not high enough;
		 * that's ok.
		 */
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
			runq_remove(&runq, ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		runq_add(&runq, ke);
	}
}
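
/*
 * An illustrative trace of the three placement cases above (added
 * commentary, not from the original source): with queued threads at
 * priorities 10 and 30 and kg_last_assigned on the 10, a newly queued
 * priority-5 thread sorts ahead of last_assigned and simply takes the
 * KSE it brought (last_assigned is left alone); a priority-20 thread
 * sorts after it, so the KSE goes to the thread following last_assigned
 * (here the new thread itself) and the pointer advances; and when
 * last_assigned is NULL, the head of the ksegrp queue gets the KSE.
 */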

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/* Critical sections that prevent preemption. */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}

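/*
 * A usage sketch (hypothetical, for illustration only): critical
 * sections nest, and only the outermost level touches the hardware
 * state via cpu_critical_enter()/cpu_critical_exit().
 */
#if 0
static void
percpu_sketch(void)
{
	critical_enter();	/* outermost: disables preemption */
	critical_enter();	/* nested: only increments td_critnest */
	/* ... safely work on per-CPU data ... */
	critical_exit();	/* still inside the critical section */
	critical_exit();	/* outermost exit re-enables preemption */
}
#endif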

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

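/*
 * Worked example (assuming the usual runq.h definitions, where
 * RQB_L2BPW = 5 so each status word covers 32 queues and RQB_FFS()
 * yields the zero-based index of the lowest set bit): if word 0 is
 * empty and bit 8 is the lowest bit set in word 1, the first non-empty
 * queue is pri = 8 + (1 << 5) = 40.
 */
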
/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("runq_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("runq_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_add: process swapped out"));
	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;
}

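/*
 * For example, with RQ_PPQ = 4 (the customary value, assumed here rather
 * than defined in this file), thread priorities 128-131 all map to run
 * queue index 32, so nearby priorities share a queue and are serviced
 * round-robin by the insert-at-tail/remove-from-head discipline.
 */
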
/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find and remove the highest priority KSE from the run queue.
 * Returns NULL if there are no runnable KSEs; choosethread()
 * substitutes the per-cpu idle thread in that case.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		TAILQ_REMOVE(rqh, ke, ke_procq);
		ke->ke_ksegrp->kg_runq_kses--;
		if (TAILQ_EMPTY(rqh)) {
			CTR0(KTR_RUNQ, "runq_choose: empty");
			runq_clrbit(rq, pri);
		}

		ke->ke_state = KES_THREAD;
		KASSERT((ke->ke_thread != NULL),
		    ("runq_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("runq_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("runq_choose: process swapped out"));
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The KSE is left in KES_THREAD state; the caller is responsible for any
 * further state change.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}

static void
runq_readjust(struct runq *rq, struct kse *ke)
{

	if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) {
		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

#if 0
void
thread_sanity_check(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2;
	unsigned int prevpri;
	int saw_lastassigned;
	int unassigned;
	int assigned;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (kg != &p->p_ksegrp) {
		panic("wrong ksegrp");
	}

	if (ke) {
		if (ke != &p->p_kse) {
			panic("wrong kse");
		}
		if (ke->ke_thread != td) {
			panic("wrong thread");
		}
	}

	if ((p->p_flag & P_KSES) == 0) {
		if (ke == NULL) {
			panic("non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panic("thread runqueue unsorted");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panic("unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panic("lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panic("last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panic("mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panic("last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panic("where on earth does lastassigned point?");
		}
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panic("BOUND thread with no KSE");
				}
			}
		}
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panic("wrong number in runnable");
		}
#endif
	}
}
#endif