xref: /freebsd/sys/kern/kern_switch.c (revision faaa20f6397d1d8023add8019b3136062dc3ec5e)
1 /*
2  * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 /***
30 
31 Here is the logic:
32 
33 If there are N processors, then there are at most N KSEs (kernel
34 schedulable entities) working to process threads that belong to a
35 KSEGROUP (kg). If there are X of these KSEs actually running at the
36 moment in question, then there are at most M (M = N - X) of these KSEs on
37 the run queue, as running KSEs are not on the queue.
38 
39 Runnable threads are queued off the KSEGROUP in priority order.
40 If there are M or more threads runnable, the top M threads
41 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
42 their priority from those threads and are put on the run queue.
43 
44 The last thread that had a priority high enough to have a KSE associated
45 with it, AND IS ON THE RUN QUEUE, is pointed to by
46 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
47 assigned, either because all the available KSEs are actively running or
48 because there are no threads queued, that pointer is NULL.
49 
50 When a KSE is removed from the run queue to be run, we know it was
51 associated with the highest priority thread in the queue (at the head
52 of the queue). If it is also the last assigned, we know M was 1 and must
53 now be 0. Since the thread is no longer queued, that pointer must no
54 longer point to it. Since we know there were no more KSEs available
55 (M was 1 and is now 0), and since we are not FREEING our KSE
56 but using it, we know there are STILL no more KSEs available. We can
57 therefore prove that the next thread in the ksegrp list will not have a
58 KSE to assign to it, so the pointer must be made 'invalid' (NULL).
59 
60 The pointer exists so that when a new thread is made runnable, it can
61 have its priority compared with the last assigned thread to see if
62 it should 'steal' its KSE or not, i.e. whether it is 'earlier'
63 on the list than that thread or later. If it is earlier, then the KSE is
64 removed from the last assigned (which is now not assigned a KSE)
65 and reassigned to the new thread, which is placed earlier in the list.
66 The pointer is then backed up to the previous thread (which may or may not
67 be the new thread).
68 
69 When a thread sleeps or is removed, the KSE becomes available and if there
70 are queued threads that are not assigned KSEs, the highest priority one of
71 them is assigned the KSE, which is then placed back on the run queue at
72 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
73 to point to it.
74 
75 The following diagram shows 2 KSEs and 3 threads from a single process.
76 
77  RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
78               \    \____
79                \        \
80     KSEGROUP---thread--thread--thread    (queued in priority order)
81         \                 /
82          \_______________/
83           (last_assigned)
84 
85 The result of this scheme is that the M available KSEs are always
86 queued at the priorities they have inherited from the M highest priority
87 threads for that KSEGROUP. If this situation changes, the KSEs are
88 reassigned to keep this true.
89 
90 */
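
/*
 * Illustrative sketch only, kept under "#if 0" so it is never compiled:
 * given the scheme above, the first queued thread that does not yet own a
 * KSE is the one immediately after kg_last_assigned, or the head of the
 * ksegrp run queue when nothing is assigned.  The helper below is
 * hypothetical (not part of the kernel); the real lookup is done inline in
 * kse_reassign() further down.
 */
#if 0
static struct thread *
ksegrp_first_unassigned(struct ksegrp *kg)
{
	struct thread *td;

	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);	/* first thread past the assigned set */
	else
		td = TAILQ_FIRST(&kg->kg_runq);	/* nothing assigned; start at the head */
	return (td);
}
#endif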
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/kernel.h>
95 #include <sys/ktr.h>
96 #include <sys/lock.h>
97 #include <sys/mutex.h>
98 #include <sys/proc.h>
99 #include <sys/queue.h>
100 #include <sys/sched.h>
101 #if defined(SMP) && defined(__i386__)
102 #include <sys/smp.h>
103 #endif
104 #include <machine/critical.h>
105 
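/* The run queue status bitmap must provide exactly one bit per queue. */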
106 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
107 
108 void panc(char *string1, char *string2);
109 
110 #if 0
111 static void runq_readjust(struct runq *rq, struct kse *ke);
112 #endif
113 /************************************************************************
114  * Functions that manipulate runnability from a thread perspective.	*
115  ************************************************************************/
116 /*
117  * Select the KSE that will be run next.  From that find the thread, and
118  * remove it from the KSEGRP's run queue.  If thread clustering is ever
119  * implemented, this is where it will be done.
120  */
121 struct thread *
122 choosethread(void)
123 {
124 	struct kse *ke;
125 	struct thread *td;
126 	struct ksegrp *kg;
127 
128 #if defined(SMP) && defined(__i386__)
129 	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
130 		/* Shutting down, run idlethread on APs */
131 		td = PCPU_GET(idlethread);
132 		ke = td->td_kse;
133 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
134 		ke->ke_flags |= KEF_DIDRUN;
135 		TD_SET_RUNNING(td);
136 		return (td);
137 	}
138 #endif
139 
140 retry:
141 	ke = sched_choose();
142 	if (ke) {
143 		td = ke->ke_thread;
144 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
145 		kg = ke->ke_ksegrp;
146 		if (td->td_proc->p_flag & P_THREADED) {
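			/*
			 * The chosen thread is leaving the ksegrp run queue;
			 * if it was the last thread with an assigned KSE,
			 * back kg_last_assigned up to the previous queued
			 * thread (or NULL) before unlinking it.
			 */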
147 			if (kg->kg_last_assigned == td) {
148 				kg->kg_last_assigned = TAILQ_PREV(td,
149 				    threadqueue, td_runq);
150 			}
151 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
152 		}
153 		kg->kg_runnable--;
154 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
155 		    td, td->td_priority);
156 	} else {
157 		/* Simulate sched_choose() having returned the idle thread */
158 		td = PCPU_GET(idlethread);
159 		ke = td->td_kse;
160 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
161 	}
162 	ke->ke_flags |= KEF_DIDRUN;
163 
164 	/*
165 	 * If we are in panic, only allow system threads,
166 	 * plus the one we are running in, to be run.
167 	 */
168 	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
169 	    (td->td_flags & TDF_INPANIC) == 0)) {
170 		/* note that it is no longer on the run queue */
171 		TD_SET_CAN_RUN(td);
172 		goto retry;
173 	}
174 
175 	TD_SET_RUNNING(td);
176 	return (td);
177 }
178 
179 /*
180  * Given a surplus KSE, either assign a new runnable thread to it
181  * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
182  * Assumes that the original thread is not runnable.
183  */
184 void
185 kse_reassign(struct kse *ke)
186 {
187 	struct ksegrp *kg;
188 	struct thread *td;
189 	struct thread *original;
190 
191 	mtx_assert(&sched_lock, MA_OWNED);
192 	original = ke->ke_thread;
193 	KASSERT(original == NULL || TD_IS_INHIBITED(original),
194     	    ("reassigning KSE with runnable thread"));
195 	kg = ke->ke_ksegrp;
196 	if (original)
197 		original->td_kse = NULL;
198 
199 	/*
200 	 * Find the first unassigned thread
201 	 */
202 	if ((td = kg->kg_last_assigned) != NULL)
203 		td = TAILQ_NEXT(td, td_runq);
204 	else
205 		td = TAILQ_FIRST(&kg->kg_runq);
206 
207 	/*
208 	 * If we found one, assign it the kse, otherwise idle the kse.
209 	 */
210 	if (td) {
211 		kg->kg_last_assigned = td;
212 		td->td_kse = ke;
213 		ke->ke_thread = td;
214 		sched_add(ke);
215 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
216 		return;
217 	}
218 
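	/*
	 * No unassigned thread was found; park the KSE on the ksegrp's idle
	 * list, from which setrunqueue() can claim it later.
	 */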
219 	ke->ke_state = KES_IDLE;
220 	ke->ke_thread = NULL;
221 	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
222 	kg->kg_idle_kses++;
223 	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
224 	return;
225 }
226 
227 #if 0
228 /*
229  * Remove a thread from its KSEGRP's run queue.
230  * This in turn may remove it from a KSE if it was already assigned
231  * to one, possibly causing a new thread to be assigned to the KSE
232  * and the KSE getting a new priority.
233  */
234 static void
235 remrunqueue(struct thread *td)
236 {
237 	struct thread *td2, *td3;
238 	struct ksegrp *kg;
239 	struct kse *ke;
240 
241 	mtx_assert(&sched_lock, MA_OWNED);
242 	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
243 	kg = td->td_ksegrp;
244 	ke = td->td_kse;
245 	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
246 	kg->kg_runnable--;
247 	TD_SET_CAN_RUN(td);
248 	/*
249 	 * If it is not a threaded process, take the shortcut.
250 	 */
251 	if ((td->td_proc->p_flag & P_THREADED) == 0) {
252 		/* Bring its kse with it, leave the thread attached */
253 		sched_rem(ke);
254 		ke->ke_state = KES_THREAD;
255 		return;
256 	}
257    	td3 = TAILQ_PREV(td, threadqueue, td_runq);
258 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
259 	if (ke) {
260 		/*
261 		 * This thread has been assigned to a KSE.
262 		 * We need to dissociate it and try assign the
263 		 * KSE to the next available thread. Then, we should
264 		 * see if we need to move the KSE in the run queues.
265 		 */
266 		sched_rem(ke);
267 		ke->ke_state = KES_THREAD;
268 		td2 = kg->kg_last_assigned;
269 		KASSERT((td2 != NULL), ("last assigned has wrong value"));
270 		if (td2 == td)
271 			kg->kg_last_assigned = td3;
272 		kse_reassign(ke);
273 	}
274 }
275 #endif
276 
277 /*
278  * Change the priority of a thread that is on the run queue.
279  */
280 void
281 adjustrunqueue(struct thread *td, int newpri)
282 {
283 	struct ksegrp *kg;
284 	struct kse *ke;
285 
286 	mtx_assert(&sched_lock, MA_OWNED);
287 	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
288 
289 	ke = td->td_kse;
290 	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
291 	/*
292 	 * If it is not a threaded process, take the shortcut.
293 	 */
294 	if ((td->td_proc->p_flag & P_THREADED) == 0) {
295 		/* We only care about the kse in the run queue. */
296 		td->td_priority = newpri;
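		/*
		 * Priorities map RQ_PPQ levels onto each system run queue
		 * (see runq_add()), so the KSE only needs to move if the new
		 * priority lands in a different queue.
		 */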
297 		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
298 			sched_rem(ke);
299 			sched_add(ke);
300 		}
301 		return;
302 	}
303 
304 	/* It is a threaded process */
305 	kg = td->td_ksegrp;
306 	kg->kg_runnable--;
307 	TD_SET_CAN_RUN(td);
308 	if (ke) {
309 		if (kg->kg_last_assigned == td) {
310 			kg->kg_last_assigned =
311 			    TAILQ_PREV(td, threadqueue, td_runq);
312 		}
313 		sched_rem(ke);
314 	}
315 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
316 	td->td_priority = newpri;
317 	setrunqueue(td);
318 }
319 
320 void
321 setrunqueue(struct thread *td)
322 {
323 	struct kse *ke;
324 	struct ksegrp *kg;
325 	struct thread *td2;
326 	struct thread *tda;
327 
328 	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
329 	mtx_assert(&sched_lock, MA_OWNED);
330 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
331 	    ("setrunqueue: bad thread state"));
332 	TD_SET_RUNQ(td);
333 	kg = td->td_ksegrp;
334 	kg->kg_runnable++;
335 	if ((td->td_proc->p_flag & P_THREADED) == 0) {
336 		/*
337 		 * Common path optimisation: Only one of everything
338 		 * and the KSE is always already attached.
339 		 * Totally ignore the ksegrp run queue.
340 		 */
341 		sched_add(td->td_kse);
342 		return;
343 	}
344 
345 	tda = kg->kg_last_assigned;
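	/*
	 * Try to find a KSE for this thread: if one is still attached,
	 * reuse it; otherwise take an idle one from the ksegrp, or
	 * commandeer the KSE backing kg_last_assigned when this thread
	 * outranks it.  If none of these apply, the thread is queued
	 * without a KSE.
	 */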
346 	if ((ke = td->td_kse) == NULL) {
347 		if (kg->kg_idle_kses) {
348 			/*
349 			 * There is a free one so it's ours for the asking.
350 			 */
351 			ke = TAILQ_FIRST(&kg->kg_iq);
352 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
353 			ke->ke_state = KES_THREAD;
354 			kg->kg_idle_kses--;
355 		} else if (tda && (tda->td_priority > td->td_priority)) {
356 			/*
357 			 * None free, but there is one we can commandeer.
358 			 */
359 			ke = tda->td_kse;
360 			tda->td_kse = NULL;
361 			ke->ke_thread = NULL;
362 			tda = kg->kg_last_assigned =
363 		    	    TAILQ_PREV(tda, threadqueue, td_runq);
364 			sched_rem(ke);
365 		}
366 	} else {
367 		/*
368 		 * Temporarily disassociate so it looks like the other cases.
369 		 */
370 		ke->ke_thread = NULL;
371 		td->td_kse = NULL;
372 	}
373 
374 	/*
375 	 * Add the thread to the ksegrp's run queue at
376 	 * the appropriate place.
377 	 */
378 	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
379 		if (td2->td_priority > td->td_priority) {
380 			TAILQ_INSERT_BEFORE(td2, td, td_runq);
381 			break;
382 		}
383 	}
384 	if (td2 == NULL) {
385 		/* We ran off the end of the TAILQ or it was empty. */
386 		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
387 	}
388 
389 	/*
390 	 * If we have a ke to use, then put it on the run queue and,
391 	 * if needed, readjust the last_assigned pointer.
392 	 */
393 	if (ke) {
394 		if (tda == NULL) {
395 			/*
396 			 * No pre-existing last assigned so whoever is first
397 			 * gets the KSE we brought in (maybe us).
398 			 */
399 			td2 = TAILQ_FIRST(&kg->kg_runq);
400 			KASSERT((td2->td_kse == NULL),
401 			    ("unexpected ke present"));
402 			td2->td_kse = ke;
403 			ke->ke_thread = td2;
404 			kg->kg_last_assigned = td2;
405 		} else if (tda->td_priority > td->td_priority) {
406 			/*
407 			 * It's ours, grab it, but last_assigned is past us
408 			 * so don't change it.
409 			 */
410 			td->td_kse = ke;
411 			ke->ke_thread = td;
412 		} else {
413 			/*
414 			 * We are past last_assigned, so
415 			 * put the new kse on whatever is next,
416 			 * which may or may not be us.
417 			 */
418 			td2 = TAILQ_NEXT(tda, td_runq);
419 			kg->kg_last_assigned = td2;
420 			td2->td_kse = ke;
421 			ke->ke_thread = td2;
422 		}
423 		sched_add(ke);
424 	}
425 }
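
/*
 * Hypothetical caller sketch, kept under "#if 0" so it is never compiled:
 * a thread that has already been marked capable of running (TD_CAN_RUN)
 * is made runnable with sched_lock held, and setrunqueue() finds or
 * steals a KSE for it as described above.  The function name is for
 * illustration only.
 */
#if 0
static void
example_make_runnable(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	setrunqueue(td);
	mtx_unlock_spin(&sched_lock);
}
#endif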
426 
427 /************************************************************************
428  * Critical section marker functions					*
429  ************************************************************************/
430 /* Critical sections that prevent preemption. */
431 void
432 critical_enter(void)
433 {
434 	struct thread *td;
435 
436 	td = curthread;
437 	if (td->td_critnest == 0)
438 		cpu_critical_enter();
439 	td->td_critnest++;
440 }
441 
442 void
443 critical_exit(void)
444 {
445 	struct thread *td;
446 
447 	td = curthread;
448 	if (td->td_critnest == 1) {
449 		td->td_critnest = 0;
450 		cpu_critical_exit();
451 	} else {
452 		td->td_critnest--;
453 	}
454 }
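
/*
 * Illustrative sketch, kept under "#if 0" so it is never compiled:
 * critical sections nest, so only the outermost enter/exit pair reaches
 * the cpu_critical_*() primitives; inner pairs just adjust td_critnest.
 * Assumes the caller starts with td_critnest == 0.
 */
#if 0
static void
example_critical_nesting(void)
{
	critical_enter();	/* td_critnest 0 -> 1, calls cpu_critical_enter() */
	critical_enter();	/* td_critnest 1 -> 2 */
	critical_exit();	/* td_critnest 2 -> 1 */
	critical_exit();	/* td_critnest 1 -> 0, calls cpu_critical_exit() */
}
#endif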
455 
456 
457 /************************************************************************
458  * SYSTEM RUN QUEUE manipulations and tests				*
459  ************************************************************************/
460 /*
461  * Initialize a run structure.
462  */
463 void
464 runq_init(struct runq *rq)
465 {
466 	int i;
467 
468 	bzero(rq, sizeof *rq);
469 	for (i = 0; i < RQ_NQS; i++)
470 		TAILQ_INIT(&rq->rq_queues[i]);
471 }
472 
473 /*
474  * Clear the status bit of the queue corresponding to priority level pri,
475  * indicating that it is empty.
476  */
477 static __inline void
478 runq_clrbit(struct runq *rq, int pri)
479 {
480 	struct rqbits *rqb;
481 
482 	rqb = &rq->rq_status;
483 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
484 	    rqb->rqb_bits[RQB_WORD(pri)],
485 	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
486 	    RQB_BIT(pri), RQB_WORD(pri));
487 	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
488 }
489 
490 /*
491  * Find the index of the first non-empty run queue.  This is done by
492  * scanning the status bits; a set bit indicates a non-empty queue.
493  */
494 static __inline int
495 runq_findbit(struct runq *rq)
496 {
497 	struct rqbits *rqb;
498 	int pri;
499 	int i;
500 
501 	rqb = &rq->rq_status;
502 	for (i = 0; i < RQB_LEN; i++)
503 		if (rqb->rqb_bits[i]) {
504 			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
505 			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
506 			    rqb->rqb_bits[i], i, pri);
507 			return (pri);
508 		}
509 
510 	return (-1);
511 }
512 
513 /*
514  * Set the status bit of the queue corresponding to priority level pri,
515  * indicating that it is non-empty.
516  */
517 static __inline void
518 runq_setbit(struct runq *rq, int pri)
519 {
520 	struct rqbits *rqb;
521 
522 	rqb = &rq->rq_status;
523 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
524 	    rqb->rqb_bits[RQB_WORD(pri)],
525 	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
526 	    RQB_BIT(pri), RQB_WORD(pri));
527 	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
528 }
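
/*
 * Worked example for the bitmap above, assuming the usual runq.h values
 * (RQ_NQS == 64, RQ_PPQ == 4) and 32-bit status words: a thread priority
 * of 130 hashes to queue index 130 / RQ_PPQ == 32, which is bit 0 of
 * status word 1; priority 0 is bit 0 of word 0.
 */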
529 
530 /*
531  * Add the KSE to the queue specified by its priority, and set the
532  * corresponding status bit.
533  */
534 void
535 runq_add(struct runq *rq, struct kse *ke)
536 {
537 	struct rqhead *rqh;
538 	int pri;
539 
540 	pri = ke->ke_thread->td_priority / RQ_PPQ;
541 	ke->ke_rqindex = pri;
542 	runq_setbit(rq, pri);
543 	rqh = &rq->rq_queues[pri];
544 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
545 	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
546 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
547 }
548 
549 /*
550  * Return true if there are runnable processes of any priority on the run
551  * queue, false otherwise.  Has no side effects, does not modify the run
552  * queue structure.
553  */
554 int
555 runq_check(struct runq *rq)
556 {
557 	struct rqbits *rqb;
558 	int i;
559 
560 	rqb = &rq->rq_status;
561 	for (i = 0; i < RQB_LEN; i++)
562 		if (rqb->rqb_bits[i]) {
563 			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
564 			    rqb->rqb_bits[i], i);
565 			return (1);
566 		}
567 	CTR0(KTR_RUNQ, "runq_check: empty");
568 
569 	return (0);
570 }
571 
572 /*
573  * Find the highest priority KSE on the run queue.
574  */
575 struct kse *
576 runq_choose(struct runq *rq)
577 {
578 	struct rqhead *rqh;
579 	struct kse *ke;
580 	int pri;
581 
582 	mtx_assert(&sched_lock, MA_OWNED);
583 	while ((pri = runq_findbit(rq)) != -1) {
584 		rqh = &rq->rq_queues[pri];
585 		ke = TAILQ_FIRST(rqh);
586 		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
587 		CTR3(KTR_RUNQ,
588 		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
589 		return (ke);
590 	}
591 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
592 
593 	return (NULL);
594 }
595 
596 /*
597  * Remove the KSE from the queue specified by its priority, and clear the
598  * corresponding status bit if the queue becomes empty.
599  * Caller must set ke->ke_state afterwards.
600  */
601 void
602 runq_remove(struct runq *rq, struct kse *ke)
603 {
604 	struct rqhead *rqh;
605 	int pri;
606 
607 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
608 		("runq_remove: process swapped out"));
609 	pri = ke->ke_rqindex;
610 	rqh = &rq->rq_queues[pri];
611 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
612 	    ke, ke->ke_thread->td_priority, pri, rqh);
613 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
614 	TAILQ_REMOVE(rqh, ke, ke_procq);
615 	if (TAILQ_EMPTY(rqh)) {
616 		CTR0(KTR_RUNQ, "runq_remove: empty");
617 		runq_clrbit(rq, pri);
618 	}
619 }
620 
621 #if 0
622 void
623 panc(char *string1, char *string2)
624 {
625 	printf("%s", string1);
626 	Debugger(string2);
627 }
628 
629 void
630 thread_sanity_check(struct thread *td, char *string)
631 {
632 	struct proc *p;
633 	struct ksegrp *kg;
634 	struct kse *ke;
635 	struct thread *td2 = NULL;
636 	unsigned int prevpri;
637 	int	saw_lastassigned = 0;
638 	int unassigned = 0;
639 	int assigned = 0;
640 
641 	p = td->td_proc;
642 	kg = td->td_ksegrp;
643 	ke = td->td_kse;
644 
645 
646 	if (ke) {
647 		if (p != ke->ke_proc) {
648 			panc(string, "wrong proc");
649 		}
650 		if (ke->ke_thread != td) {
651 			panc(string, "wrong thread");
652 		}
653 	}
654 
655 	if ((p->p_flag & P_THREADED) == 0) {
656 		if (ke == NULL) {
657 			panc(string, "non KSE thread lost kse");
658 		}
659 	} else {
660 		prevpri = 0;
661 		saw_lastassigned = 0;
662 		unassigned = 0;
663 		assigned = 0;
664 		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
665 			if (td2->td_priority < prevpri) {
666 				panc(string, "thread runqueue unsorted");
667 			}
668 			if ((td2->td_state == TDS_RUNQ) &&
669 			    td2->td_kse &&
670 			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
671 				panc(string, "KSE wrong state");
672 			}
673 			prevpri = td2->td_priority;
674 			if (td2->td_kse) {
675 				assigned++;
676 				if (unassigned) {
677 					panc(string, "unassigned before assigned");
678 				}
679 				if (kg->kg_last_assigned == NULL) {
680 					panc(string, "lastassigned corrupt");
681 				}
682 				if (saw_lastassigned) {
683 					panc(string, "last assigned not last");
684 				}
685 				if (td2->td_kse->ke_thread != td2) {
686 					panc(string, "mismatched kse/thread");
687 				}
688 			} else {
689 				unassigned++;
690 			}
691 			if (td2 == kg->kg_last_assigned) {
692 				saw_lastassigned = 1;
693 				if (td2->td_kse == NULL) {
694 					panc(string, "last assigned not assigned");
695 				}
696 			}
697 		}
698 		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
699 			panc(string, "where on earth does lastassigned point?");
700 		}
701 #if 0
702 		FOREACH_THREAD_IN_GROUP(kg, td2) {
703 			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
704 			    (TD_ON_RUNQ(td2))) {
705 				assigned++;
706 				if (td2->td_kse == NULL) {
707 					panc(string, "BOUND thread with no KSE");
708 				}
709 			}
710 		}
711 #endif
712 #if 0
713 		if ((unassigned + assigned) != kg->kg_runnable) {
714 			panc(string, "wrong number in runnable");
715 		}
716 #endif
717 	}
718 	if (assigned == 12345) {
719 		printf("%p %p %p %p %p %d, %d",
720 		    td, td2, ke, kg, p, assigned, saw_lastassigned);
721 	}
722 }
723 #endif
724 
725