xref: /freebsd/sys/kern/kern_switch.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
1 /*
2  * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /***
28 Here is the logic..
29 
30 If there are N processors, then there are at most N KSEs (kernel
31 schedulable entities) working to process threads that belong to a
32 KSEGROUP (kg). If there are X of these KSEs actually running at the
33 moment in question, then there are at most M = (N - X) of these KSEs on
34 the run queue, as running KSEs are not on the queue.
35 
36 Runnable threads are queued off the KSEGROUP in priority order.
37 If there are M or more threads runnable, the top M threads
38 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
39 their priority from those threads and are put on the run queue.
40 
41 The last thread that had a priority high enough to have a KSE associated
42 with it, AND IS ON THE RUN QUEUE, is pointed to by
43 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
44 assigned, either because all the available KSEs are actively running or
45 because there are no threads queued, that pointer is NULL.
46 
47 When a KSE is removed from the run queue so that its thread can run, we
48 know it was associated with the highest priority thread in the queue (at
49 the head of the queue). If it is also the last assigned thread, we know M
50 was 1 and must now be 0. Since the thread is no longer queued, that
51 pointer must be cleared from it. Since we know there were no more KSEs
52 available (M was 1 and is now 0), and since we are not FREEING our KSE but
53 using it, we know there are STILL no more KSEs available. Hence we can
54 prove that the next thread in the ksegrp list will not have a KSE to
55 assign to it, so the pointer must be made 'invalid' (NULL).
56 
57 The pointer exists so that when a new thread is made runnable, its
58 priority can be compared with that of the last assigned thread to see
59 whether it should 'steal' that thread's KSE, i.e. whether it belongs
60 'earlier' on the list than that thread or later. If it is earlier, the
61 KSE is removed from the last assigned thread (which is now not assigned
62 a KSE) and reassigned to the new thread, which is placed earlier in the
63 list. The pointer is then backed up to the previous thread (which may or
64 may not be the new thread).
65 
66 When a thread sleeps or is removed, the KSE becomes available and if there
67 are queued threads that are not assigned KSEs, the highest priority one of
68 them is assigned the KSE, which is then placed back on the run queue at
69 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
70 to point to it.
71 
72 The following diagram shows 2 KSEs and 3 threads from a single process.
73 
74  RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
75               \    \____
76                \        \
77     KSEGROUP---thread--thread--thread    (queued in priority order)
78         \                 /
79          \_______________/
80           (last_assigned)
81 
82 The result of this scheme is that the M available KSEs are always
83 queued at the priorities they have inherited from the M highest priority
84 threads for that KSEGROUP. If this situation changes, the KSEs are
85 reassigned to keep this true.
86 ***/
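
/*
 * A minimal sketch (not compiled in) of the invariant described above:
 * every thread on the ksegrp run queue up to and including
 * kg_last_assigned has a KSE, and every thread after it has none.  The
 * helper name is illustrative only; the real consistency checks live in
 * thread_sanity_check() near the bottom of this file.
 */
#if 0
static void
ksegrp_assert_assignment(struct ksegrp *kg)
{
	struct thread *td;
	int seen_last = (kg->kg_last_assigned == NULL);

	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		if (seen_last)
			KASSERT(td->td_kse == NULL,
			    ("thread beyond last_assigned has a KSE"));
		else
			KASSERT(td->td_kse != NULL,
			    ("thread before last_assigned lacks a KSE"));
		if (td == kg->kg_last_assigned)
			seen_last = 1;
	}
}
#endif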
87 
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD$");
90 
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/kernel.h>
94 #include <sys/ktr.h>
95 #include <sys/lock.h>
96 #include <sys/mutex.h>
97 #include <sys/proc.h>
98 #include <sys/queue.h>
99 #include <sys/sched.h>
100 #if defined(SMP) && defined(__i386__)
101 #include <sys/smp.h>
102 #endif
103 #include <machine/critical.h>
104 
105 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
106 
107 void panc(char *string1, char *string2);
108 
109 #if 0
110 static void runq_readjust(struct runq *rq, struct kse *ke);
111 #endif
112 /************************************************************************
113  * Functions that manipulate runnability from a thread perspective.	*
114  ************************************************************************/
115 /*
116  * Select the KSE that will be run next.  From that find the thread, and
117  * remove it from the KSEGRP's run queue.  If there is thread clustering,
118  * this will be what does it.
119  */
120 struct thread *
121 choosethread(void)
122 {
123 	struct kse *ke;
124 	struct thread *td;
125 	struct ksegrp *kg;
126 
127 #if defined(SMP) && defined(__i386__)
128 	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
129 		/* Shutting down, run idlethread on AP's */
130 		td = PCPU_GET(idlethread);
131 		ke = td->td_kse;
132 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
133 		ke->ke_flags |= KEF_DIDRUN;
134 		TD_SET_RUNNING(td);
135 		return (td);
136 	}
137 #endif
138 
139 retry:
140 	ke = sched_choose();
141 	if (ke) {
142 		td = ke->ke_thread;
143 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
144 		kg = ke->ke_ksegrp;
145 		if (td->td_proc->p_flag & P_SA) {
146 			if (kg->kg_last_assigned == td) {
147 				kg->kg_last_assigned = TAILQ_PREV(td,
148 				    threadqueue, td_runq);
149 			}
150 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
151 		}
152 		kg->kg_runnable--;
153 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
154 		    td, td->td_priority);
155 	} else {
156 		/* Simulate runq_choose() having returned the idle thread */
157 		td = PCPU_GET(idlethread);
158 		ke = td->td_kse;
159 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
160 	}
161 	ke->ke_flags |= KEF_DIDRUN;
162 
163 	/*
164 	 * If we are in panic, only allow system threads,
165 	 * plus the one we are running in, to be run.
166 	 */
167 	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
168 	    (td->td_flags & TDF_INPANIC) == 0)) {
169 		/* note that it is no longer on the run queue */
170 		TD_SET_CAN_RUN(td);
171 		goto retry;
172 	}
173 
174 	TD_SET_RUNNING(td);
175 	return (td);
176 }
177 
178 /*
179  * Given a surplus KSE, either assign a new runnable thread to it
180  * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
181  * Assumes that the original thread is not runnable.
182  */
183 void
184 kse_reassign(struct kse *ke)
185 {
186 	struct ksegrp *kg;
187 	struct thread *td;
188 	struct thread *original;
189 
190 	mtx_assert(&sched_lock, MA_OWNED);
191 	original = ke->ke_thread;
192 	KASSERT(original == NULL || TD_IS_INHIBITED(original),
193     	    ("reassigning KSE with runnable thread"));
194 	kg = ke->ke_ksegrp;
195 	if (original)
196 		original->td_kse = NULL;
197 
198 	/*
199 	 * Find the first unassigned thread
200 	 */
201 	if ((td = kg->kg_last_assigned) != NULL)
202 		td = TAILQ_NEXT(td, td_runq);
203 	else
204 		td = TAILQ_FIRST(&kg->kg_runq);
205 
206 	/*
207 	 * If we found one, assign it the kse, otherwise idle the kse.
208 	 */
209 	if (td) {
210 		kg->kg_last_assigned = td;
211 		td->td_kse = ke;
212 		ke->ke_thread = td;
213 		sched_add(ke);
214 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
215 		return;
216 	}
217 
218 	ke->ke_state = KES_IDLE;
219 	ke->ke_thread = NULL;
220 	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
221 	kg->kg_idle_kses++;
222 	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
223 	return;
224 }
225 
226 #if 0
227 /*
228  * Remove a thread from its KSEGRP's run queue.
229  * This in turn may remove it from a KSE if it was already assigned
230  * to one, possibly causing a new thread to be assigned to the KSE
231  * and the KSE getting a new priority.
232  */
233 static void
234 remrunqueue(struct thread *td)
235 {
236 	struct thread *td2, *td3;
237 	struct ksegrp *kg;
238 	struct kse *ke;
239 
240 	mtx_assert(&sched_lock, MA_OWNED);
241 	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
242 	kg = td->td_ksegrp;
243 	ke = td->td_kse;
244 	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
245 	kg->kg_runnable--;
246 	TD_SET_CAN_RUN(td);
247 	/*
248 	 * If it is not a threaded process, take the shortcut.
249 	 */
250 	if ((td->td_proc->p_flag & P_SA) == 0) {
251 		/* Bring its kse with it, leave the thread attached */
252 		sched_rem(ke);
253 		ke->ke_state = KES_THREAD;
254 		return;
255 	}
256    	td3 = TAILQ_PREV(td, threadqueue, td_runq);
257 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
258 	if (ke) {
259 		/*
260 		 * This thread has been assigned to a KSE.
261 		 * We need to dissociate it and try to assign the
262 		 * KSE to the next available thread. Then, we should
263 		 * see if we need to move the KSE in the run queues.
264 		 */
265 		sched_rem(ke);
266 		ke->ke_state = KES_THREAD;
267 		td2 = kg->kg_last_assigned;
268 		KASSERT((td2 != NULL), ("last assigned has wrong value"));
269 		if (td2 == td)
270 			kg->kg_last_assigned = td3;
271 		kse_reassign(ke);
272 	}
273 }
274 #endif
275 
276 /*
277  * Change the priority of a thread that is on the run queue.
278  */
279 void
280 adjustrunqueue(struct thread *td, int newpri)
281 {
282 	struct ksegrp *kg;
283 	struct kse *ke;
284 
285 	mtx_assert(&sched_lock, MA_OWNED);
286 	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
287 
288 	ke = td->td_kse;
289 	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
290 	/*
291 	 * If it is not a threaded process, take the shortcut.
292 	 */
293 	if ((td->td_proc->p_flag & P_SA) == 0) {
294 		/* We only care about the kse in the run queue. */
295 		td->td_priority = newpri;
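		/*
		 * Requeue below only if the new priority falls in a
		 * different RQ_PPQ-sized band, i.e. maps to a different run
		 * queue index (ke_rqindex) than the one the KSE is on now.
		 */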
296 		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
297 			sched_rem(ke);
298 			sched_add(ke);
299 		}
300 		return;
301 	}
302 
303 	/* It is a threaded process */
304 	kg = td->td_ksegrp;
305 	kg->kg_runnable--;
306 	TD_SET_CAN_RUN(td);
307 	if (ke) {
308 		if (kg->kg_last_assigned == td) {
309 			kg->kg_last_assigned =
310 			    TAILQ_PREV(td, threadqueue, td_runq);
311 		}
312 		sched_rem(ke);
313 	}
314 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
315 	td->td_priority = newpri;
316 	setrunqueue(td);
317 }
318 
319 void
320 setrunqueue(struct thread *td)
321 {
322 	struct kse *ke;
323 	struct ksegrp *kg;
324 	struct thread *td2;
325 	struct thread *tda;
326 
327 	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
328 	mtx_assert(&sched_lock, MA_OWNED);
329 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
330 	    ("setrunqueue: bad thread state"));
331 	TD_SET_RUNQ(td);
332 	kg = td->td_ksegrp;
333 	kg->kg_runnable++;
334 	if ((td->td_proc->p_flag & P_SA) == 0) {
335 		/*
336 		 * Common path optimisation: Only one of everything
337 		 * and the KSE is always already attached.
338 		 * Totally ignore the ksegrp run queue.
339 		 */
340 		sched_add(td->td_kse);
341 		return;
342 	}
343 
344 	tda = kg->kg_last_assigned;
345 	if ((ke = td->td_kse) == NULL) {
346 		if (kg->kg_idle_kses) {
347 			/*
348 			 * There is a free one so it's ours for the asking..
349 			 */
350 			ke = TAILQ_FIRST(&kg->kg_iq);
351 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
352 			ke->ke_state = KES_THREAD;
353 			kg->kg_idle_kses--;
354 		} else if (tda && (tda->td_priority > td->td_priority)) {
355 			/*
356 			 * None free, but there is one we can commandeer.
357 			 */
358 			ke = tda->td_kse;
359 			tda->td_kse = NULL;
360 			ke->ke_thread = NULL;
361 			tda = kg->kg_last_assigned =
362 		    	    TAILQ_PREV(tda, threadqueue, td_runq);
363 			sched_rem(ke);
364 		}
365 	} else {
366 		/*
367 		 * Temporarily disassociate so it looks like the other cases.
368 		 */
369 		ke->ke_thread = NULL;
370 		td->td_kse = NULL;
371 	}
372 
373 	/*
374 	 * Add the thread to the ksegrp's run queue at
375 	 * the appropriate place.
376 	 */
377 	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
378 		if (td2->td_priority > td->td_priority) {
379 			TAILQ_INSERT_BEFORE(td2, td, td_runq);
380 			break;
381 		}
382 	}
383 	if (td2 == NULL) {
384 		/* We ran off the end of the TAILQ or it was empty. */
385 		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
386 	}
387 
388 	/*
389 	 * If we have a ke to use, then put it on the run queue and,
390 	 * if needed, readjust the last_assigned pointer.
391 	 */
392 	if (ke) {
393 		if (tda == NULL) {
394 			/*
395 			 * No pre-existing last assigned so whoever is first
396 			 * gets the KSE we brought in.. (maybe us)
397 			 */
398 			td2 = TAILQ_FIRST(&kg->kg_runq);
399 			KASSERT((td2->td_kse == NULL),
400 			    ("unexpected ke present"));
401 			td2->td_kse = ke;
402 			ke->ke_thread = td2;
403 			kg->kg_last_assigned = td2;
404 		} else if (tda->td_priority > td->td_priority) {
405 			/*
406 			 * It's ours, grab it, but last_assigned is past us
407 			 * so don't change it.
408 			 */
409 			td->td_kse = ke;
410 			ke->ke_thread = td;
411 		} else {
412 			/*
413 			 * We are past last_assigned, so
414 			 * put the new kse on whatever is next,
415 			 * which may or may not be us.
416 			 */
417 			td2 = TAILQ_NEXT(tda, td_runq);
418 			kg->kg_last_assigned = td2;
419 			td2->td_kse = ke;
420 			ke->ke_thread = td2;
421 		}
422 		sched_add(ke);
423 	}
424 }
425 
426 /************************************************************************
427  * Critical section marker functions					*
428  ************************************************************************/
429 /* Critical sections that prevent preemption. */
430 void
431 critical_enter(void)
432 {
433 	struct thread *td;
434 
435 	td = curthread;
436 	if (td->td_critnest == 0)
437 		cpu_critical_enter();
438 	td->td_critnest++;
439 }
440 
441 void
442 critical_exit(void)
443 {
444 	struct thread *td;
445 
446 	td = curthread;
447 	if (td->td_critnest == 1) {
448 		td->td_critnest = 0;
449 		cpu_critical_exit();
450 	} else {
451 		td->td_critnest--;
452 	}
453 }
454 
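/*
 * Illustrative (not compiled in) sketch of how the nesting count above
 * is used: critical sections may nest, and only the outermost
 * enter/exit pair calls into the machine-dependent
 * cpu_critical_enter()/cpu_critical_exit() routines.  The function name
 * is hypothetical.
 */
#if 0
static void
critical_nesting_example(void)
{
	critical_enter();		/* td_critnest 0 -> 1, MD enter runs */
	critical_enter();		/* td_critnest 1 -> 2, no MD call */
	/* ... code that must not be preempted ... */
	critical_exit();		/* td_critnest 2 -> 1, no MD call */
	critical_exit();		/* td_critnest 1 -> 0, MD exit runs */
}
#endif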
455 
456 /************************************************************************
457  * SYSTEM RUN QUEUE manipulations and tests				*
458  ************************************************************************/
459 /*
460  * Initialize a run structure.
461  */
462 void
463 runq_init(struct runq *rq)
464 {
465 	int i;
466 
467 	bzero(rq, sizeof *rq);
468 	for (i = 0; i < RQ_NQS; i++)
469 		TAILQ_INIT(&rq->rq_queues[i]);
470 }
471 
472 /*
473  * Clear the status bit of the queue corresponding to priority level pri,
474  * indicating that it is empty.
475  */
476 static __inline void
477 runq_clrbit(struct runq *rq, int pri)
478 {
479 	struct rqbits *rqb;
480 
481 	rqb = &rq->rq_status;
482 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
483 	    rqb->rqb_bits[RQB_WORD(pri)],
484 	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
485 	    RQB_BIT(pri), RQB_WORD(pri));
486 	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
487 }
488 
489 /*
490  * Find the index of the first non-empty run queue.  This is done by
491  * scanning the status bits; a set bit indicates a non-empty queue.
492  */
493 static __inline int
494 runq_findbit(struct runq *rq)
495 {
496 	struct rqbits *rqb;
497 	int pri;
498 	int i;
499 
500 	rqb = &rq->rq_status;
501 	for (i = 0; i < RQB_LEN; i++)
502 		if (rqb->rqb_bits[i]) {
503 			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
504 			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
505 			    rqb->rqb_bits[i], i, pri);
506 			return (pri);
507 		}
508 
509 	return (-1);
510 }
511 
512 /*
513  * Set the status bit of the queue corresponding to priority level pri,
514  * indicating that it is non-empty.
515  */
516 static __inline void
517 runq_setbit(struct runq *rq, int pri)
518 {
519 	struct rqbits *rqb;
520 
521 	rqb = &rq->rq_status;
522 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
523 	    rqb->rqb_bits[RQB_WORD(pri)],
524 	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
525 	    RQB_BIT(pri), RQB_WORD(pri));
526 	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
527 }
528 
529 /*
530  * Add the KSE to the queue specified by its priority, and set the
531  * corresponding status bit.
532  */
533 void
534 runq_add(struct runq *rq, struct kse *ke)
535 {
536 	struct rqhead *rqh;
537 	int pri;
538 
539 	pri = ke->ke_thread->td_priority / RQ_PPQ;
540 	ke->ke_rqindex = pri;
541 	runq_setbit(rq, pri);
542 	rqh = &rq->rq_queues[pri];
543 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
544 	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
545 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
546 }
547 
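/*
 * A self-contained, user-space sketch of the technique the runq_*()
 * routines above rely on: a thread priority is hashed down to a queue
 * index (one queue per RQ_PPQ priorities), one status bit is kept per
 * queue, and the best-priority non-empty queue is found with a
 * find-first-set scan.  The constants and names here are illustrative
 * stand-ins, not the real definitions from <sys/runq.h>.
 */
#if 0
#include <strings.h>		/* ffs() */

#define	EX_NQS	64		/* number of run queues, like RQ_NQS */
#define	EX_PPQ	4		/* priorities per queue, like RQ_PPQ */
#define	EX_BPW	32		/* status bits per word */

static unsigned int ex_status[EX_NQS / EX_BPW];

/* Mark the queue that raw priority 'pri' maps to as non-empty. */
static void
ex_setbit(int pri)
{
	int idx = pri / EX_PPQ;

	ex_status[idx / EX_BPW] |= 1u << (idx % EX_BPW);
}

/* Return the index of the first (best priority) non-empty queue, or -1. */
static int
ex_findbit(void)
{
	int i;

	for (i = 0; i < EX_NQS / EX_BPW; i++)
		if (ex_status[i] != 0)
			return (ffs(ex_status[i]) - 1 + i * EX_BPW);
	return (-1);
}
#endif
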
548 /*
549  * Return true if there are runnable processes of any priority on the run
550  * queue, false otherwise.  Has no side effects, does not modify the run
551  * queue structure.
552  */
553 int
554 runq_check(struct runq *rq)
555 {
556 	struct rqbits *rqb;
557 	int i;
558 
559 	rqb = &rq->rq_status;
560 	for (i = 0; i < RQB_LEN; i++)
561 		if (rqb->rqb_bits[i]) {
562 			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
563 			    rqb->rqb_bits[i], i);
564 			return (1);
565 		}
566 	CTR0(KTR_RUNQ, "runq_check: empty");
567 
568 	return (0);
569 }
570 
571 /*
572  * Find the highest priority KSE on the run queue.
573  */
574 struct kse *
575 runq_choose(struct runq *rq)
576 {
577 	struct rqhead *rqh;
578 	struct kse *ke;
579 	int pri;
580 
581 	mtx_assert(&sched_lock, MA_OWNED);
582 	while ((pri = runq_findbit(rq)) != -1) {
583 		rqh = &rq->rq_queues[pri];
584 		ke = TAILQ_FIRST(rqh);
585 		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
586 		CTR3(KTR_RUNQ,
587 		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
588 		return (ke);
589 	}
590 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
591 
592 	return (NULL);
593 }
594 
595 /*
596  * Remove the KSE from the queue specified by its priority, and clear the
597  * corresponding status bit if the queue becomes empty.
598  * Caller must set ke->ke_state afterwards.
599  */
600 void
601 runq_remove(struct runq *rq, struct kse *ke)
602 {
603 	struct rqhead *rqh;
604 	int pri;
605 
606 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
607 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
608 		("runq_remove: process swapped out"));
609 	pri = ke->ke_rqindex;
610 	rqh = &rq->rq_queues[pri];
611 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
612 	    ke, ke->ke_thread->td_priority, pri, rqh);
613 	TAILQ_REMOVE(rqh, ke, ke_procq);
614 	if (TAILQ_EMPTY(rqh)) {
615 		CTR0(KTR_RUNQ, "runq_remove: empty");
616 		runq_clrbit(rq, pri);
617 	}
618 }
619 
620 #if 0
621 void
622 panc(char *string1, char *string2)
623 {
624 	printf("%s", string1);
625 	Debugger(string2);
626 }
627 
628 void
629 thread_sanity_check(struct thread *td, char *string)
630 {
631 	struct proc *p;
632 	struct ksegrp *kg;
633 	struct kse *ke;
634 	struct thread *td2 = NULL;
635 	unsigned int prevpri;
636 	int	saw_lastassigned = 0;
637 	int unassigned = 0;
638 	int assigned = 0;
639 
640 	p = td->td_proc;
641 	kg = td->td_ksegrp;
642 	ke = td->td_kse;
643 
644 
645 	if (ke) {
646 		if (p != ke->ke_proc) {
647 			panc(string, "wrong proc");
648 		}
649 		if (ke->ke_thread != td) {
650 			panc(string, "wrong thread");
651 		}
652 	}
653 
654 	if ((p->p_flag & P_SA) == 0) {
655 		if (ke == NULL) {
656 			panc(string, "non KSE thread lost kse");
657 		}
658 	} else {
659 		prevpri = 0;
660 		saw_lastassigned = 0;
661 		unassigned = 0;
662 		assigned = 0;
663 		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
664 			if (td2->td_priority < prevpri) {
665 				panc(string, "thread runqueue unsorted");
666 			}
667 			if ((td2->td_state == TDS_RUNQ) &&
668 			    td2->td_kse &&
669 			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
670 				panc(string, "KSE wrong state");
671 			}
672 			prevpri = td2->td_priority;
673 			if (td2->td_kse) {
674 				assigned++;
675 				if (unassigned) {
676 					panc(string, "unassigned before assigned");
677 				}
678  				if  (kg->kg_last_assigned == NULL) {
679 					panc(string, "lastassigned corrupt");
680 				}
681 				if (saw_lastassigned) {
682 					panc(string, "last assigned not last");
683 				}
684 				if (td2->td_kse->ke_thread != td2) {
685 					panc(string, "mismatched kse/thread");
686 				}
687 			} else {
688 				unassigned++;
689 			}
690 			if (td2 == kg->kg_last_assigned) {
691 				saw_lastassigned = 1;
692 				if (td2->td_kse == NULL) {
693 					panc(string, "last assigned not assigned");
694 				}
695 			}
696 		}
697 		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
698 			panc(string, "where on earth does lastassigned point?");
699 		}
700 #if 0
701 		FOREACH_THREAD_IN_GROUP(kg, td2) {
702 			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
703 			    (TD_ON_RUNQ(td2))) {
704 				assigned++;
705 				if (td2->td_kse == NULL) {
706 					panc(string, "BOUND thread with no KSE");
707 				}
708 			}
709 		}
710 #endif
711 #if 0
712 		if ((unassigned + assigned) != kg->kg_runnable) {
713 			panc(string, "wrong number in runnable");
714 		}
715 #endif
716 	}
717 	if (assigned == 12345) {
718 		printf("%p %p %p %p %p %d, %d",
719 		    td, td2, ke, kg, p, assigned, saw_lastassigned);
720 	}
721 }
722 #endif
723 
724