xref: /freebsd/sys/kern/kern_switch.c (revision fca542bcaab3c28addb4bbd8841703575e3bc968)
1 /*
2  * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /***
28 Here is the logic:
29 
30 If there are N processors, then there are at most N KSEs (kernel
31 schedulable entities) working to process threads that belong to a
32 KSEGROUP (kg). If there are X of these KSEs actually running at the
33 moment in question, then at most M (where M = N - X) of these KSEs are on
34 the run queue, as running KSEs are not on the queue.
35 
36 Runnable threads are queued off the KSEGROUP in priority order.
37 If there are M or more threads runnable, the top M threads
38 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
39 their priority from those threads and are put on the run queue.
40 
41 The last thread that had a priority high enough to have a KSE associated
42 with it, AND IS ON THE RUN QUEUE, is pointed to by
43 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
44 assigned, either because all the available KSEs are actively running or
45 because there are no threads queued, that pointer is NULL.
46 
47 When a KSE is removed from the run queue to be run, we know it was
48 associated with the highest priority thread in the queue (at the head
49 of the queue). If it is also the last assigned thread, we know M was 1
50 and must now be 0. Since the thread is no longer queued, the
51 kg_last_assigned pointer must no longer point to it. Since there were
52 no more KSEs available (M was 1 and is now 0), and since we are not
53 FREEING our KSE but using it, there are STILL no more KSEs available,
54 so the next thread in the ksegrp list cannot have a KSE assigned to
55 it, and the pointer must be made 'invalid' (NULL).
56 
57 The pointer exists so that when a new thread is made runnable, its
58 priority can be compared with that of the last assigned thread to see
59 whether it should 'steal' that thread's KSE, i.e. whether it falls
60 'earlier' on the list than that thread or later. If it falls earlier,
61 the KSE is taken from the last assigned thread (which is then no longer
62 assigned a KSE) and given to the new thread, which is placed earlier in
63 the list. The pointer is then backed up to the previous thread (which
64 may or may not be the new thread).
65 
66 When a thread sleeps or is removed, its KSE becomes available, and if
67 there are queued threads that are not assigned KSEs, the highest priority
68 one of them is assigned the KSE, which is then placed back on the run
69 queue at the appropriate place, and the kg->kg_last_assigned pointer is
70 adjusted down to point to it.
71 
72 The following diagram shows 2 KSEs and 3 threads from a single process.
73 
74  RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
75               \    \____
76                \        \
77     KSEGROUP---thread--thread--thread    (queued in priority order)
78         \                 /
79          \_______________/
80           (last_assigned)
81 
82 The result of this scheme is that the M available KSEs are always
83 queued at the priorities they have inherited from the M highest priority
84 threads for that KSEGROUP. If this situation changes, the KSEs are
85 reassigned to keep this true.
86 ***/
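/*
 * Worked example of the scheme above (an illustrative addition): suppose
 * N = 2 KSEs serve a KSEGROUP with three runnable threads t1, t2, t3 in
 * decreasing priority order, and neither KSE is currently running.  Then
 * M = 2, t1 and t2 are preassigned the two KSEs, and kg_last_assigned
 * points at t2.  If a new thread t0 with a priority between t1 and t2 is
 * now made runnable, t2's KSE is 'stolen' for t0; afterwards t1 and t0
 * hold the KSEs and kg_last_assigned points at t0 (the new thread).
 */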
87 
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD$");
90 
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/kernel.h>
94 #include <sys/ktr.h>
95 #include <sys/lock.h>
96 #include <sys/mutex.h>
97 #include <sys/proc.h>
98 #include <sys/queue.h>
99 #include <sys/sched.h>
100 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
101 #include <sys/smp.h>
102 #endif
103 #include <machine/critical.h>
104 
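/*
 * The run queue status bitmap must provide exactly one bit per run queue:
 * RQB_LEN words of RQB_BPW bits each have to cover the RQ_NQS queues.
 */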
105 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
106 
107 void panc(char *string1, char *string2);
108 
109 #if 0
110 static void runq_readjust(struct runq *rq, struct kse *ke);
111 #endif
112 /************************************************************************
113  * Functions that manipulate runnability from a thread perspective.	*
114  ************************************************************************/
115 /*
116  * Select the KSE that will be run next.  From that find the thread, and
117  * remove it from the KSEGRP's run queue.  If there is thread clustering,
118  * this will be what does it.
119  */
120 struct thread *
121 choosethread(void)
122 {
123 	struct kse *ke;
124 	struct thread *td;
125 	struct ksegrp *kg;
126 
127 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
128 	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
129 		/* Shutting down, run the idle thread on the APs. */
130 		td = PCPU_GET(idlethread);
131 		ke = td->td_kse;
132 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
133 		ke->ke_flags |= KEF_DIDRUN;
134 		TD_SET_RUNNING(td);
135 		return (td);
136 	}
137 #endif
138 
139 retry:
140 	ke = sched_choose();
141 	if (ke) {
142 		td = ke->ke_thread;
143 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
144 		kg = ke->ke_ksegrp;
145 		if (td->td_proc->p_flag & P_SA) {
146 			if (kg->kg_last_assigned == td) {
147 				kg->kg_last_assigned = TAILQ_PREV(td,
148 				    threadqueue, td_runq);
149 			}
150 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
151 		}
152 		kg->kg_runnable--;
153 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
154 		    td, td->td_priority);
155 	} else {
156 		/* Simulate runq_choose() having returned the idle thread */
157 		td = PCPU_GET(idlethread);
158 		ke = td->td_kse;
159 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
160 	}
161 	ke->ke_flags |= KEF_DIDRUN;
162 
163 	/*
164 	 * If we are in panic, only allow system threads,
165 	 * plus the one we are running in, to be run.
166 	 */
167 	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
168 	    (td->td_flags & TDF_INPANIC) == 0)) {
169 		/* note that it is no longer on the run queue */
170 		TD_SET_CAN_RUN(td);
171 		goto retry;
172 	}
173 
174 	TD_SET_RUNNING(td);
175 	return (td);
176 }
177 
178 /*
179  * Given a surplus KSE, either assign a new runnable thread to it
180  * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
181  * Assumes that the original thread is not runnable.
182  */
183 void
184 kse_reassign(struct kse *ke)
185 {
186 	struct ksegrp *kg;
187 	struct thread *td;
188 	struct thread *original;
189 
190 	mtx_assert(&sched_lock, MA_OWNED);
191 	original = ke->ke_thread;
192 	KASSERT(original == NULL || TD_IS_INHIBITED(original),
193     	    ("reassigning KSE with runnable thread"));
194 	kg = ke->ke_ksegrp;
195 	if (original)
196 		original->td_kse = NULL;
197 
198 	/*
199 	 * Find the first unassigned thread
200 	 */
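	 * (by the kg_last_assigned invariant described at the top of this
	 * file, every thread on the ksegrp run queue after that one has no
	 * KSE assigned, so the search can start just past it)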
201 	if ((td = kg->kg_last_assigned) != NULL)
202 		td = TAILQ_NEXT(td, td_runq);
203 	else
204 		td = TAILQ_FIRST(&kg->kg_runq);
205 
206 	/*
207 	 * If we found one, assign it the kse, otherwise idle the kse.
208 	 */
209 	if (td) {
210 		kg->kg_last_assigned = td;
211 		td->td_kse = ke;
212 		ke->ke_thread = td;
213 		sched_add(td);
214 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
215 		return;
216 	}
217 
218 	ke->ke_state = KES_IDLE;
219 	ke->ke_thread = NULL;
220 	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
221 	kg->kg_idle_kses++;
222 	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
223 	return;
224 }
225 
226 #if 0
227 /*
228  * Remove a thread from its KSEGRP's run queue.
229  * This in turn may remove it from a KSE if it was already assigned
230  * to one, possibly causing a new thread to be assigned to the KSE
231  * and the KSE getting a new priority.
232  */
233 static void
234 remrunqueue(struct thread *td)
235 {
236 	struct thread *td2, *td3;
237 	struct ksegrp *kg;
238 	struct kse *ke;
239 
240 	mtx_assert(&sched_lock, MA_OWNED);
241 	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
242 	kg = td->td_ksegrp;
243 	ke = td->td_kse;
244 	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
245 	kg->kg_runnable--;
246 	TD_SET_CAN_RUN(td);
247 	/*
248 	 * If it is not a threaded process, take the shortcut.
249 	 */
250 	if ((td->td_proc->p_flag & P_SA) == 0) {
251 		/* Bring its kse with it, leave the thread attached */
252 		sched_rem(td);
253 		ke->ke_state = KES_THREAD;
254 		return;
255 	}
256    	td3 = TAILQ_PREV(td, threadqueue, td_runq);
257 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
258 	if (ke) {
259 		/*
260 		 * This thread has been assigned to a KSE.
261 		 * We need to dissociate it and try to assign the
262 		 * KSE to the next available thread. Then, we should
263 		 * see if we need to move the KSE in the run queues.
264 		 */
265 		sched_rem(td);
266 		ke->ke_state = KES_THREAD;
267 		td2 = kg->kg_last_assigned;
268 		KASSERT((td2 != NULL), ("last assigned has wrong value"));
269 		if (td2 == td)
270 			kg->kg_last_assigned = td3;
271 		kse_reassign(ke);
272 	}
273 }
274 #endif
275 
276 /*
277  * Change the priority of a thread that is on the run queue.
278  */
279 void
280 adjustrunqueue(struct thread *td, int newpri)
281 {
282 	struct ksegrp *kg;
283 	struct kse *ke;
284 
285 	mtx_assert(&sched_lock, MA_OWNED);
286 	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
287 
288 	ke = td->td_kse;
289 	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
290 	/*
291 	 * If it is not a threaded process, take the shortcut.
292 	 */
293 	if ((td->td_proc->p_flag & P_SA) == 0) {
294 		/* We only care about the kse in the run queue. */
295 		td->td_priority = newpri;
296 		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
297 			sched_rem(td);
298 			sched_add(td);
299 		}
300 		return;
301 	}
302 
303 	/* It is a threaded process */
304 	kg = td->td_ksegrp;
305 	kg->kg_runnable--;
306 	TD_SET_CAN_RUN(td);
307 	if (ke) {
308 		if (kg->kg_last_assigned == td) {
309 			kg->kg_last_assigned =
310 			    TAILQ_PREV(td, threadqueue, td_runq);
311 		}
312 		sched_rem(td);
313 	}
314 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
315 	td->td_priority = newpri;
316 	setrunqueue(td);
317 }
318 
319 void
320 setrunqueue(struct thread *td)
321 {
322 	struct kse *ke;
323 	struct ksegrp *kg;
324 	struct thread *td2;
325 	struct thread *tda;
326 
327 	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
328 	mtx_assert(&sched_lock, MA_OWNED);
329 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
330 	    ("setrunqueue: bad thread state"));
331 	TD_SET_RUNQ(td);
332 	kg = td->td_ksegrp;
333 	kg->kg_runnable++;
334 	if ((td->td_proc->p_flag & P_SA) == 0) {
335 		/*
336 		 * Common path optimisation: Only one of everything
337 		 * and the KSE is always already attached.
338 		 * Totally ignore the ksegrp run queue.
339 		 */
340 		sched_add(td);
341 		return;
342 	}
343 
344 	tda = kg->kg_last_assigned;
345 	if ((ke = td->td_kse) == NULL) {
346 		if (kg->kg_idle_kses) {
347 			/*
348 			 * There is a free one so it's ours for the asking.
349 			 */
350 			ke = TAILQ_FIRST(&kg->kg_iq);
351 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
352 			ke->ke_state = KES_THREAD;
353 			kg->kg_idle_kses--;
354 		} else if (tda && (tda->td_priority > td->td_priority)) {
355 			/*
356 			 * None free, but there is one we can commandeer.
357 			 */
358 			ke = tda->td_kse;
359 			sched_rem(tda);
360 			tda->td_kse = NULL;
361 			ke->ke_thread = NULL;
362 			tda = kg->kg_last_assigned =
363 		    	    TAILQ_PREV(tda, threadqueue, td_runq);
364 		}
365 	} else {
366 		/*
367 		 * Temporarily disassociate so it looks like the other cases.
368 		 */
369 		ke->ke_thread = NULL;
370 		td->td_kse = NULL;
371 	}
372 
373 	/*
374 	 * Add the thread to the ksegrp's run queue at
375 	 * the appropriate place.
376 	 */
377 	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
378 		if (td2->td_priority > td->td_priority) {
379 			TAILQ_INSERT_BEFORE(td2, td, td_runq);
380 			break;
381 		}
382 	}
383 	if (td2 == NULL) {
384 		/* We ran off the end of the TAILQ or it was empty. */
385 		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
386 	}
387 
388 	/*
389 	 * If we have a ke to use, then put it on the run queue and,
390 	 * if needed, readjust the last_assigned pointer.
391 	 */
392 	if (ke) {
393 		if (tda == NULL) {
394 			/*
395 			 * No pre-existing last assigned, so whoever is first
396 			 * gets the KSE we brought in (maybe us).
397 			 */
398 			td2 = TAILQ_FIRST(&kg->kg_runq);
399 			KASSERT((td2->td_kse == NULL),
400 			    ("unexpected ke present"));
401 			td2->td_kse = ke;
402 			ke->ke_thread = td2;
403 			kg->kg_last_assigned = td2;
404 		} else if (tda->td_priority > td->td_priority) {
405 			/*
406 			 * It's ours, grab it, but last_assigned is past us
407 			 * so don't change it.
408 			 */
409 			td->td_kse = ke;
410 			ke->ke_thread = td;
411 		} else {
412 			/*
413 			 * We are past last_assigned, so
414 			 * put the new kse on whatever is next,
415 			 * which may or may not be us.
416 			 */
417 			td2 = TAILQ_NEXT(tda, td_runq);
418 			kg->kg_last_assigned = td2;
419 			td2->td_kse = ke;
420 			ke->ke_thread = td2;
421 		}
422 		sched_add(ke->ke_thread);
423 	}
424 }
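/*
 * Illustrative trace of setrunqueue() for a threaded (P_SA) process, added
 * as a sketch (thread names and priorities are hypothetical): the ksegrp
 * run queue holds t1 (priority 10, has a KSE), t2 (priority 20, has a KSE,
 * and is kg_last_assigned) and t3 (priority 30, no KSE), and there are no
 * idle KSEs.  setrunqueue() of a new thread tn at priority 15 takes the
 * "commandeer" branch: t2 loses its KSE and kg_last_assigned backs up to
 * t1.  tn is then inserted between t1 and t2, and since t1's priority (10)
 * is numerically lower (better) than tn's, the final branch hands the KSE
 * to TAILQ_NEXT(t1) == tn and kg_last_assigned ends up pointing at tn.
 */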
425 
426 /************************************************************************
427  * Critical section marker functions					*
428  ************************************************************************/
429 /* Critical sections that prevent preemption. */
430 void
431 critical_enter(void)
432 {
433 	struct thread *td;
434 
435 	td = curthread;
436 	if (td->td_critnest == 0)
437 		cpu_critical_enter();
438 	td->td_critnest++;
439 }
440 
441 void
442 critical_exit(void)
443 {
444 	struct thread *td;
445 
446 	td = curthread;
447 	KASSERT(td->td_critnest > 0, ("critical_exit: critnest <= 0"));
448 	if (td->td_critnest == 1) {
449 		td->td_critnest = 0;
450 		cpu_critical_exit();
451 	} else {
452 		td->td_critnest--;
453 	}
454 }
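/*
 * Illustrative usage sketch, added for clarity and not compiled; the
 * function name is hypothetical.  Critical sections nest: only the
 * outermost critical_enter() calls cpu_critical_enter(), and only the
 * matching outermost critical_exit() calls cpu_critical_exit().
 */
#if 0
static void
example_pcpu_update(void)
{
	critical_enter();	/* td_critnest 0 -> 1, cpu_critical_enter() */
	critical_enter();	/* td_critnest 1 -> 2, no hardware action */
	/* ... touch per-CPU data without fear of being preempted ... */
	critical_exit();	/* td_critnest 2 -> 1, still in the section */
	critical_exit();	/* td_critnest 1 -> 0, cpu_critical_exit() */
}
#endif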
455 
456 
457 /************************************************************************
458  * SYSTEM RUN QUEUE manipulations and tests				*
459  ************************************************************************/
460 /*
461  * Initialize a run structure.
462  */
463 void
464 runq_init(struct runq *rq)
465 {
466 	int i;
467 
468 	bzero(rq, sizeof *rq);
469 	for (i = 0; i < RQ_NQS; i++)
470 		TAILQ_INIT(&rq->rq_queues[i]);
471 }
472 
473 /*
474  * Clear the status bit of the queue corresponding to priority level pri,
475  * indicating that it is empty.
476  */
477 static __inline void
478 runq_clrbit(struct runq *rq, int pri)
479 {
480 	struct rqbits *rqb;
481 
482 	rqb = &rq->rq_status;
483 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
484 	    rqb->rqb_bits[RQB_WORD(pri)],
485 	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
486 	    RQB_BIT(pri), RQB_WORD(pri));
487 	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
488 }
489 
490 /*
491  * Find the index of the first non-empty run queue.  This is done by
492  * scanning the status bits, a set bit indicates a non-empty queue.
493  */
494 static __inline int
495 runq_findbit(struct runq *rq)
496 {
497 	struct rqbits *rqb;
498 	int pri;
499 	int i;
500 
501 	rqb = &rq->rq_status;
502 	for (i = 0; i < RQB_LEN; i++)
503 		if (rqb->rqb_bits[i]) {
504 			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
505 			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
506 			    rqb->rqb_bits[i], i, pri);
507 			return (pri);
508 		}
509 
510 	return (-1);
511 }
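/*
 * Added note (assuming RQB_FFS() yields the zero-based index of the lowest
 * set bit): a set bit b in status word i corresponds to queue index
 * (i << RQB_L2BPW) + b, which is what the expression above computes.
 * Because lower indices hold numerically lower (i.e. better) priorities,
 * scanning words from 0 and taking the lowest set bit yields the highest
 * priority non-empty queue.
 */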
512 
513 /*
514  * Set the status bit of the queue corresponding to priority level pri,
515  * indicating that it is non-empty.
516  */
517 static __inline void
518 runq_setbit(struct runq *rq, int pri)
519 {
520 	struct rqbits *rqb;
521 
522 	rqb = &rq->rq_status;
523 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
524 	    rqb->rqb_bits[RQB_WORD(pri)],
525 	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
526 	    RQB_BIT(pri), RQB_WORD(pri));
527 	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
528 }
529 
530 /*
531  * Add the KSE to the queue specified by its priority, and set the
532  * corresponding status bit.
533  */
534 void
535 runq_add(struct runq *rq, struct kse *ke)
536 {
537 	struct rqhead *rqh;
538 	int pri;
539 
540 	pri = ke->ke_thread->td_priority / RQ_PPQ;
541 	ke->ke_rqindex = pri;
542 	runq_setbit(rq, pri);
543 	rqh = &rq->rq_queues[pri];
544 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
545 	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
546 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
547 }
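/*
 * Added example: with the stock RQ_PPQ of 4 (an assumption taken from
 * <sys/runq.h>), a thread at priority 97 maps onto queue index 97 / 4 == 24,
 * so runq_setbit() marks queue 24 non-empty in the status bitmap and the
 * KSE is appended to rq->rq_queues[24].
 */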
548 
549 /*
550  * Return true if there are runnable processes of any priority on the run
551  * queue, false otherwise.  Has no side effects, does not modify the run
552  * queue structure.
553  */
554 int
555 runq_check(struct runq *rq)
556 {
557 	struct rqbits *rqb;
558 	int i;
559 
560 	rqb = &rq->rq_status;
561 	for (i = 0; i < RQB_LEN; i++)
562 		if (rqb->rqb_bits[i]) {
563 			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
564 			    rqb->rqb_bits[i], i);
565 			return (1);
566 		}
567 	CTR0(KTR_RUNQ, "runq_check: empty");
568 
569 	return (0);
570 }
571 
572 /*
573  * Find the highest priority KSE on the run queue.
574  */
575 struct kse *
576 runq_choose(struct runq *rq)
577 {
578 	struct rqhead *rqh;
579 	struct kse *ke;
580 	int pri;
581 
582 	mtx_assert(&sched_lock, MA_OWNED);
583 	while ((pri = runq_findbit(rq)) != -1) {
584 		rqh = &rq->rq_queues[pri];
585 		ke = TAILQ_FIRST(rqh);
586 		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
587 		CTR3(KTR_RUNQ,
588 		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
589 		return (ke);
590 	}
591 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
592 
593 	return (NULL);
594 }
595 
596 /*
597  * Remove the KSE from the queue specified by its priority, and clear the
598  * corresponding status bit if the queue becomes empty.
599  * Caller must set ke->ke_state afterwards.
600  */
601 void
602 runq_remove(struct runq *rq, struct kse *ke)
603 {
604 	struct rqhead *rqh;
605 	int pri;
606 
607 	KASSERT(ke != NULL, ("runq_remove: NULL kse"));
608 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
609 		("runq_remove: process swapped out"));
610 	pri = ke->ke_rqindex;
611 	rqh = &rq->rq_queues[pri];
612 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
613 	    ke, ke->ke_thread->td_priority, pri, rqh);
614 	TAILQ_REMOVE(rqh, ke, ke_procq);
615 	if (TAILQ_EMPTY(rqh)) {
616 		CTR0(KTR_RUNQ, "runq_remove: empty");
617 		runq_clrbit(rq, pri);
618 	}
619 }
620 
621 #if 0
622 void
623 panc(char *string1, char *string2)
624 {
625 	printf("%s", string1);
626 	Debugger(string2);
627 }
628 
629 void
630 thread_sanity_check(struct thread *td, char *string)
631 {
632 	struct proc *p;
633 	struct ksegrp *kg;
634 	struct kse *ke;
635 	struct thread *td2 = NULL;
636 	unsigned int prevpri;
637 	int	saw_lastassigned = 0;
638 	int unassigned = 0;
639 	int assigned = 0;
640 
641 	p = td->td_proc;
642 	kg = td->td_ksegrp;
643 	ke = td->td_kse;
644 
645 
646 	if (ke) {
647 		if (p != ke->ke_proc) {
648 			panc(string, "wrong proc");
649 		}
650 		if (ke->ke_thread != td) {
651 			panc(string, "wrong thread");
652 		}
653 	}
654 
655 	if ((p->p_flag & P_SA) == 0) {
656 		if (ke == NULL) {
657 			panc(string, "non KSE thread lost kse");
658 		}
659 	} else {
660 		prevpri = 0;
661 		saw_lastassigned = 0;
662 		unassigned = 0;
663 		assigned = 0;
664 		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
665 			if (td2->td_priority < prevpri) {
666 				panc(string, "thread runqueue unsorted");
667 			}
668 			if ((td2->td_state == TDS_RUNQ) &&
669 			    td2->td_kse &&
670 			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
671 				panc(string, "KSE wrong state");
672 			}
673 			prevpri = td2->td_priority;
674 			if (td2->td_kse) {
675 				assigned++;
676 				if (unassigned) {
677 					panc(string, "unassigned before assigned");
678 				}
679 				if (kg->kg_last_assigned == NULL) {
680 					panc(string, "lastassigned corrupt");
681 				}
682 				if (saw_lastassigned) {
683 					panc(string, "last assigned not last");
684 				}
685 				if (td2->td_kse->ke_thread != td2) {
686 					panc(string, "mismatched kse/thread");
687 				}
688 			} else {
689 				unassigned++;
690 			}
691 			if (td2 == kg->kg_last_assigned) {
692 				saw_lastassigned = 1;
693 				if (td2->td_kse == NULL) {
694 					panc(string, "last assigned not assigned");
695 				}
696 			}
697 		}
698 		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
699 			panc(string, "where on earth does lastassigned point?");
700 		}
701 #if 0
702 		FOREACH_THREAD_IN_GROUP(kg, td2) {
703 			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
704 			    (TD_ON_RUNQ(td2))) {
705 				assigned++;
706 				if (td2->td_kse == NULL) {
707 					panc(string, "BOUND thread with no KSE");
708 				}
709 			}
710 		}
711 #endif
712 #if 0
713 		if ((unassigned + assigned) != kg->kg_runnable) {
714 			panc(string, "wrong number in runnable");
715 		}
716 #endif
717 	}
718 	if (assigned == 12345) {
719 		printf("%p %p %p %p %p %d, %d",
720 		    td, td2, ke, kg, p, assigned, saw_lastassigned);
721 	}
722 }
723 #endif
724 
725