/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M = (N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

kg->kg_last_assigned points to the last thread that is on the run queue
AND has a priority high enough to have a KSE associated with it.  If no
threads queued off the KSEGROUP have KSEs assigned, either because all
the available KSEs are actively running or because there are no threads
queued, that pointer is NULL.

When a KSE is taken off the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned, we know M was 1 and must
now be 0.  Since the thread is no longer queued, the pointer must no
longer refer to it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE but using
it, we know there are STILL no more KSEs available, so the next thread
in the ksegrp list will not have a KSE to assign to it, and the pointer
must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE, i.e. whether it belongs
earlier on the list than that thread or later.  If it belongs earlier,
the KSE is removed from the last assigned thread (which is then no
longer assigned a KSE) and reassigned to the new thread, which is placed
earlier in the list.  The pointer is then backed up to the previous
thread (which may or may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available, and if
there are queued threads that are not assigned KSEs, the highest
priority one of them is assigned the KSE, which is then placed back on
the run queue at the appropriate place, and the kg->kg_last_assigned
pointer is adjusted down to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.
***/
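
/*
 * Illustrative sketch only (never compiled): a minimal version of the
 * "steal the KSE from the last assigned thread" rule described above,
 * written against the real structures but ignoring locking, the system
 * run queue, and the insertion of the new thread into kg_runq.  The
 * helper name is made up for this example; the authoritative logic
 * lives in setrunqueue() below.
 */
#if 0
static void
ksegrp_steal_kse_sketch(struct ksegrp *kg, struct thread *newtd)
{
	struct thread *tda = kg->kg_last_assigned;
	struct kse *ke;

	if (tda != NULL && tda->td_priority > newtd->td_priority) {
		/* The new thread sorts earlier, so it takes tda's KSE. */
		ke = tda->td_kse;
		tda->td_kse = NULL;
		ke->ke_thread = newtd;
		newtd->td_kse = ke;
		/* Back last_assigned up to the previous queued thread. */
		kg->kg_last_assigned = TAILQ_PREV(tda, threadqueue, td_runq);
	}
}
#endif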

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>

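/*
 * The run queue status bitmap must provide exactly one bit per queue:
 * RQB_LEN words of RQB_BPW bits each must cover all RQ_NQS queues.
 */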
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs. */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_SA) {
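			/*
			 * An SA (threaded) process also keeps this thread
			 * on the ksegrp run queue; take it off and back up
			 * kg_last_assigned if it pointed here.
			 */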
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in a panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes that the original thread is not runnable.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original)
		original->td_kse = NULL;

	/*
	 * Find the first unassigned thread.
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		sched_add(td);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* Bring its kse with it, leave the thread attached. */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
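		/* Requeue only if the new priority maps to a different runq bucket. */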
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td);
		}
		return;
	}

	/* It is a threaded process. */
	kg = td->td_ksegrp;
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	td->td_priority = newpri;
	setrunqueue(td);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/*
		 * Common path optimisation: only one of everything,
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one, so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			sched_rem(tda);
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke->ke_thread);
	}
}

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/* Critical sections that prevent preemption. */
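/*
 * Enter a critical section.  The first (outermost) entry disables
 * CPU-level preemption via cpu_critical_enter(); nested entries just
 * bump the per-thread nesting count.
 */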
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

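/*
 * Exit a critical section.  Preemption is re-enabled via
 * cpu_critical_exit() only when the outermost section is left
 * (the nesting count returns to zero).
 */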
void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

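	/* RQ_PPQ priorities share one queue; map the priority to a queue index. */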
	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority KSE on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	Debugger(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_SA) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif