xref: /freebsd/sys/kern/kern_switch.c (revision f9218d3d4fd34f082473b3a021c6d4d109fb47cf)
1 /*
2  * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 /***
30 
31 Here is the logic:
32 
33 If there are N processors, then there are at most N KSEs (kernel
34 schedulable entities) working to process threads that belong to a
35 KSEGROUP (kg). If there are X of these KSEs actually running at the
36 moment in question, then there are at most M (= N - X) of these KSEs on
37 the run queue, as running KSEs are not on the queue.
38 
39 Runnable threads are queued off the KSEGROUP in priority order.
40 If there are M or more threads runnable, the top M threads
41 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
42 their priority from those threads and are put on the run queue.
43 
44 The last thread that had a priority high enough to have a KSE associated
45 with it, AND IS ON THE RUN QUEUE, is pointed to by
46 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
47 assigned, either because all the available KSEs are actively running or
48 because there are no threads queued, that pointer is NULL.
49 
50 When a KSE is removed from the run queue to be run, we know it was
51 associated with the highest priority thread in the queue (at the head
52 of the queue). If it is also the last assigned, we know M was 1 and must
53 now be 0. Since the thread is no longer queued, the pointer must no
54 longer refer to it. Since we know there were no more KSEs available
55 (M was 1 and is now 0), and since we are not FREEING our KSE
56 but using it, we know there are STILL no more KSEs available; therefore
57 the next thread in the ksegrp list will not have a KSE to assign to
58 it, so the pointer must be made 'invalid' (NULL).
59 
60 The pointer exists so that when a new thread is made runnable, its
61 priority can be compared with that of the last assigned thread to see
62 whether it should 'steal' that thread's KSE, i.e. whether it sorts 'earlier'
63 on the list than that thread or later. If it sorts earlier, the KSE is
64 removed from the last assigned thread (which is now not assigned a KSE)
65 and reassigned to the new thread, which is placed earlier in the list.
66 The pointer is then backed up to the previous thread (which may or may not
67 be the new thread).
68 
69 When a thread sleeps or is removed, the KSE becomes available and if there
70 are queued threads that are not assigned KSEs, the highest priority one of
71 them is assigned the KSE, which is then placed back on the run queue at
72 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
73 to point to it.
74 
75 The following diagram shows 2 KSEs and 3 threads from a single process.
76 
77  RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
78               \    \____
79                \        \
80     KSEGROUP---thread--thread--thread    (queued in priority order)
81         \                 /
82          \_______________/
83           (last_assigned)
84 
85 The result of this scheme is that the M available KSEs are always
86 queued at the priorities they have inherited from the M highest priority
87 threads for that KSEGROUP. If this situation changes, the KSEs are
88 reassigned to keep this true.
89 
90 */
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/kernel.h>
95 #include <sys/ktr.h>
96 #include <sys/lock.h>
97 #include <sys/mutex.h>
98 #include <sys/proc.h>
99 #include <sys/queue.h>
100 #include <sys/sched.h>
101 #include <machine/critical.h>
102 
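/* The run queue status bitmap must provide exactly one bit per run queue. */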
103 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
104 
105 void panc(char *string1, char *string2);
106 
107 #if 0
108 static void runq_readjust(struct runq *rq, struct kse *ke);
109 #endif
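/*
 * Illustrative sketch (not part of the original file): a helper, with a
 * name of our own choosing, that spells out the invariant described in the
 * comment at the top of this file: every thread on the ksegrp run queue up
 * to and including kg_last_assigned has a KSE assigned, and every thread
 * after it has none.  Kept under #if 0, like thread_sanity_check() below,
 * because nothing calls it.
 */
#if 0
static void
kg_check_last_assigned(struct ksegrp *kg)
{
	struct thread *td;
	int past_last;

	past_last = (kg->kg_last_assigned == NULL);
	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		KASSERT(past_last ? (td->td_kse == NULL) :
		    (td->td_kse != NULL),
		    ("kg_last_assigned invariant violated"));
		if (td == kg->kg_last_assigned)
			past_last = 1;
	}
}
#endif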
110 /************************************************************************
111  * Functions that manipulate runnability from a thread perspective.	*
112  ************************************************************************/
113 /*
114  * Select the KSE that will be run next.  From that find the thread, and
115  * remove it from the KSEGRP's run queue.  If there is thread clustering,
116  * this will be what does it.
117  */
118 struct thread *
119 choosethread(void)
120 {
121 	struct kse *ke;
122 	struct thread *td;
123 	struct ksegrp *kg;
124 
125 retry:
126 	if ((ke = sched_choose())) {
127 		td = ke->ke_thread;
128 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
129 		kg = ke->ke_ksegrp;
130 		if (td->td_proc->p_flag & P_THREADED) {
131 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
132 			if (kg->kg_last_assigned == td) {
133 				kg->kg_last_assigned = TAILQ_PREV(td,
134 				    threadqueue, td_runq);
135 			}
136 		}
137 		kg->kg_runnable--;
138 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
139 		    td, td->td_priority);
140 	} else {
141 		/* Simulate runq_choose() having returned the idle thread */
142 		td = PCPU_GET(idlethread);
143 		ke = td->td_kse;
144 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
145 	}
146 	ke->ke_flags |= KEF_DIDRUN;
147 
148 	/*
149 	 * Only allow non-system threads to run during a panic
150 	 * if they are the one we are tracing.  (I think.. [JRE])
151 	 */
152 	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
153 	    (td->td_flags & TDF_INPANIC) == 0))
154 		goto retry;
155 
156 	TD_SET_RUNNING(td);
157 	return (td);
158 }
159 
160 /*
161  * Given a surplus KSE, either assign a new runnable thread to it
162  * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
163  * Assumes that the original thread is not runnable.
164  */
165 void
166 kse_reassign(struct kse *ke)
167 {
168 	struct ksegrp *kg;
169 	struct thread *td;
170 	struct thread *original;
171 	struct kse_upcall *ku;
172 
173 	mtx_assert(&sched_lock, MA_OWNED);
174 	original = ke->ke_thread;
175 	KASSERT(original == NULL || TD_IS_INHIBITED(original),
176     	    ("reassigning KSE with runnable thread"));
177 	kg = ke->ke_ksegrp;
178 	if (original) {
179 		/*
180 		 * If the outgoing thread is in a threaded group and has never
181 		 * scheduled an upcall, decide whether this is a short-
182 		 * or long-term event and thus whether or not to schedule
183 		 * an upcall.
184 		 * If it is a short-term event, just suspend it in
185 		 * a way that takes its KSE with it.
186 		 * Select the events for which we want to schedule upcalls.
187 		 * For now it's just sleep.
188 		 * XXXKSE eventually almost any inhibition could do.
189 		 */
190 		if (TD_CAN_UNBIND(original) && (original->td_standin) &&
191 		    TD_ON_SLEEPQ(original)) {
192 		    	/*
193 			 * Release ownership of the upcall and schedule an upcall
194 			 * thread; this new upcall thread becomes the owner of
195 			 * the upcall structure.
196 			 */
197 			ku = original->td_upcall;
198 			ku->ku_owner = NULL;
199 			original->td_upcall = NULL;
200 			original->td_flags &= ~TDF_CAN_UNBIND;
201 			thread_schedule_upcall(original, ku);
202 		}
203 		original->td_kse = NULL;
204 	}
205 
206 	/*
207 	 * Find the first unassigned thread
208 	 */
209 	if ((td = kg->kg_last_assigned) != NULL)
210 		td = TAILQ_NEXT(td, td_runq);
211 	else
212 		td = TAILQ_FIRST(&kg->kg_runq);
213 
214 	/*
215 	 * If we found one, assign it the kse; otherwise idle the kse.
216 	 */
217 	if (td) {
218 		kg->kg_last_assigned = td;
219 		td->td_kse = ke;
220 		ke->ke_thread = td;
221 		sched_add(ke);
222 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
223 		return;
224 	}
225 
226 	ke->ke_state = KES_IDLE;
227 	ke->ke_thread = NULL;
228 	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
229 	kg->kg_idle_kses++;
230 	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
231 	return;
232 }
233 
234 #if 0
235 /*
236  * Remove a thread from its KSEGRP's run queue.
237  * This in turn may remove it from a KSE if it was already assigned
238  * to one, possibly causing a new thread to be assigned to the KSE
239  * and the KSE getting a new priority.
240  */
241 static void
242 remrunqueue(struct thread *td)
243 {
244 	struct thread *td2, *td3;
245 	struct ksegrp *kg;
246 	struct kse *ke;
247 
248 	mtx_assert(&sched_lock, MA_OWNED);
249 	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
250 	kg = td->td_ksegrp;
251 	ke = td->td_kse;
252 	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
253 	kg->kg_runnable--;
254 	TD_SET_CAN_RUN(td);
255 	/*
256 	 * If it is not a threaded process, take the shortcut.
257 	 */
258 	if ((td->td_proc->p_flag & P_THREADED) == 0) {
259 		/* Bring its kse with it, leave the thread attached */
260 		sched_rem(ke);
261 		ke->ke_state = KES_THREAD;
262 		return;
263 	}
264    	td3 = TAILQ_PREV(td, threadqueue, td_runq);
265 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
266 	if (ke) {
267 		/*
268 		 * This thread has been assigned to a KSE.
269 		 * We need to dissociate it and try assign the
270 		 * KSE to the next available thread. Then, we should
271 		 * see if we need to move the KSE in the run queues.
272 		 */
273 		sched_rem(ke);
274 		ke->ke_state = KES_THREAD;
275 		td2 = kg->kg_last_assigned;
276 		KASSERT((td2 != NULL), ("last assigned has wrong value"));
277 		if (td2 == td)
278 			kg->kg_last_assigned = td3;
279 		kse_reassign(ke);
280 	}
281 }
282 #endif
283 
284 /*
285  * Change the priority of a thread that is on the run queue.
286  */
287 void
288 adjustrunqueue(struct thread *td, int newpri)
289 {
290 	struct ksegrp *kg;
291 	struct kse *ke;
292 
293 	mtx_assert(&sched_lock, MA_OWNED);
294 	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
295 
296 	ke = td->td_kse;
297 	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
298 	/*
299 	 * If it is not a threaded process, take the shortcut.
300 	 */
301 	if ((td->td_proc->p_flag & P_THREADED) == 0) {
302 		/* We only care about the kse in the run queue. */
303 		td->td_priority = newpri;
304 		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
305 			sched_rem(ke);
306 			sched_add(ke);
307 		}
308 		return;
309 	}
310 
311 	/* It is a threaded process */
312 	kg = td->td_ksegrp;
313 	kg->kg_runnable--;
314 	TD_SET_CAN_RUN(td);
315 	if (ke) {
316 		if (kg->kg_last_assigned == td) {
317 			kg->kg_last_assigned =
318 			    TAILQ_PREV(td, threadqueue, td_runq);
319 		}
320 		sched_rem(ke);
321 	}
322 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
323 	td->td_priority = newpri;
324 	setrunqueue(td);
325 }
326 
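/*
 * Make a thread runnable and place it on a run queue.  For an unthreaded
 * process, just hand its (permanently attached) KSE to the scheduler.  For
 * a threaded process, insert the thread into the ksegrp's priority-ordered
 * run queue, find it a KSE (a free one, a commandeered one, or its own) if
 * one is due, and keep kg_last_assigned consistent.
 */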
327 void
328 setrunqueue(struct thread *td)
329 {
330 	struct kse *ke;
331 	struct ksegrp *kg;
332 	struct thread *td2;
333 	struct thread *tda;
334 
335 	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
336 	mtx_assert(&sched_lock, MA_OWNED);
337 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
338 	    ("setrunqueue: bad thread state"));
339 	TD_SET_RUNQ(td);
340 	kg = td->td_ksegrp;
341 	kg->kg_runnable++;
342 	if ((td->td_proc->p_flag & P_THREADED) == 0) {
343 		/*
344 		 * Common path optimisation: there is only one thread and
345 		 * one KSE, and the KSE is always already attached.
346 		 * Totally ignore the ksegrp run queue.
347 		 */
348 		sched_add(td->td_kse);
349 		return;
350 	}
351 
352 	tda = kg->kg_last_assigned;
353 	if ((ke = td->td_kse) == NULL) {
354 		if (kg->kg_idle_kses) {
355 			/*
356 			 * There is a free one so it's ours for the asking..
357 			 */
358 			ke = TAILQ_FIRST(&kg->kg_iq);
359 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
360 			ke->ke_state = KES_THREAD;
361 			kg->kg_idle_kses--;
362 		} else if (tda && (tda->td_priority > td->td_priority)) {
363 			/*
364 			 * None free, but there is one we can commandeer.
365 			 */
366 			ke = tda->td_kse;
367 			tda->td_kse = NULL;
368 			ke->ke_thread = NULL;
369 			tda = kg->kg_last_assigned =
370 		    	    TAILQ_PREV(tda, threadqueue, td_runq);
371 			sched_rem(ke);
372 		}
373 	} else {
374 		/*
375 		 * Temporarily disassociate so it looks like the other cases.
376 		 */
377 		ke->ke_thread = NULL;
378 		td->td_kse = NULL;
379 	}
380 
381 	/*
382 	 * Add the thread to the ksegrp's run queue at
383 	 * the appropriate place.
384 	 */
385 	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
386 		if (td2->td_priority > td->td_priority) {
387 			TAILQ_INSERT_BEFORE(td2, td, td_runq);
388 			break;
389 		}
390 	}
391 	if (td2 == NULL) {
392 		/* We ran off the end of the TAILQ or it was empty. */
393 		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
394 	}
395 
396 	/*
397 	 * If we have a ke to use, then put it on the run queue and,
398 	 * if needed, readjust the last_assigned pointer.
399 	 */
400 	if (ke) {
401 		if (tda == NULL) {
402 			/*
403 			 * No pre-existing last assigned so whoever is first
404 			 * gets the KSE we brought in.. (maybe us)
405 			 */
406 			td2 = TAILQ_FIRST(&kg->kg_runq);
407 			KASSERT((td2->td_kse == NULL),
408 			    ("unexpected ke present"));
409 			td2->td_kse = ke;
410 			ke->ke_thread = td2;
411 			kg->kg_last_assigned = td2;
412 		} else if (tda->td_priority > td->td_priority) {
413 			/*
414 			 * It's ours, grab it, but last_assigned is past us
415 			 * so don't change it.
416 			 */
417 			td->td_kse = ke;
418 			ke->ke_thread = td;
419 		} else {
420 			/*
421 			 * We are past last_assigned, so
422 			 * put the new kse on whatever is next,
423 			 * which may or may not be us.
424 			 */
425 			td2 = TAILQ_NEXT(tda, td_runq);
426 			kg->kg_last_assigned = td2;
427 			td2->td_kse = ke;
428 			ke->ke_thread = td2;
429 		}
430 		sched_add(ke);
431 	}
432 }
433 
434 /************************************************************************
435  * Critical section marker functions					*
436  ************************************************************************/
437 /* Critical sections that prevent preemption. */
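/*
 * Enter a critical section.  Only the outermost entry (td_critnest going
 * from 0 to 1) calls the machine-dependent cpu_critical_enter(); nested
 * entries just increment the count.
 */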
438 void
439 critical_enter(void)
440 {
441 	struct thread *td;
442 
443 	td = curthread;
444 	if (td->td_critnest == 0)
445 		cpu_critical_enter();
446 	td->td_critnest++;
447 }
448 
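/*
 * Exit a critical section.  Only when the outermost section is left
 * (td_critnest dropping back to 0) is the machine-dependent
 * cpu_critical_exit() called; otherwise just decrement the nesting count.
 */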
449 void
450 critical_exit(void)
451 {
452 	struct thread *td;
453 
454 	td = curthread;
455 	if (td->td_critnest == 1) {
456 		td->td_critnest = 0;
457 		cpu_critical_exit();
458 	} else {
459 		td->td_critnest--;
460 	}
461 }
462 
463 
464 /************************************************************************
465  * SYSTEM RUN QUEUE manipulations and tests				*
466  ************************************************************************/
467 /*
468  * Initialize a run queue structure.
469  */
470 void
471 runq_init(struct runq *rq)
472 {
473 	int i;
474 
475 	bzero(rq, sizeof *rq);
476 	for (i = 0; i < RQ_NQS; i++)
477 		TAILQ_INIT(&rq->rq_queues[i]);
478 }
479 
480 /*
481  * Clear the status bit of the queue corresponding to priority level pri,
482  * indicating that it is empty.
483  */
484 static __inline void
485 runq_clrbit(struct runq *rq, int pri)
486 {
487 	struct rqbits *rqb;
488 
489 	rqb = &rq->rq_status;
490 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
491 	    rqb->rqb_bits[RQB_WORD(pri)],
492 	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
493 	    RQB_BIT(pri), RQB_WORD(pri));
494 	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
495 }
496 
497 /*
498  * Find the index of the first non-empty run queue.  This is done by
499  * scanning the status bits, a set bit indicates a non-empty queue.
500  */
501 static __inline int
502 runq_findbit(struct runq *rq)
503 {
504 	struct rqbits *rqb;
505 	int pri;
506 	int i;
507 
508 	rqb = &rq->rq_status;
509 	for (i = 0; i < RQB_LEN; i++)
510 		if (rqb->rqb_bits[i]) {
511 			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
512 			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
513 			    rqb->rqb_bits[i], i, pri);
514 			return (pri);
515 		}
516 
517 	return (-1);
518 }
519 
520 /*
521  * Set the status bit of the queue corresponding to priority level pri,
522  * indicating that it is non-empty.
523  */
524 static __inline void
525 runq_setbit(struct runq *rq, int pri)
526 {
527 	struct rqbits *rqb;
528 
529 	rqb = &rq->rq_status;
530 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
531 	    rqb->rqb_bits[RQB_WORD(pri)],
532 	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
533 	    RQB_BIT(pri), RQB_WORD(pri));
534 	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
535 }
536 
537 /*
538  * Add the KSE to the queue specified by its priority, and set the
539  * corresponding status bit.
540  */
541 void
542 runq_add(struct runq *rq, struct kse *ke)
543 {
544 	struct rqhead *rqh;
545 	int pri;
546 
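	/*
	 * RQ_PPQ adjacent priority levels share each run queue; collapse the
	 * thread priority to a queue index (e.g. with RQ_PPQ == 4,
	 * priorities 0-3 map to queue 0, 4-7 to queue 1, and so on).
	 */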
547 	pri = ke->ke_thread->td_priority / RQ_PPQ;
548 	ke->ke_rqindex = pri;
549 	runq_setbit(rq, pri);
550 	rqh = &rq->rq_queues[pri];
551 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
552 	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
553 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
554 }
555 
556 /*
557  * Return true if there are runnable processes of any priority on the run
558  * queue, false otherwise.  Has no side effects, does not modify the run
559  * queue structure.
560  */
561 int
562 runq_check(struct runq *rq)
563 {
564 	struct rqbits *rqb;
565 	int i;
566 
567 	rqb = &rq->rq_status;
568 	for (i = 0; i < RQB_LEN; i++)
569 		if (rqb->rqb_bits[i]) {
570 			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
571 			    rqb->rqb_bits[i], i);
572 			return (1);
573 		}
574 	CTR0(KTR_RUNQ, "runq_check: empty");
575 
576 	return (0);
577 }
578 
579 /*
580  * Find the highest priority process on the run queue.
581  */
582 struct kse *
583 runq_choose(struct runq *rq)
584 {
585 	struct rqhead *rqh;
586 	struct kse *ke;
587 	int pri;
588 
589 	mtx_assert(&sched_lock, MA_OWNED);
590 	while ((pri = runq_findbit(rq)) != -1) {
591 		rqh = &rq->rq_queues[pri];
592 		ke = TAILQ_FIRST(rqh);
593 		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
594 		CTR3(KTR_RUNQ,
595 		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
596 		return (ke);
597 	}
598 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
599 
600 	return (NULL);
601 }
602 
603 /*
604  * Remove the KSE from the queue specified by its priority, and clear the
605  * corresponding status bit if the queue becomes empty.
606  * Caller must set ke->ke_state afterwards.
607  */
608 void
609 runq_remove(struct runq *rq, struct kse *ke)
610 {
611 	struct rqhead *rqh;
612 	int pri;
613 
614 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
615 		("runq_remove: process swapped out"));
616 	pri = ke->ke_rqindex;
617 	rqh = &rq->rq_queues[pri];
618 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
619 	    ke, ke->ke_thread->td_priority, pri, rqh);
620 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
621 	TAILQ_REMOVE(rqh, ke, ke_procq);
622 	if (TAILQ_EMPTY(rqh)) {
623 		CTR0(KTR_RUNQ, "runq_remove: empty");
624 		runq_clrbit(rq, pri);
625 	}
626 }
627 
628 #if 0
629 void
630 panc(char *string1, char *string2)
631 {
632 	printf("%s", string1);
633 	Debugger(string2);
634 }
635 
636 void
637 thread_sanity_check(struct thread *td, char *string)
638 {
639 	struct proc *p;
640 	struct ksegrp *kg;
641 	struct kse *ke;
642 	struct thread *td2 = NULL;
643 	unsigned int prevpri;
644 	int	saw_lastassigned = 0;
645 	int unassigned = 0;
646 	int assigned = 0;
647 
648 	p = td->td_proc;
649 	kg = td->td_ksegrp;
650 	ke = td->td_kse;
651 
652 
653 	if (ke) {
654 		if (p != ke->ke_proc) {
655 			panc(string, "wrong proc");
656 		}
657 		if (ke->ke_thread != td) {
658 			panc(string, "wrong thread");
659 		}
660 	}
661 
662 	if ((p->p_flag & P_THREADED) == 0) {
663 		if (ke == NULL) {
664 			panc(string, "non KSE thread lost kse");
665 		}
666 	} else {
667 		prevpri = 0;
668 		saw_lastassigned = 0;
669 		unassigned = 0;
670 		assigned = 0;
671 		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
672 			if (td2->td_priority < prevpri) {
673 				panc(string, "thread runqueue unsorted");
674 			}
675 			if ((td2->td_state == TDS_RUNQ) &&
676 			    td2->td_kse &&
677 			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
678 				panc(string, "KSE wrong state");
679 			}
680 			prevpri = td2->td_priority;
681 			if (td2->td_kse) {
682 				assigned++;
683 				if (unassigned) {
684 					panc(string, "unassigned before assigned");
685 				}
686  				if  (kg->kg_last_assigned == NULL) {
687 					panc(string, "lastassigned corrupt");
688 				}
689 				if (saw_lastassigned) {
690 					panc(string, "last assigned not last");
691 				}
692 				if (td2->td_kse->ke_thread != td2) {
693 					panc(string, "mismatched kse/thread");
694 				}
695 			} else {
696 				unassigned++;
697 			}
698 			if (td2 == kg->kg_last_assigned) {
699 				saw_lastassigned = 1;
700 				if (td2->td_kse == NULL) {
701 					panc(string, "last assigned not assigned");
702 				}
703 			}
704 		}
705 		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
706 			panc(string, "where on earth does lastassigned point?");
707 		}
708 #if 0
709 		FOREACH_THREAD_IN_GROUP(kg, td2) {
710 			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
711 			    (TD_ON_RUNQ(td2))) {
712 				assigned++;
713 				if (td2->td_kse == NULL) {
714 					panc(string, "BOUND thread with no KSE");
715 				}
716 			}
717 		}
718 #endif
719 #if 0
720 		if ((unassigned + assigned) != kg->kg_runnable) {
721 			panc(string, "wrong number in runnable");
722 		}
723 #endif
724 	}
725 	if (assigned == 12345) {
726 		printf("%p %p %p %p %p %d, %d",
727 		    td, td2, ke, kg, p, assigned, saw_lastassigned);
728 	}
729 }
730 #endif
731 
732