/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/***

Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M = (N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is taken off the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, the pointer must not
point at it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it, and hence that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE or not, i.e. whether it is
'earlier' on the list than that thread or later. If it is earlier, the KSE
is removed from the last assigned thread (which is then left without a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, its KSE becomes available, and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.

*/
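
/*
 * A minimal illustrative sketch of the arithmetic described above, kept
 * under #if 0 like the other debug code in this file.  The helper and its
 * parameter names are hypothetical; it only restates the M = (N - X)
 * rule, capped by the number of runnable threads.
 */
#if 0
static int
kse_expected_on_runq(int nprocessors, int nrunning, int nrunnable)
{
	int m;

	m = nprocessors - nrunning;	/* M = N - X: KSEs not running. */
	if (m > nrunnable)
		m = nrunnable;		/* No more queued KSEs than threads. */
	return (m);
}
#endif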

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <machine/critical.h>

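/*
 * The run queue status bitmap must provide exactly one bit per queue.
 */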
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that, find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

retry:
	if ((ke = sched_choose())) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_KSES) {
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * Only allow non-system threads to run during a panic
	 * if they are the one we are tracing.  (I think.. [JRE])
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0))
		goto retry;

	TD_SET_RUNNING(td);
	return (td);
}
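
/*
 * A minimal, hypothetical sketch (not part of the build) of how a switch
 * path might consume choosethread(); the helper name and the surrounding
 * calls are illustrative only.  choosethread() is called with sched_lock
 * held and never returns NULL, falling back to the per-CPU idle thread.
 */
#if 0
static void
example_switch_path(void)
{
	struct thread *td;

	mtx_lock_spin(&sched_lock);
	td = choosethread();	/* May be PCPU_GET(idlethread). */
	/* ... hand td to the machine-dependent context switch ... */
	mtx_unlock_spin(&sched_lock);
}
#endif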

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Or maybe give it back to its owner if it's been loaned.
 * Assumes that the original thread is either not runnable or
 * already on the run queue.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;
	struct kse_upcall *ku;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original) {
		/*
		 * If the outgoing thread is in a threaded group and has never
		 * scheduled an upcall, decide whether this is a short
		 * or long term event and thus whether or not to schedule
		 * an upcall.
		 * If it is a short term event, just suspend it in
		 * a way that takes its KSE with it.
		 * Select the events for which we want to schedule upcalls.
		 * For now it's just sleep.
		 * XXXKSE eventually almost any inhibition could do.
		 */
		if (TD_CAN_UNBIND(original) && (original->td_standin) &&
		    TD_ON_SLEEPQ(original)) {
			/*
			 * Release ownership of the upcall and schedule an
			 * upcall thread; that new thread becomes the owner
			 * of the upcall structure.
			 */
			ku = original->td_upcall;
			ku->ku_owner = NULL;
			original->td_upcall = NULL;
			original->td_flags &= ~TDF_CAN_UNBIND;
			thread_schedule_upcall(original, ku);
		}
		original->td_kse = NULL;
	}

	/*
	 * Find the first unassigned thread.
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the KSE; otherwise idle the KSE.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		sched_add(ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/* Bring its kse with it, leave the thread attached. */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(ke);
			sched_add(ke);
		}
		return;
	}

	/* It is a threaded process. */
	kg = td->td_ksegrp;
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(ke);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	td->td_priority = newpri;
	setrunqueue(td);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td->td_kse);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
			sched_rem(ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a KSE to use, put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new KSE on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke);
	}
}

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/* Critical sections that prevent preemption. */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}
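
/*
 * Illustrative usage sketch (not part of the build): critical sections
 * nest, so only the outermost enter/exit pair touches hardware state via
 * cpu_critical_enter() and cpu_critical_exit().  The function below is
 * a hypothetical example, kept under #if 0.
 */
#if 0
static void
example_nested_critical(void)
{
	critical_enter();	/* Outermost: cpu_critical_enter() runs. */
	critical_enter();	/* Nested: only td_critnest++. */
	/* ... code that must not be preempted ... */
	critical_exit();	/* Nested: only td_critnest--. */
	critical_exit();	/* Outermost: cpu_critical_exit() runs. */
}
#endif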


/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
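			/*
			 * The queue index is the bit's position within
			 * this word plus the word's base; e.g. with
			 * 32-bit status words, bit 3 set in word 1
			 * gives pri = 3 + 32 = 35.
			 */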
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

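	/*
	 * RQ_PPQ adjacent priorities share each queue; e.g. with
	 * RQ_PPQ == 4, priorities 0-3 all map to queue index 0.
	 */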
	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority KSE on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	Debugger(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_KSES) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif