xref: /freebsd/sys/kern/kern_switch.c (revision 4f0db5e08cf1236f0efe47f6ea6f5ae1c833d6de)
1 /*
2  * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 /***
30 
31 Here is the logic:
32 
33 If there are N processors, then there are at most N KSEs (kernel
34 schedulable entities) working to process threads that belong to a
35 KSEGROUP (kg). If there are X of these KSEs actually running at the
36 moment in question, then there are at most M (= N - X) of these KSEs on
37 the run queue, as running KSEs are not on the queue.
38 
39 Runnable threads are queued off the KSEGROUP in priority order.
40 If there are M or more threads runnable, the top M threads
41 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
42 their priority from those threads and are put on the run queue.
43 
44 The last thread that had a priority high enough to have a KSE associated
45 with it, AND IS ON THE RUN QUEUE, is pointed to by
46 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
47 assigned, either because all the available KSEs are actively running or
48 because there are no threads queued, that pointer is NULL.
49 
50 When a KSE is removed from the run queue to be run, we know
51 it was associated with the highest priority thread in the queue (at the head
52 of the queue). If it is also the last assigned, we know M was 1 and must
53 now be 0. Since the thread is no longer queued, that pointer must be
54 removed from it. Since we know there were no more KSEs available
55 (M was 1 and is now 0), and since we are not FREEING our KSE
56 but using it, there are STILL no more KSEs available, so we can prove
57 that the next thread in the ksegrp list will not have a KSE to assign to
58 it, so we can show that the pointer must be made 'invalid' (NULL).
59 
60 The pointer exists so that when a new thread is made runnable, it can
61 have its priority compared with the last assigned thread to see if
62 it should 'steal' its KSE or not.. i.e. is it 'earlier'
63 on the list than that thread or later.. If it's earlier, then the KSE is
64 removed from the last assigned (which is now not assigned a KSE)
65 and reassigned to the new thread, which is placed earlier in the list.
66 The pointer is then backed up to the previous thread (which may or may not
67 be the new thread).
68 
69 When a thread sleeps or is removed, the KSE becomes available and if there
70 are queued threads that are not assigned KSEs, the highest priority one of
71 them is assigned the KSE, which is then placed back on the run queue at
72 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
73 to point to it.
74 
75 The following diagram shows 2 KSEs and 3 threads from a single process.
76 
77  RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
78               \    \____
79                \        \
80     KSEGROUP---thread--thread--thread    (queued in priority order)
81         \                 /
82          \_______________/
83           (last_assigned)
84 
85 The result of this scheme is that the M available KSEs are always
86 queued at the priorities they have inherited from the M highest priority
87 threads for that KSEGROUP. If this situation changes, the KSEs are
88 reassigned to keep this true.
89 
90 */
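
/*
 * Illustrative sketch (not compiled): the 'steal' decision described
 * above, expressed with the same fields that setrunqueue() uses below.
 * The helper name is hypothetical; a lower td_priority value means a
 * higher priority, i.e. an earlier place on the ksegrp run queue.
 */
#if 0
static int
would_steal_kse(struct ksegrp *kg, struct thread *newtd)
{
	struct thread *tda;

	tda = kg->kg_last_assigned;
	/* Nothing is assigned, so there is no KSE to commandeer. */
	if (tda == NULL)
		return (0);
	/* Steal only if the new thread sorts earlier than the last assigned. */
	return (newtd->td_priority < tda->td_priority);
}
#endif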
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/kernel.h>
95 #include <sys/ktr.h>
96 #include <sys/lock.h>
97 #include <sys/mutex.h>
98 #include <sys/proc.h>
99 #include <sys/queue.h>
100 #include <machine/critical.h>
101 
102 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
103 
104 /*
105  * Global run queue.
106  */
107 static struct runq runq;
108 SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)
109 
110 static void runq_readjust(struct runq *rq, struct kse *ke);
111 /************************************************************************
112  * Functions that manipulate runnability from a thread perspective.	*
113  ************************************************************************/
114 
115 /*
116  * Select the KSE that will be run next.  From that find the thread, and
117  * remove it from the KSEGRP's run queue.  If there is thread clustering,
118  * this will be what does it.
119  */
120 struct thread *
121 choosethread(void)
122 {
123 	struct kse *ke;
124 	struct thread *td;
125 	struct ksegrp *kg;
126 
127 retry:
128 	if ((ke = runq_choose(&runq))) {
129 		td = ke->ke_thread;
130 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
131 		kg = ke->ke_ksegrp;
132 		if (td->td_flags & TDF_UNBOUND) {
133 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
134 			if (kg->kg_last_assigned == td)
135 				kg->kg_last_assigned = TAILQ_PREV(td,
136 				    threadqueue, td_runq);
140 			/*
141 			 * If we have started running an upcall, then
142 			 * TDF_UNBOUND was set because the thread was
143 			 * created without a KSE. Now that we have one,
144 			 * and it is our time to run, we make sure
145 			 * that BOUND semantics apply for the rest of
146 			 * the journey to userland, and into the UTS.
147 			 */
148 #ifdef	NOTYET
149 			if (td->td_flags & TDF_UPCALLING)
150 				td->td_flags &= ~TDF_UNBOUND;
151 #endif
152 		}
153 		kg->kg_runnable--;
154 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
155 		    td, td->td_priority);
156 	} else {
157 		/* Simulate runq_choose() having returned the idle thread */
158 		td = PCPU_GET(idlethread);
159 		ke = td->td_kse;
160 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
161 	}
162 	ke->ke_flags |= KEF_DIDRUN;
163 	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
164 	    (td->td_flags & TDF_INPANIC) == 0))
165 		goto retry;
166 	TD_SET_RUNNING(td);
167 	return (td);
168 }
169 
170 /*
171  * Given a KSE (now surplus), either assign a new runnable thread to it
172  * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
173  * Assumes the KSE is no longer linked to any thread (it has been cleaned).
174  */
175 void
176 kse_reassign(struct kse *ke)
177 {
178 	struct ksegrp *kg;
179 	struct thread *td;
180 
181 	kg = ke->ke_ksegrp;
182 
183 	/*
184 	 * Find the first unassigned thread.
185 	 * If there is a 'last assigned' then see what's next;
186 	 * otherwise look at what is first.
187 	 */
188 	if ((td = kg->kg_last_assigned)) {
189 		td = TAILQ_NEXT(td, td_runq);
190 	} else {
191 		td = TAILQ_FIRST(&kg->kg_runq);
192 	}
193 
194 	/*
195 	 * If we found one, assign it the KSE; otherwise idle the KSE.
196 	 */
197 	if (td) {
198 		kg->kg_last_assigned = td;
199 		td->td_kse = ke;
200 		ke->ke_thread = td;
201 		runq_add(&runq, ke);
202 		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
203 	} else {
204 		ke->ke_state = KES_IDLE;
205 		ke->ke_thread = NULL;
206 		TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
207 		kg->kg_idle_kses++;
208 		CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
209 	}
210 }
211 
212 int
213 kserunnable(void)
214 {
215 	return runq_check(&runq);
216 }
217 
218 /*
219  * Remove a thread from its KSEGRP's run queue.
220  * This in turn may remove it from a KSE if it was already assigned
221  * to one, possibly causing a new thread to be assigned to the KSE
222  * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
223  */
224 void
225 remrunqueue(struct thread *td)
226 {
227 	struct thread *td2, *td3;
228 	struct ksegrp *kg;
229 	struct kse *ke;
230 
231 	mtx_assert(&sched_lock, MA_OWNED);
232 	KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
233 	kg = td->td_ksegrp;
234 	ke = td->td_kse;
235 	/*
236 	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
237 	 * threads are BOUND.
238 	 */
239 	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
240 	kg->kg_runnable--;
241 	TD_SET_CAN_RUN(td);
242 	if ((td->td_flags & TDF_UNBOUND) == 0)  {
243 		/* Bring its kse with it, leave the thread attached */
244 		runq_remove(&runq, ke);
245 		ke->ke_state = KES_THREAD;
246 		return;
247 	}
248 	if (ke) {
249 		/*
250 		 * This thread has been assigned to a KSE.
251 		 * We need to dissociate it and try to assign the
252 		 * KSE to the next available thread. Then, we should
253 		 * see if we need to move the KSE in the run queues.
254 		 */
255 		td2 = kg->kg_last_assigned;
256 		KASSERT((td2 != NULL), ("last assigned has wrong value "));
257 		td->td_kse = NULL;
258 		if ((td3 = TAILQ_NEXT(td2, td_runq))) {
259 			KASSERT(td3 != td, ("td3 somehow matched td"));
260 			/*
261 			 * Give the next unassigned thread to the KSE
262 			 * so the number of runnable KSEs remains
263 			 * constant.
264 			 */
265 			td3->td_kse = ke;
266 			ke->ke_thread = td3;
267 			kg->kg_last_assigned = td3;
268 			runq_readjust(&runq, ke);
269 		} else {
270 			/*
271 			 * There is no unassigned thread.
272 			 * If we were the last assigned one,
273 			 * adjust the last assigned pointer back
274 			 * one, which may result in NULL.
275 			 */
276 			if (td == td2) {
277 				kg->kg_last_assigned =
278 				    TAILQ_PREV(td, threadqueue, td_runq);
279 			}
280 			runq_remove(&runq, ke);
281 			KASSERT((ke->ke_state != KES_IDLE),
282 			    ("kse already idle"));
283 			ke->ke_state = KES_IDLE;
284 			ke->ke_thread = NULL;
285 			TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
286 			kg->kg_idle_kses++;
287 		}
288 	}
289 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
290 }
291 
292 void
293 setrunqueue(struct thread *td)
294 {
295 	struct kse *ke;
296 	struct ksegrp *kg;
297 	struct thread *td2;
298 	struct thread *tda;
299 
300 	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
301 	mtx_assert(&sched_lock, MA_OWNED);
302 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
303 	    ("setrunqueue: bad thread state"));
304 	TD_SET_RUNQ(td);
305 	kg = td->td_ksegrp;
306 	kg->kg_runnable++;
307 	if ((td->td_flags & TDF_UNBOUND) == 0) {
308 		KASSERT((td->td_kse != NULL),
309 		    ("queueing BAD thread to run queue"));
310 		/*
311 		 * Common path optimisation: Only one of everything
312 		 * and the KSE is always already attached.
313 		 * Totally ignore the ksegrp run queue.
314 		 */
315 		runq_add(&runq, td->td_kse);
316 		return;
317 	}
318 	/*
319 	 * Ok, so we are threading with this thread.
320 	 * If we don't have a KSE, see if we can get one..
321 	 */
322 	tda = kg->kg_last_assigned;
323 	if ((ke = td->td_kse) == NULL) {
324 		/*
325 		 * We will need a KSE, see if there is one..
326 		 * First look for a free one, before getting desperate.
327 		 * If we can't get one, our priority is not high enough..
328 		 * that's ok..
329 		 */
330 		if (kg->kg_idle_kses) {
331 			/*
332 			 * There is a free one so it's ours for the asking..
333 			 */
334 			ke = TAILQ_FIRST(&kg->kg_iq);
335 			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
336 			ke->ke_state = KES_THREAD;
337 			kg->kg_idle_kses--;
338 		} else if (tda && (tda->td_priority > td->td_priority)) {
339 			/*
340 			 * None free, but there is one we can commandeer.
341 			 */
342 			ke = tda->td_kse;
343 			tda->td_kse = NULL;
344 			ke->ke_thread = NULL;
345 			tda = kg->kg_last_assigned =
346 		    	    TAILQ_PREV(tda, threadqueue, td_runq);
347 			runq_remove(&runq, ke);
348 		}
349 	} else {
350 		/*
351 		 * Temporarily disassociate so it looks like the other cases.
352 		 */
353 		ke->ke_thread = NULL;
354 		td->td_kse = NULL;
355 	}
356 
357 	/*
358 	 * Add the thread to the ksegrp's run queue at
359 	 * the appropriate place.
360 	 */
361 	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
362 		if (td2->td_priority > td->td_priority) {
363 			TAILQ_INSERT_BEFORE(td2, td, td_runq);
364 			break;
365 		}
366 	}
367 	if (td2 == NULL) {
368 		/* We ran off the end of the TAILQ or it was empty. */
369 		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
370 	}
371 
372 	/*
373 	 * If we have a ke to use, then put it on the run queue and,
374 	 * if needed, readjust the last_assigned pointer.
375 	 */
376 	if (ke) {
377 		if (tda == NULL) {
378 			/*
379 			 * No pre-existing last assigned so whoever is first
380 			 * gets the KSE we brought in.. (maybe us)
381 			 */
382 			td2 = TAILQ_FIRST(&kg->kg_runq);
383 			KASSERT((td2->td_kse == NULL),
384 			    ("unexpected ke present"));
385 			td2->td_kse = ke;
386 			ke->ke_thread = td2;
387 			kg->kg_last_assigned = td2;
388 		} else if (tda->td_priority > td->td_priority) {
389 			/*
390 			 * It's ours, grab it, but last_assigned is past us
391 			 * so don't change it.
392 			 */
393 			td->td_kse = ke;
394 			ke->ke_thread = td;
395 		} else {
396 			/*
397 			 * We are past last_assigned, so
398 			 * put the new kse on whatever is next,
399 			 * which may or may not be us.
400 			 */
401 			td2 = TAILQ_NEXT(tda, td_runq);
402 			kg->kg_last_assigned = td2;
403 			td2->td_kse = ke;
404 			ke->ke_thread = td2;
405 		}
406 		runq_add(&runq, ke);
407 	}
408 }
409 
410 /************************************************************************
411  * Critical section marker functions					*
412  ************************************************************************/
413 /* Critical sections that prevent preemption. */
414 void
415 critical_enter(void)
416 {
417 	struct thread *td;
418 
419 	td = curthread;
420 	if (td->td_critnest == 0)
421 		cpu_critical_enter();
422 	td->td_critnest++;
423 }
424 
425 void
426 critical_exit(void)
427 {
428 	struct thread *td;
429 
430 	td = curthread;
431 	if (td->td_critnest == 1) {
432 		td->td_critnest = 0;
433 		cpu_critical_exit();
434 	} else {
435 		td->td_critnest--;
436 	}
437 }
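
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * critical sections nest, so only the outermost critical_enter() and the
 * matching final critical_exit() touch the hardware state through
 * cpu_critical_enter()/cpu_critical_exit().
 */
#if 0
static void
critical_nesting_example(void)
{
	critical_enter();	/* td_critnest 0 -> 1, cpu_critical_enter() */
	critical_enter();	/* td_critnest 1 -> 2, no hardware change */
	/* ... work that must not be preempted ... */
	critical_exit();	/* td_critnest 2 -> 1, still in a critical section */
	critical_exit();	/* td_critnest 1 -> 0, cpu_critical_exit() */
}
#endif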
438 
439 
440 /************************************************************************
441  * SYSTEM RUN QUEUE manipulations and tests				*
442  ************************************************************************/
443 /*
444  * Initialize a run structure.
445  */
446 void
447 runq_init(struct runq *rq)
448 {
449 	int i;
450 
451 	bzero(rq, sizeof *rq);
452 	for (i = 0; i < RQ_NQS; i++)
453 		TAILQ_INIT(&rq->rq_queues[i]);
454 }
455 
456 /*
457  * Clear the status bit of the queue corresponding to priority level pri,
458  * indicating that it is empty.
459  */
460 static __inline void
461 runq_clrbit(struct runq *rq, int pri)
462 {
463 	struct rqbits *rqb;
464 
465 	rqb = &rq->rq_status;
466 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
467 	    rqb->rqb_bits[RQB_WORD(pri)],
468 	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
469 	    RQB_BIT(pri), RQB_WORD(pri));
470 	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
471 }
472 
473 /*
474  * Find the index of the first non-empty run queue.  This is done by
475  * scanning the status bits, a set bit indicates a non-empty queue.
476  */
477 static __inline int
478 runq_findbit(struct runq *rq)
479 {
480 	struct rqbits *rqb;
481 	int pri;
482 	int i;
483 
484 	rqb = &rq->rq_status;
485 	for (i = 0; i < RQB_LEN; i++)
486 		if (rqb->rqb_bits[i]) {
487 			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
488 			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
489 			    rqb->rqb_bits[i], i, pri);
490 			return (pri);
491 		}
492 
493 	return (-1);
494 }
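
/*
 * Worked example for the status-bit bookkeeping, assuming 32-bit status
 * words (RQB_BPW == 32, RQB_L2BPW == 5) and RQB_FFS() yielding the
 * zero-based index of the lowest set bit: priority 37 is kept as bit
 * 37 % 32 == 5 of word 37 / 32 == 1, and runq_findbit() recovers it as
 * 5 + (1 << 5) == 37.  With RQ_NQS == 64 this makes rq_status the
 * two-word array that the CTASSERT near the top of the file requires.
 */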
495 
496 /*
497  * Set the status bit of the queue corresponding to priority level pri,
498  * indicating that it is non-empty.
499  */
500 static __inline void
501 runq_setbit(struct runq *rq, int pri)
502 {
503 	struct rqbits *rqb;
504 
505 	rqb = &rq->rq_status;
506 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
507 	    rqb->rqb_bits[RQB_WORD(pri)],
508 	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
509 	    RQB_BIT(pri), RQB_WORD(pri));
510 	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
511 }
512 
513 /*
514  * Add the KSE to the queue specified by its priority, and set the
515  * corresponding status bit.
516  */
517 void
518 runq_add(struct runq *rq, struct kse *ke)
519 {
520 	struct rqhead *rqh;
521 	int pri;
522 
523 	mtx_assert(&sched_lock, MA_OWNED);
524 	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
525 	KASSERT((ke->ke_thread->td_kse != NULL),
526 	    ("runq_add: No KSE on thread"));
527 	KASSERT(ke->ke_state != KES_ONRUNQ,
528 	    ("runq_add: kse %p (%s) already in run queue", ke,
529 	    ke->ke_proc->p_comm));
530 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
531 		("runq_add: process swapped out"));
532 	pri = ke->ke_thread->td_priority / RQ_PPQ;
533 	ke->ke_rqindex = pri;
534 	runq_setbit(rq, pri);
535 	rqh = &rq->rq_queues[pri];
536 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
537 	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
538 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
539 	ke->ke_ksegrp->kg_runq_kses++;
540 	ke->ke_state = KES_ONRUNQ;
541 }
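
/*
 * Worked example for the priority-to-queue mapping above, assuming the
 * usual definitions RQ_NQS == 64 and RQ_PPQ == 4: a thread with
 * td_priority 130 lands in queue index 130 / 4 == 32, so the 256
 * possible priority levels fold into the 64 per-priority queues.
 */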
542 
543 /*
544  * Return true if there are runnable processes of any priority on the run
545  * queue, false otherwise.  Has no side effects, does not modify the run
546  * queue structure.
547  */
548 int
549 runq_check(struct runq *rq)
550 {
551 	struct rqbits *rqb;
552 	int i;
553 
554 	rqb = &rq->rq_status;
555 	for (i = 0; i < RQB_LEN; i++)
556 		if (rqb->rqb_bits[i]) {
557 			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
558 			    rqb->rqb_bits[i], i);
559 			return (1);
560 		}
561 	CTR0(KTR_RUNQ, "runq_check: empty");
562 
563 	return (0);
564 }
565 
566 /*
567  * Find and remove the highest priority KSE from the run queue.
568  * If there are no runnable KSEs, NULL is returned and the caller
569  * (e.g. choosethread()) falls back to the per-cpu idle thread.
570  */
571 struct kse *
572 runq_choose(struct runq *rq)
573 {
574 	struct rqhead *rqh;
575 	struct kse *ke;
576 	int pri;
577 
578 	mtx_assert(&sched_lock, MA_OWNED);
579 	while ((pri = runq_findbit(rq)) != -1) {
580 		rqh = &rq->rq_queues[pri];
581 		ke = TAILQ_FIRST(rqh);
582 		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
583 		CTR3(KTR_RUNQ,
584 		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
585 		TAILQ_REMOVE(rqh, ke, ke_procq);
586 		ke->ke_ksegrp->kg_runq_kses--;
587 		if (TAILQ_EMPTY(rqh)) {
588 			CTR0(KTR_RUNQ, "runq_choose: empty");
589 			runq_clrbit(rq, pri);
590 		}
591 
592 		ke->ke_state = KES_THREAD;
593 		KASSERT((ke->ke_thread != NULL),
594 		    ("runq_choose: No thread on KSE"));
595 		KASSERT((ke->ke_thread->td_kse != NULL),
596 		    ("runq_choose: No KSE on thread"));
597 		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
598 			("runq_choose: process swapped out"));
599 		return (ke);
600 	}
601 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
602 
603 	return (NULL);
604 }
605 
606 /*
607  * Remove the KSE from the queue specified by its priority, and clear the
608  * corresponding status bit if the queue becomes empty.
609  * Caller must set ke->ke_state afterwards.
610  */
611 void
612 runq_remove(struct runq *rq, struct kse *ke)
613 {
614 	struct rqhead *rqh;
615 	int pri;
616 
617 	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
618 	mtx_assert(&sched_lock, MA_OWNED);
619 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
620 		("runq_remove: process swapped out"));
621 	pri = ke->ke_rqindex;
622 	rqh = &rq->rq_queues[pri];
623 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
624 	    ke, ke->ke_thread->td_priority, pri, rqh);
625 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
626 	TAILQ_REMOVE(rqh, ke, ke_procq);
627 	if (TAILQ_EMPTY(rqh)) {
628 		CTR0(KTR_RUNQ, "runq_remove: empty");
629 		runq_clrbit(rq, pri);
630 	}
631 	ke->ke_state = KES_THREAD;
632 	ke->ke_ksegrp->kg_runq_kses--;
633 }
634 
635 static void
636 runq_readjust(struct runq *rq, struct kse *ke)
637 {
638 
639 	if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) {
640 		runq_remove(rq, ke);
641 		runq_add(rq, ke);
642 	}
643 }
644 
645 #if 0
646 void
647 thread_sanity_check(struct thread *td)
648 {
649 	struct proc *p;
650 	struct ksegrp *kg;
651 	struct kse *ke;
652 	struct thread *td2;
653 	unsigned int prevpri;
654 	int	saw_lastassigned;
655 	int unassigned;
656 	int assigned;
657 
658 	p = td->td_proc;
659 	kg = td->td_ksegrp;
660 	ke = td->td_kse;
661 
662 
663 	if (ke) {
664 		if (p != ke->ke_proc) {
665 			panic("wrong proc");
666 		}
667 		if (ke->ke_thread != td) {
668 			panic("wrong thread");
669 		}
670 	}
671 
672 	if ((p->p_flag & P_KSES) == 0) {
673 		if (ke == NULL) {
674 			panic("non KSE thread lost kse");
675 		}
676 	} else {
677 		prevpri = 0;
678 		saw_lastassigned = 0;
679 		unassigned = 0;
680 		assigned = 0;
681 		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
682 			if (td2->td_priority < prevpri) {
683 				panic("thread runqueue unsorted");
684 			}
685 			prevpri = td2->td_priority;
686 			if (td2->td_kse) {
687 				assigned++;
688 				if (unassigned) {
689 					panic("unassigned before assigned");
690 				}
691  				if  (kg->kg_last_assigned == NULL) {
692 					panic("lastassigned corrupt");
693 				}
694 				if (saw_lastassigned) {
695 					panic("last assigned not last");
696 				}
697 				if (td2->td_kse->ke_thread != td2) {
698 					panic("mismatched kse/thread");
699 				}
700 			} else {
701 				unassigned++;
702 			}
703 			if (td2 == kg->kg_last_assigned) {
704 				saw_lastassigned = 1;
705 				if (td2->td_kse == NULL) {
706 					panic("last assigned not assigned");
707 				}
708 			}
709 		}
710 		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
711 			panic("where on earth does lastassigned point?");
712 		}
713 		FOREACH_THREAD_IN_GROUP(kg, td2) {
714 			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
715 			    (TD_ON_RUNQ(td2))) {
716 				assigned++;
717 				if (td2->td_kse == NULL) {
718 					panic ("BOUND thread with no KSE");
719 				}
720 			}
721 		}
722 #if 0
723 		if ((unassigned + assigned) != kg->kg_runnable) {
724 			panic("wrong number in runnable");
725 		}
726 #endif
727 	}
728 }
729 #endif
730 
731