xref: /freebsd/sys/kern/kern_switch.c (revision 1a5cd27b4b50004082a1c6292b70d18ea0d9c8e3)
/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If X of these KSEs are actually running at the
moment in question, then at most M = (N - X) of these KSEs are on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because no threads are queued, that pointer is NULL.

When a KSE is removed from the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, the pointer must no
longer refer to it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' its KSE, i.e. whether it belongs 'earlier'
on the list than that thread or later. If it's earlier, the KSE is
removed from the last assigned thread (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, its KSE becomes available, and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
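
/*
 * A minimal sketch (hypothetical, not referenced anywhere) of the
 * invariant described above: every thread on kg_runq up to and
 * including kg_last_assigned carries a KSE, and every thread after it
 * carries none.  It uses only fields that appear elsewhere in this file.
 */
#if 0
static void
ksegrp_assign_invariant(struct ksegrp *kg)
{
	struct thread *td;
	int assigned;

	/* If nothing is assigned, every queued thread must lack a KSE. */
	assigned = (kg->kg_last_assigned != NULL);
	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		if (assigned)
			KASSERT(td->td_kse != NULL,
			    ("thread before last_assigned has no KSE"));
		else
			KASSERT(td->td_kse == NULL,
			    ("thread past last_assigned has a KSE"));
		if (td == kg->kg_last_assigned)
			assigned = 0;	/* The rest are unassigned. */
	}
}
#endif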

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_full_preemption.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>

/* The status bit array must cover exactly the RQ_NQS run queues. */
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/* Used only by the disabled sanity-check code at the bottom of the file. */
void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If thread clustering is ever
 * implemented, this is where it would be done.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_SA) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}
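
/*
 * Sketch of the expected use (hypothetical caller; the real call site
 * lives in the context-switch path of whichever scheduler is in use):
 *
 *	newtd = choosethread();		// never NULL: falls back to the
 *					// idle thread
 *	if (td != newtd)
 *		cpu_switch(td, newtd);	// hypothetical switch call
 */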

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes that the original thread is not runnable.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original)
		original->td_kse = NULL;

	/*
	 * Find the first unassigned thread.
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		sched_add(td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* Bring its kse with it, leave the thread attached */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td);
		}
		return;
	}

	/* It is a threaded process. */
	kg = td->td_ksegrp;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	td->td_priority = newpri;
	setrunqueue(td);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d",
	    td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			CTR2(KTR_RUNQ, "setrunqueue: kg:%p: Use free ke:%p",
			    kg, ke);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			CTR3(KTR_RUNQ,
			    "setrunqueue: kg:%p: take ke:%p from td: %p",
			    kg, ke, tda);
			sched_rem(tda);
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a KSE to use, put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke->ke_thread);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
			td, td->td_ksegrp, td->td_proc->p_pid);
	}
}
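
/*
 * Worked example of the KSE hand-off above (hypothetical scenario):
 * one KSE is assigned to the sole queued thread T1 (priority 20), so
 * kg_last_assigned == T1.  setrunqueue() of T2 (priority 10, no KSE,
 * no idle KSEs) commandeers T1's KSE, backs kg_last_assigned up to
 * NULL, inserts T2 ahead of T1, and then the "tda == NULL" case hands
 * the KSE to the new queue head, which is T2 itself; kg_last_assigned
 * ends up pointing at T2.
 */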

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}
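
/*
 * A minimal usage sketch (hypothetical caller): the two calls nest, and
 * any preemption owed while inside the section is deferred until the
 * outermost critical_exit().
 *
 *	critical_enter();
 *	... per-CPU work that must not be preempted ...
 *	critical_exit();	// may mi_switch() here if one is owed
 */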

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the new thread should immediately preempt the current
 * one.  If so, it switches to it and eventually returns true.  If not, it
 * returns false so that the caller may place the thread on an appropriate
 * run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - FULL_PREEMPTION is disabled, the new thread's priority is not
	 *    a realtime (interrupt thread) priority, and the current
	 *    thread's priority is not an idle priority.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Our thread state says that we are already on a run queue, so
	 * update our state as if we had been dequeued by choosethread().
	 */
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL, td);
	return (1);
#else
	return (0);
#endif
}
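
/*
 * A hypothetical caller (sketch only; the actual call sites live in the
 * scheduler): code adding a thread might try to preempt before queueing.
 *
 *	if (maybe_preempt(td))
 *		return;			// already switched; td is running
 *	runq_add(rq, td->td_kse);	// otherwise queue it
 */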

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, where a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}
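
/*
 * Worked example of the mapping above (assuming the stock parameters
 * RQ_NQS = 64, RQ_PPQ = 4, and 32-bit status words): a thread at
 * priority 130 lands on queue 130 / 4 = 32, which is bit 0 of
 * rqb_bits[1]; runq_findbit() would then recover 32 as
 * RQB_FFS(rqb_bits[1]) + (1 << RQB_L2BPW) = 0 + 32.
 */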

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	kdb_enter(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_SA) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	/* Never expected to be true; referencing the locals quiets warnings. */
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif