xref: /freebsd/sys/kern/kern_switch.c (revision 042b7b1af0736a1511622c2e53b3c5e2b2259286)
/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (M = N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued, that pointer must be
cleared. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, and therefore that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later. If it's earlier, then the KSE is
removed from the last assigned thread (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
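
/*
 * Editorial worked example of the accounting above (not from the original
 * file): with N = 2 processors and X = 1 KSE already running, M = 1 KSE is
 * queued.  If the KSEGROUP has three runnable threads at priorities 10, 20
 * and 30, only the priority-10 thread is 'preassigned' the queued KSE, and
 * kg->kg_last_assigned points at it; the other two wait, unassigned, on the
 * KSEGROUP run queue.
 */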

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_BORING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}

#ifdef	SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from the system run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}
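/* Limits the "corrected slot count" diagnostic in setrunqueue() to one message. */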
int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);

			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced and we would land in the
	 * assigned section, keep removing entries from the system run queue
	 * until we are no longer in that section, or until there is room
	 * for us in it.
	 * What we MUST avoid is the case where threads of lower priority
	 * than the new one are scheduled while the new one cannot be
	 * scheduled itself; that would leave a non-contiguous set of
	 * scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and, if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow,
	 * even if the available slots are negative, so that
	 * all the items < last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.  (It may be us.)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
			td, td->td_ksegrp, td->td_proc->p_pid);
	}
}

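#if 0
/*
 * Editorial sketch (not part of the original file, never compiled here):
 * a self-contained user-space version of the priority-ordered TAILQ
 * insertion that setrunqueue() performs on the ksegrp run queue above.
 * struct toy_thread, toy_queue and toy_insert are invented names; as in
 * the kernel, a lower pri value means a better priority.
 */
#include <sys/queue.h>
#include <stdio.h>

struct toy_thread {
	int			pri;
	TAILQ_ENTRY(toy_thread)	link;
};
TAILQ_HEAD(toy_queue, toy_thread);

static void
toy_insert(struct toy_queue *q, struct toy_thread *new)
{
	struct toy_thread *t;

	TAILQ_FOREACH(t, q, link) {
		if (t->pri > new->pri) {
			/* First worse entry found; queue in front of it. */
			TAILQ_INSERT_BEFORE(t, new, link);
			return;
		}
	}
	/* Ran off the end of the queue, or it was empty. */
	TAILQ_INSERT_TAIL(q, new, link);
}

int
main(void)
{
	struct toy_queue q = TAILQ_HEAD_INITIALIZER(q);
	struct toy_thread a = { 20 }, b = { 10 }, c = { 30 }, *t;

	toy_insert(&q, &a);
	toy_insert(&q, &b);
	toy_insert(&q, &c);
	TAILQ_FOREACH(t, &q, link)
		printf("pri %d\n", t->pri);	/* 10, 20, 30 */
	return (0);
}
#endif
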
/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
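		/*
		 * A preemption requested while we were inside the critical
		 * section (TDP_OWEPREEMPT) is performed now, on the way out
		 * of the outermost section.
		 */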
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}

/*
 * This function is called when a thread is about to be put on run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the new thread should be immediately preempted to.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	  ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue, so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed.
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

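#if 0
/*
 * Editorial sketch (not part of the original file, never compiled here):
 * a self-contained user-space version of the status-word scan that
 * runq_findbit() performs above.  One bit per queue; ffs() locates the
 * lowest-numbered (best priority) non-empty queue.  TOY_BPW and TOY_LEN
 * stand in for RQB_BPW and RQB_LEN and are illustrative values only.
 */
#include <stdio.h>
#include <strings.h>

#define	TOY_BPW	32		/* bits per status word */
#define	TOY_LEN	2		/* number of status words */

static int
toy_findbit(const unsigned int bits[TOY_LEN])
{
	int i;

	for (i = 0; i < TOY_LEN; i++)
		if (bits[i] != 0)
			return ((ffs(bits[i]) - 1) + i * TOY_BPW);
	return (-1);
}

int
main(void)
{
	unsigned int bits[TOY_LEN] = { 0, 0 };

	bits[1] |= 1u << 5;	/* mark queue 37 (= 32 + 5) non-empty */
	printf("first non-empty queue: %d\n", toy_findbit(bits));
	return (0);
}
#endif
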
/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;

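#if 0
/*
 * Editorial sketch (not part of the original file, never compiled here):
 * RANGEOF() is not used within this file; the usual idiom elsewhere in the
 * tree is to clear a contiguous range of structure fields in one call.  The
 * struct toy and its field names below are invented for the illustration.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy {
	int	t_keep;		/* survives re-initialization */
	int	t_startzero;	/* first field of the cleared range */
	int	t_counter;
	int	t_flags;
	int	t_endzero;	/* first field past the cleared range */
};

int
main(void)
{
	struct toy t = { 1, 2, 3, 4, 5 };

	memset(&t.t_startzero, 0,
	    RANGEOF(struct toy, t_startzero, t_endzero));
	printf("%d %d %d %d %d\n", t.t_keep, t.t_startzero, t.t_counter,
	    t.t_flags, t.t_endzero);	/* prints "1 0 0 0 5" */
	return (0);
}
#endif
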
/*
 * Allocate scheduler-specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

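	/*
	 * The scheduler-private data for a thread lives in memory allocated
	 * immediately after the thread structure itself, hence (td + 1).
	 */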
	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched     = ke;
	ke->ke_thread	= td;
	ke->ke_oncpu	= NOCPU;
	ke->ke_state	= KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline", i.e. before the ksegrp is attached to the world,
 * and thus doesn't need sched_lock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process
 * exiting, because it is also called from sched_exit() and we wouldn't
 * want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */