/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Change the "#if 0" below to "#if 1" to enable KTR logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

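/*
 * The run queue status words must provide exactly one bit for each of
 * the RQ_NQS queues.
 */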
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
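/*
 * Illustrative usage (hypothetical counter name; see SCHED_STAT_DEFINE()
 * in sys/sched.h), by which a scheduler could track a private event:
 *
 *	SCHED_STAT_DEFINE(example_event, "Example event count");
 */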
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");

/* Switch reasons from mi_switch(). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(uncategorized,
    &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
SCHED_STAT_DEFINE_VAR(preempt,
    &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(sleepqtimo,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");

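/*
 * Handler for kern.sched.stats.reset: any non-zero write zeroes every
 * per-CPU switch counter registered under kern.sched.stats.
 */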
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
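/*
 * Example usage from userland, on a kernel built with SCHED_STATS:
 *
 *	sysctl kern.sched.stats		# inspect the per-reason counters
 *	sysctl kern.sched.stats.reset=1	# zero them
 */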
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */

static __noinline struct thread *
choosethread_panic(struct thread *td)
{

	/*
	 * If we are panicking, only allow system threads, plus the one
	 * we are already running in, to be run.
	 */
retry:
	if (((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		td = sched_choose();
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();

	if (__predict_false(panicstr != NULL))
		return (choosethread_panic(td));

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 *
 * It might seem a good idea to inline critical_enter() but, in order
 * to prevent instruction reordering by the compiler, a __compiler_membar()
 * would have to be used here (the same as sched_pin()).  The performance
 * penalty imposed by the membar could, then, produce slower code than
 * the function call itself, for most cases.
 */
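/*
 * Illustrative usage: a critical section keeps per-CPU state consistent
 * by preventing preemption on the local CPU:
 *
 *	critical_enter();
 *	(modify PCPU data; this thread cannot be preempted here)
 *	critical_exit();
 */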
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

static void __noinline
critical_exit_preempt(void)
{
	struct thread *td;
	int flags;

	td = curthread;
	KASSERT(td->td_owepreempt != 0,
	    ("critical_exit_preempt: td_owepreempt == 0"));
	if (td->td_critnest != 0)
		return;
	if (kdb_active)
		return;

	/*
	 * Microoptimization: we committed to switch,
	 * disable preemption in interrupt handlers
	 * while spinning for the thread lock.
	 */
	td->td_critnest = 1;
	thread_lock(td);
	td->td_critnest--;
	flags = SW_INVOL | SW_PREEMPT;
	if (TD_IS_IDLETHREAD(td))
		flags |= SWT_IDLE;
	else
		flags |= SWT_OWEPREEMPT;
	mi_switch(flags, NULL);
	thread_unlock(td);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	td->td_critnest--;
	__compiler_membar();
	if (__predict_false(td->td_owepreempt))
		critical_exit_preempt();
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

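/*
 * As runq_findbit(), but start the scan at queue index 'pri' and wrap
 * around to the beginning of the bitmap at most once.
 */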
static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}
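
/*
 * Worked example (assuming RQB_BPW == 32): with pri == 40 the scan begins
 * at word 1 with mask 0xffffff00, so the bits for priorities 32-39 are
 * skipped on the first pass; if no occupied queue is found from there on,
 * the search restarts once from priority 0 with a full mask.
 */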

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

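/*
 * As runq_add(), but use the caller-supplied queue index 'pri' instead of
 * deriving the index from the thread's priority.
 */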
void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are runnable threads of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority thread on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal; values of 0 or less are ignored */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Find the highest priority thread on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

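/*
 * As runq_choose(), but start the search at queue index 'idx' and wrap
 * around once, allowing callers to implement a circular scan policy.
 */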
struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

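/*
 * As runq_remove(), but if '*idx' names the queue that has just become
 * empty, advance it to the next queue so a circular scan resumes there.
 */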
void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
		("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}