xref: /freebsd/sys/kern/kern_switch.c (revision cd49bb7047f32ed0cc010747c38f6c95470fda4a)
/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

/* Change the "#if 0" below to "#if 1" to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
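
/*
 * The run queue proper is an array of RQ_NQS priority queues backed by a
 * status bitmap of RQB_LEN words of RQB_BPW bits each; the assertion above
 * checks that the bitmap covers the queues exactly.  As a rough example
 * (the actual values live in <sys/runq.h> and vary by platform): with
 * RQ_NQS = 64 and 32-bit status words, RQB_BPW = 32 and RQB_LEN = 2.
 */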

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
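
/*
 * For example, user space can read (but not change) this setting with
 * "sysctl kern.sched.preemption" or via sysctlbyname(3).
 */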

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct td_sched *ts;
	struct thread *td;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run the idle thread on the APs. */
		td = PCPU_GET(idlethread);
		ts = td->td_sched;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ts->ts_flags |= TSF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ts = sched_choose();
	if (ts) {
		td = ts->ts_thread;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ts = td->td_sched;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ts->ts_flags |= TSF_DIDRUN;

	/*
	 * If we are panicking, only allow system threads and the thread
	 * we are currently running in to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* Note that it is no longer on the run queue. */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}


#if 0
/*
 * Currently not used: threads remove themselves from the
 * run queue by running.
 */
static void
remrunqueue(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/* Remove from the system run queue. */
	sched_rem(td);
	return;
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ts = td->td_sched;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/* We only care about the td_sched in the run queue. */
	td->td_priority = newpri;
#ifndef SCHED_CORE
	if (ts->ts_rqindex != (newpri / RQ_PPQ))
#else
	if (ts->ts_rqindex != newpri)
#endif
	{
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}
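
/*
 * Note on the SCHED_CORE conditional above: when SCHED_CORE is not defined,
 * RQ_PPQ consecutive priorities share one run queue, so the queue index is
 * newpri / RQ_PPQ; SCHED_CORE uses one queue per priority, so the index is
 * compared against newpri directly.  Either way the thread is only removed
 * and re-added when its queue index actually changes.
 */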

void
setrunqueue(struct thread *td, int flags)
{

	CTR2(KTR_RUNQ, "setrunqueue: td:%p pid:%d",
	    td, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	sched_add(td, flags);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			mtx_lock_spin(&sched_lock);
			td->td_critnest--;
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
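
/*
 * Illustrative sketch only (not part of this file's interface): a caller
 * typically brackets code that must not be preempted with critical_enter()
 * and critical_exit().  The function name below is hypothetical.
 */
#if 0
static void
example_no_preempt(void)
{

	critical_enter();
	/* Code here will not be preempted on this CPU. */
	critical_exit();
	/* A preemption requested meanwhile is honored in critical_exit(). */
}
#endif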

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should be immediately preempted in
 * favor of the new thread.  If so, it switches to the new thread and
 * eventually returns true.  If not, it returns false so that the caller may
 * place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority, the
	 *    current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_sched->ts_state != TSS_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on the system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ts_state != TSS_ONRUNQ);
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
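
/*
 * Worked example of the status-bitmap indexing (values assume the common
 * definitions in <sys/runq.h>): with 32-bit bitmap words, RQB_L2BPW is 5,
 * so queue index 40 lives in word RQB_WORD(40) = 40 >> 5 = 1 as bit
 * RQB_BIT(40) = 1 << (40 & 31) = 1 << 8.  RQB_FFS() then recovers the
 * lowest set bit while the words are scanned in runq_findbit().
 */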

static __inline int
runq_findbit_from(struct runq *rq, int start)
{
	struct rqbits *rqb;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW - 1);
	pri = 0;
	CTR1(KTR_RUNQ, "runq_findbit_from: start %d", start);
again:
	for (i = RQB_WORD(start); i < RQB_LEN; i++) {
		CTR3(KTR_RUNQ, "runq_findbit_from: bits %d = %#x bit = %d",
		    i, rqb->rqb_bits[i], bit);
		if (rqb->rqb_bits[i]) {
			if (bit != 0) {
				for (pri = bit; pri < RQB_BPW; pri++)
					if (rqb->rqb_bits[i] & (1ul << pri))
						break;
				bit = 0;
				if (pri >= RQB_BPW)
					continue;
			} else
				pri = RQB_FFS(rqb->rqb_bits[i]);
			pri += (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}
		bit = 0;
	}
	if (start != 0) {
		CTR0(KTR_RUNQ, "runq_findbit_from: restarting");
		start = 0;
		goto again;
	}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}
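
/*
 * Note on runq_add(): the thread priority is scaled down by RQ_PPQ, so with
 * the usual RQ_PPQ of 4 priorities 0-3 share queue 0, 4-7 share queue 1,
 * and so on.  SRQ_PREEMPTED inserts at the head of its queue so that a
 * thread which was preempted resumes ahead of peers at the same priority;
 * everything else goes to the tail for round-robin order.
 */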

void
runq_add_pri(struct runq *rq, struct td_sched *ts, int pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ke=%p pri=%d idx=%d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority thread on the run queue.
 */
struct td_sched *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored. */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
					ts = ts2;
					break;
				}
				ts2 = TAILQ_NEXT(ts2, ts_procq);
			}
		} else
#endif
			ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

struct td_sched *
runq_choose_from(struct runq *rq, int idx)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose_from: no proc on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d kse=%p idx=%d rqh=%p",
		    pri, ts, ts->ts_rqindex, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ts->ts_state afterwards.
 */
void
runq_remove(struct runq *rq, struct td_sched *ts)
{

	runq_remove_idx(rq, ts, NULL);
}

void
runq_remove_idx(struct runq *rq, struct td_sched *ts, int *idx)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
	    ("runq_remove_idx: process swapped out"));
	pri = ts->ts_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
}

/*
 * A thread is either being created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ts;

	ts = (struct td_sched *) (td + 1);
	bzero(ts, sizeof(*ts));
	td->td_sched = ts;
	ts->ts_thread = td;
	ts->ts_state = TSS_THREAD;
}
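
/*
 * Layout note for sched_newthread(): the scheduler-private data is assumed
 * to live in the same allocation as the thread, immediately after
 * struct thread (hence the "td + 1"), so nothing needs to be allocated or
 * freed here.
 */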

/*
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct proc *p)
{
}

/*
 * Change the concurrency of an existing proc to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct proc *p, int concurrency)
{
}

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process
 * exiting; sched_exit_thread() is also called from sched_exit(), and we
 * would not want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{
}

#endif /* KERN_SWITCH_INCLUDE */