/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */

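/*
 * For illustration: with the uniprocessor values above
 * (INVERSE_ESTCPU_WEIGHT == 8, NICE_WEIGHT == 1), and assuming the
 * usual PRIO_MAX - PRIO_MIN == 40 and RQ_PPQ == 4 from <sys/runq.h>,
 * the clamp works out to
 *
 *	ESTCPULIM(e) == min((e), 8 * (1 * 40 - 4) + 8 - 1) == min((e), 295)
 *
 * so td_estcpu saturates at 295.  On SMP the weight scales with
 * smp_cpus, raising the ceiling proportionally.
 */
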
/*
 * The schedulable entity that runs a context.
 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler.
 */
struct td_sched {
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	int		ts_slptime;	/* (j) Seconds !RUNNING. */
	struct runq	*ts_runq;	/* runq the thread is currently on */
};

/* flags kept in td_flags */
#define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */

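/*
 * Is the thread associated with a per-CPU run queue rather than with
 * the global one?
 */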
#define SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

static struct td_sched td_sched0;
struct mtx sched_lock;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static void	setup_runqs(void);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	forward_wakeup(int cpunum);
static void	kick_other_cpu(int pri, int cpuid);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
    &sched_kp);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

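/*
 * Usage sketch for the handler above (the numbers assume hz == 1000,
 * i.e. tick == 1000 microseconds, which this file does not guarantee):
 * the default quantum of hz / 10 ticks reads back as 100000us, and
 * "sysctl kern.sched.quantum=50000" would install a 50-tick (50ms)
 * quantum; values below one tick are rejected with EINVAL.
 */
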
SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");

#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for forwarding of wakeups to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed forwardings of wakeups to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
	   &forward_wakeup_use_single, 0,
	   "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
	   &forward_wakeup_use_htt, 0,
	   "Account for HTT");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "allow threads to share a quantum");
#endif

static __inline void
sched_load_add(void)
{
	sched_tdcnt++;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{
	sched_tdcnt--;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should immediately be preempted in
 * favor of the new thread.  If so, it switches to the new thread and
 * eventually returns true.  If not, it returns false so that the caller
 * may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
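	/*
	 * That is: deny the preemption unless the new thread runs at
	 * interrupt priority or better (pri <= PRI_MAX_ITHD) or the
	 * current thread is an idle-priority thread (cpri >= PRI_MIN_IDLE);
	 * one timeshare thread never preempts another here.
	 */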
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that sched_clock() updates td_estcpu and ts_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		td_estcpu *= decay;
 * will compute
 * 	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

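/*
 * Worked check of the table above (arithmetic only): for a load average
 * of 1.0 (FSCALE fixed point), loadfactor() yields b == 2, so decay_cpu()
 * multiplies by 2F / (2F + F) == 2/3 each second.  Solving
 * (2/3) ** p == .1 gives p == ln(.1) / ln(2/3) =~ 5.68, matching the
 * "power" entry for loadav 1.
 */
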
/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

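/*
 * Rate check (arithmetic only): ccpu / FSCALE =~ 0.9512 == exp(-1/20),
 * and schedcpu() below multiplies ts_pctcpu by ccpu once per second, so
 * after 60 seconds an idle thread retains 0.9512 ** 60 == exp(-3) =~ 5%
 * of its %cpu, i.e. 95% has decayed as promised.
 */
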
/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t loadfac;
	unsigned int newcpu;

	ts = td->td_sched;
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}

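/*
 * Worked example of the formula above, using the customary constants
 * (assumed here, not defined in this file): with PUSER == 160,
 * PRIO_MIN == -20 and the UP INVERSE_ESTCPU_WEIGHT of 8, a nice-0
 * thread with td_estcpu == 80 gets 160 + 80 / 8 + 1 * (0 - (-20)) == 190,
 * which the min/max clamp keeps within the timeshare range.
 */
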
/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}

	/*
	 * Force a context switch if the current thread has used up a full
	 * quantum (default quantum is 100ms).
	 */
	if (!TD_IS_IDLETHREAD(td) &&
	    ticks - PCPU_GET(switchticks) >= sched_quantum)
		td->td_flags |= TDF_NEEDRESCHED;
}

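/*
 * Illustration of the quantum check above: with the default quantum of
 * hz / 10 ticks, "ticks - switchticks >= sched_quantum" first holds once
 * the thread has held the CPU for 100ms, at which point TDF_NEEDRESCHED
 * forces a round-robin switch among equal-priority peers.
 */
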
/*
 * Charge child's scheduling CPU usage to parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    td, td->td_name, td->td_priority);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_name, child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	mtx_lock_spin(&sched_lock);
	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	mtx_unlock_spin(&sched_lock);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	struct td_sched *ts;

	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	ts = childtd->td_sched;
	bzero(ts, sizeof(*ts));
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{
	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_name, td->td_priority, prio, curthread,
	    curthread->td_name);

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_flags |= TDF_UBORROWING;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else {
		sched_lend_user_prio(td, prio);
	}
}

void
sched_sleep(struct thread *td, int pri)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td->td_sched->ts_slptime = 0;
	if (pri)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri <= PSOCK)
		td->td_flags |= TDF_CANSWAP;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct td_sched *ts;
	struct proc *p;

	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_unlock(td);
	}

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_load_rem();

	if (newtd)
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;

	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		idle_cpus_mask &= ~PCPU_GET(cpumask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
			("trying to run inhibited thread"));
		newtd->td_flags |= TDF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
	}
	MPASS(newtd->td_lock == &sched_lock);

	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		/* I feel sleepy */
		lock_profile_release_lock(&sched_lock.lock_object);
#ifdef KDTRACE_HOOKS
		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (dtrace_vtime_active)
			(*dtrace_vtime_switch_func)(newtd);
#endif

		cpu_switch(td, newtd, td->td_lock);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    0, 0, __FILE__, __LINE__);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed. All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 *
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		idle_cpus_mask |= PCPU_GET(cpumask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptick = ticks;
	ts->ts_slptime = 0;
	sched_add(td, SRQ_BORING);
}

103437c28a02SJulian Elischer #ifdef SMP
103582a1dfc1SJulian Elischer static int
103682a1dfc1SJulian Elischer forward_wakeup(int cpunum)
103782a1dfc1SJulian Elischer {
103882a1dfc1SJulian Elischer 	struct pcpu *pc;
10398aa3d7ffSJohn Baldwin 	cpumask_t dontuse, id, map, map2, map3, me;
104082a1dfc1SJulian Elischer 
104182a1dfc1SJulian Elischer 	mtx_assert(&sched_lock, MA_OWNED);
104282a1dfc1SJulian Elischer 
1043ed062c8dSJulian Elischer 	CTR0(KTR_RUNQ, "forward_wakeup()");
104482a1dfc1SJulian Elischer 
104582a1dfc1SJulian Elischer 	if ((!forward_wakeup_enabled) ||
104682a1dfc1SJulian Elischer 	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
104782a1dfc1SJulian Elischer 		return (0);
104882a1dfc1SJulian Elischer 	if (!smp_started || cold || panicstr)
104982a1dfc1SJulian Elischer 		return (0);
105082a1dfc1SJulian Elischer 
105182a1dfc1SJulian Elischer 	forward_wakeups_requested++;
105282a1dfc1SJulian Elischer 
105382a1dfc1SJulian Elischer 	/*
10548aa3d7ffSJohn Baldwin 	 * Check the idle mask we received against what we calculated
10558aa3d7ffSJohn Baldwin 	 * before in the old version.
105682a1dfc1SJulian Elischer 	 */
105782a1dfc1SJulian Elischer 	me = PCPU_GET(cpumask);
10588aa3d7ffSJohn Baldwin 
10598aa3d7ffSJohn Baldwin 	/* Don't bother if we should be doing it ourself. */
106082a1dfc1SJulian Elischer 	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
106182a1dfc1SJulian Elischer 		return (0);
106282a1dfc1SJulian Elischer 
106382a1dfc1SJulian Elischer 	dontuse = me | stopped_cpus | hlt_cpus_mask;
106482a1dfc1SJulian Elischer 	map3 = 0;
106582a1dfc1SJulian Elischer 	if (forward_wakeup_use_loop) {
106682a1dfc1SJulian Elischer 		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
106782a1dfc1SJulian Elischer 			id = pc->pc_cpumask;
106882a1dfc1SJulian Elischer 			if ((id & dontuse) == 0 &&
106982a1dfc1SJulian Elischer 			    pc->pc_curthread == pc->pc_idlethread) {
107082a1dfc1SJulian Elischer 				map3 |= id;
107182a1dfc1SJulian Elischer 			}
107282a1dfc1SJulian Elischer 		}
107382a1dfc1SJulian Elischer 	}
107482a1dfc1SJulian Elischer 
107582a1dfc1SJulian Elischer 	if (forward_wakeup_use_mask) {
107782a1dfc1SJulian Elischer 		map = idle_cpus_mask & ~dontuse;
107882a1dfc1SJulian Elischer 
10798aa3d7ffSJohn Baldwin 		/* If both are enabled, compare them and use the loop if they differ. */
108082a1dfc1SJulian Elischer 		if (forward_wakeup_use_loop) {
108182a1dfc1SJulian Elischer 			if (map != map3) {
10828aa3d7ffSJohn Baldwin 				printf("map (%02X) != map3 (%02X)\n", map,
10838aa3d7ffSJohn Baldwin 				    map3);
108482a1dfc1SJulian Elischer 				map = map3;
108582a1dfc1SJulian Elischer 			}
108682a1dfc1SJulian Elischer 		}
108782a1dfc1SJulian Elischer 	} else {
108882a1dfc1SJulian Elischer 		map = map3;
108982a1dfc1SJulian Elischer 	}
10908aa3d7ffSJohn Baldwin 
10918aa3d7ffSJohn Baldwin 	/* If we only allow a specific CPU, then mask off all the others. */
109282a1dfc1SJulian Elischer 	if (cpunum != NOCPU) {
109382a1dfc1SJulian Elischer 		KASSERT((cpunum <= mp_maxcpus), ("forward_wakeup: bad cpunum."));
109482a1dfc1SJulian Elischer 		map &= (1 << cpunum);
109582a1dfc1SJulian Elischer 	} else {
109682a1dfc1SJulian Elischer 		/* Try to choose an idle die. */
109782a1dfc1SJulian Elischer 		if (forward_wakeup_use_htt) {
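			/*
			 * map & (map >> 1) pairs each even bit with
			 * its odd neighbour, and masking with 0x5555
			 * keeps the even bits, so map2 has a bit set
			 * for each core whose two hyperthreads are
			 * both idle (assuming two logical CPUs per
			 * core and at most 16 CPUs).  For example,
			 * map = 0xc (CPUs 2 and 3 idle) gives
			 * map2 = 0x4.
			 */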
109882a1dfc1SJulian Elischer 			map2 = (map & (map >> 1)) & 0x5555;
109982a1dfc1SJulian Elischer 			if (map2) {
110082a1dfc1SJulian Elischer 				map = map2;
110182a1dfc1SJulian Elischer 			}
110282a1dfc1SJulian Elischer 		}
110382a1dfc1SJulian Elischer 
11048aa3d7ffSJohn Baldwin 		/* Set only one bit. */
110582a1dfc1SJulian Elischer 		if (forward_wakeup_use_single) {
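			/*
			 * map & (~map + 1) is map & -map in two's
			 * complement: it isolates the lowest set bit,
			 * i.e. the lowest-numbered idle CPU.  For
			 * example, map = 0x6 yields 0x2.
			 */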
110682a1dfc1SJulian Elischer 			map = map & ((~map) + 1);
110782a1dfc1SJulian Elischer 		}
110882a1dfc1SJulian Elischer 	}
110982a1dfc1SJulian Elischer 	if (map) {
111082a1dfc1SJulian Elischer 		forward_wakeups_delivered++;
111182a1dfc1SJulian Elischer 		ipi_selected(map, IPI_AST);
111282a1dfc1SJulian Elischer 		return (1);
111382a1dfc1SJulian Elischer 	}
111482a1dfc1SJulian Elischer 	if (cpunum == NOCPU)
111582a1dfc1SJulian Elischer 		printf("forward_wakeup: Idle processor not found\n");
111682a1dfc1SJulian Elischer 	return (0);
111782a1dfc1SJulian Elischer }
1118f3a0f873SStephan Uphoff 
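/*
 * Nudge a specific CPU into rescheduling.  An idle CPU only needs an
 * IPI_AST.  A CPU running something of equal or higher priority is
 * left alone.  Otherwise preempt it with IPI_PREEMPT where the kernel
 * is built for that, or fall back to marking its current thread
 * TDF_NEEDRESCHED and sending an IPI_AST.
 */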
1119f3a0f873SStephan Uphoff static void
1120f3a0f873SStephan Uphoff kick_other_cpu(int pri, int cpuid)
1121f3a0f873SStephan Uphoff {
11228aa3d7ffSJohn Baldwin 	struct pcpu *pcpu;
11238aa3d7ffSJohn Baldwin 	int cpri;
1124f3a0f873SStephan Uphoff 
11258aa3d7ffSJohn Baldwin 	pcpu = pcpu_find(cpuid);
1126f3a0f873SStephan Uphoff 	if (idle_cpus_mask & pcpu->pc_cpumask) {
1127f3a0f873SStephan Uphoff 		forward_wakeups_delivered++;
1128f3a0f873SStephan Uphoff 		ipi_selected(pcpu->pc_cpumask, IPI_AST);
1129f3a0f873SStephan Uphoff 		return;
1130f3a0f873SStephan Uphoff 	}
1131f3a0f873SStephan Uphoff 
11328aa3d7ffSJohn Baldwin 	cpri = pcpu->pc_curthread->td_priority;
1133f3a0f873SStephan Uphoff 	if (pri >= cpri)
1134f3a0f873SStephan Uphoff 		return;
1135f3a0f873SStephan Uphoff 
1136f3a0f873SStephan Uphoff #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
1137f3a0f873SStephan Uphoff #if !defined(FULL_PREEMPTION)
1138f3a0f873SStephan Uphoff 	if (pri <= PRI_MAX_ITHD)
1139f3a0f873SStephan Uphoff #endif /* ! FULL_PREEMPTION */
1140f3a0f873SStephan Uphoff 	{
1141f3a0f873SStephan Uphoff 		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
1142f3a0f873SStephan Uphoff 		return;
1143f3a0f873SStephan Uphoff 	}
1144f3a0f873SStephan Uphoff #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
1145f3a0f873SStephan Uphoff 
1146f3a0f873SStephan Uphoff 	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
1147f3a0f873SStephan Uphoff 	ipi_selected(pcpu->pc_cpumask, IPI_AST);
1148f3a0f873SStephan Uphoff 	return;
1149f3a0f873SStephan Uphoff }
1150f3a0f873SStephan Uphoff #endif /* SMP */
1151f3a0f873SStephan Uphoff 
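/*
 * Place a thread on a run queue and decide whom to poke: pinned and
 * bound threads go on their CPU's private queue, and that CPU is
 * kicked if it is not the current one; unpinned threads go on the
 * global queue, from which the wakeup may be forwarded to an idle
 * CPU.
 */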
1152b43179fbSJeff Roberson void
11532630e4c9SJulian Elischer sched_add(struct thread *td, int flags)
11546804a3abSJulian Elischer #ifdef SMP
1155f3a0f873SStephan Uphoff {
1156ad1e7d28SJulian Elischer 	struct td_sched *ts;
11576804a3abSJulian Elischer 	int forwarded = 0;
11586804a3abSJulian Elischer 	int cpu;
1159f3a0f873SStephan Uphoff 	int single_cpu = 0;
11607cf90fb3SJeff Roberson 
1161ad1e7d28SJulian Elischer 	ts = td->td_sched;
11627b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1163f0393f06SJeff Roberson 	KASSERT((td->td_inhibitors == 0),
1164f0393f06SJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
1165f0393f06SJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1166f0393f06SJeff Roberson 	    ("sched_add: bad thread state"));
1167b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1168b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
1169907bdbc2SJeff Roberson 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1170431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1171431f8906SJulian Elischer 	    curthread->td_name);
11728aa3d7ffSJohn Baldwin 
11737b20fb19SJeff Roberson 	/*
11747b20fb19SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
11757b20fb19SJeff Roberson 	 * to the scheduler's lock.
11767b20fb19SJeff Roberson 	 */
11777b20fb19SJeff Roberson 	if (td->td_lock != &sched_lock) {
11787b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
11797b20fb19SJeff Roberson 		thread_lock_set(td, &sched_lock);
11807b20fb19SJeff Roberson 	}
1181f0393f06SJeff Roberson 	TD_SET_RUNQ(td);
1182f3a0f873SStephan Uphoff 
1183f3a0f873SStephan Uphoff 	if (td->td_pinned != 0) {
1184f3a0f873SStephan Uphoff 		cpu = td->td_lastcpu;
1185ad1e7d28SJulian Elischer 		ts->ts_runq = &runq_pcpu[cpu];
1186f3a0f873SStephan Uphoff 		single_cpu = 1;
1187f3a0f873SStephan Uphoff 		CTR3(KTR_RUNQ,
11888aa3d7ffSJohn Baldwin 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
11898aa3d7ffSJohn Baldwin 		    cpu);
11908aa3d7ffSJohn Baldwin 	} else if (td->td_flags & TDF_BOUND) {
11918aa3d7ffSJohn Baldwin 		/* Find CPU from bound runq. */
11928aa3d7ffSJohn Baldwin 		KASSERT(SKE_RUNQ_PCPU(ts),
11938aa3d7ffSJohn Baldwin 		    ("sched_add: bound td_sched not on cpu runq"));
1194ad1e7d28SJulian Elischer 		cpu = ts->ts_runq - &runq_pcpu[0];
1195f3a0f873SStephan Uphoff 		single_cpu = 1;
1196f3a0f873SStephan Uphoff 		CTR3(KTR_RUNQ,
11978aa3d7ffSJohn Baldwin 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
11988aa3d7ffSJohn Baldwin 		    cpu);
1199f3a0f873SStephan Uphoff 	} else {
12006804a3abSJulian Elischer 		CTR2(KTR_RUNQ,
12018aa3d7ffSJohn Baldwin 		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
12028aa3d7ffSJohn Baldwin 		    td);
12036804a3abSJulian Elischer 		cpu = NOCPU;
1204ad1e7d28SJulian Elischer 		ts->ts_runq = &runq;
1205e17c57b1SJeff Roberson 	}
1206f3a0f873SStephan Uphoff 
1207a3f2d842SStephan Uphoff 	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
1208f3a0f873SStephan Uphoff 	        kick_other_cpu(td->td_priority, cpu);
1209f3a0f873SStephan Uphoff 	} else {
1210f3a0f873SStephan Uphoff 		if (!single_cpu) {
1211f3a0f873SStephan Uphoff 			cpumask_t me = PCPU_GET(cpumask);
12128aa3d7ffSJohn Baldwin 			cpumask_t idle = idle_cpus_mask & me;
1213f3a0f873SStephan Uphoff 
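			/*
			 * Forward the wakeup only when this CPU is
			 * not idle itself, the enqueue is not coming
			 * from an interrupt, and some other
			 * non-halted CPU is idle.
			 */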
1214f3a0f873SStephan Uphoff 			if (!idle && ((flags & SRQ_INTR) == 0) &&
1215f3a0f873SStephan Uphoff 			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
1216f3a0f873SStephan Uphoff 				forwarded = forward_wakeup(cpu);
1217f3a0f873SStephan Uphoff 		}
1218f3a0f873SStephan Uphoff 
1219f3a0f873SStephan Uphoff 		if (!forwarded) {
1220a3f2d842SStephan Uphoff 			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
1221f3a0f873SStephan Uphoff 				return;
1222f3a0f873SStephan Uphoff 			else
1223f3a0f873SStephan Uphoff 				maybe_resched(td);
1224f3a0f873SStephan Uphoff 		}
1225f3a0f873SStephan Uphoff 	}
1226f3a0f873SStephan Uphoff 
1227f3a0f873SStephan Uphoff 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1228f3a0f873SStephan Uphoff 		sched_load_add();
12299727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
1230f3a0f873SStephan Uphoff }
1231f3a0f873SStephan Uphoff #else /* SMP */
1232f3a0f873SStephan Uphoff {
1233ad1e7d28SJulian Elischer 	struct td_sched *ts;
1234ad1e7d28SJulian Elischer 	ts = td->td_sched;
12357b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1236f0393f06SJeff Roberson 	KASSERT((td->td_inhibitors == 0),
1237f0393f06SJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
1238f0393f06SJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1239f0393f06SJeff Roberson 	    ("sched_add: bad thread state"));
1240b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1241b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
1242f3a0f873SStephan Uphoff 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1243431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1244431f8906SJulian Elischer 	    curthread->td_name);
12458aa3d7ffSJohn Baldwin 
12467b20fb19SJeff Roberson 	/*
12477b20fb19SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
12487b20fb19SJeff Roberson 	 * to the scheduler's lock.
12497b20fb19SJeff Roberson 	 */
12507b20fb19SJeff Roberson 	if (td->td_lock != &sched_lock) {
12517b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
12527b20fb19SJeff Roberson 		thread_lock_set(td, &sched_lock);
12537b20fb19SJeff Roberson 	}
1254f0393f06SJeff Roberson 	TD_SET_RUNQ(td);
1255ad1e7d28SJulian Elischer 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1256ad1e7d28SJulian Elischer 	ts->ts_runq = &runq;
12576804a3abSJulian Elischer 
12586804a3abSJulian Elischer 	/*
12598aa3d7ffSJohn Baldwin 	 * If we are yielding (on the way out anyhow) or the thread
12608aa3d7ffSJohn Baldwin 	 * being saved is US, then don't try to be smart about preemption
12618aa3d7ffSJohn Baldwin 	 * or kicking off another CPU as it won't help and may hinder.
12628aa3d7ffSJohn Baldwin 	 * In the YIELDING case, we are about to run whoever is being
12638aa3d7ffSJohn Baldwin 	 * put in the queue anyhow, and in the OURSELF case, we are
12648aa3d7ffSJohn Baldwin 	 * putting ourselves on the run queue which also only happens
12658aa3d7ffSJohn Baldwin 	 * when we are about to yield.
12666804a3abSJulian Elischer 	 */
12676804a3abSJulian Elischer 	if ((flags & SRQ_YIELDING) == 0) {
12686804a3abSJulian Elischer 		if (maybe_preempt(td))
12696804a3abSJulian Elischer 			return;
12706804a3abSJulian Elischer 	}
1271f2f51f8aSJeff Roberson 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1272907bdbc2SJeff Roberson 		sched_load_add();
12739727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
12746942d433SJohn Baldwin 	maybe_resched(td);
1275b43179fbSJeff Roberson }
1276f3a0f873SStephan Uphoff #endif /* SMP */
1277f3a0f873SStephan Uphoff 
1278b43179fbSJeff Roberson void
12797cf90fb3SJeff Roberson sched_rem(struct thread *td)
1280b43179fbSJeff Roberson {
1281ad1e7d28SJulian Elischer 	struct td_sched *ts;
12827cf90fb3SJeff Roberson 
1283ad1e7d28SJulian Elischer 	ts = td->td_sched;
1284b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1285b61ce5b0SJeff Roberson 	    ("sched_rem: thread swapped out"));
1286f0393f06SJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
1287ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
1288b43179fbSJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
1289907bdbc2SJeff Roberson 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1290431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1291431f8906SJulian Elischer 	    curthread->td_name);
1292b43179fbSJeff Roberson 
1293f2f51f8aSJeff Roberson 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1294907bdbc2SJeff Roberson 		sched_load_rem();
12959727e637SJeff Roberson 	runq_remove(ts->ts_runq, td);
1296f0393f06SJeff Roberson 	TD_SET_CAN_RUN(td);
1297b43179fbSJeff Roberson }
1298b43179fbSJeff Roberson 
129914f0e2e9SJulian Elischer /*
13008aa3d7ffSJohn Baldwin  * Select threads to run.  Note that running threads still consume a
13018aa3d7ffSJohn Baldwin  * slot.
130214f0e2e9SJulian Elischer  */
1303f0393f06SJeff Roberson struct thread *
1304b43179fbSJeff Roberson sched_choose(void)
1305b43179fbSJeff Roberson {
13069727e637SJeff Roberson 	struct thread *td;
1307e17c57b1SJeff Roberson 	struct runq *rq;
1308b43179fbSJeff Roberson 
13097b20fb19SJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
1310e17c57b1SJeff Roberson #ifdef SMP
13119727e637SJeff Roberson 	struct thread *tdcpu;
1312e17c57b1SJeff Roberson 
1313e17c57b1SJeff Roberson 	rq = &runq;
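	/*
	 * Pick the best thread from the global queue (with a small
	 * "fuzz": runq_choose_fuzz() may prefer, among the first few
	 * equal-priority threads, one that last ran on this CPU) and
	 * from this CPU's private queue, then take whichever has the
	 * better priority; ties go to the global queue.
	 */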
13149727e637SJeff Roberson 	td = runq_choose_fuzz(&runq, runq_fuzz);
13159727e637SJeff Roberson 	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1316e17c57b1SJeff Roberson 
13179727e637SJeff Roberson 	if (td == NULL ||
13189727e637SJeff Roberson 	    (tdcpu != NULL &&
13199727e637SJeff Roberson 	     tdcpu->td_priority < td->td_priority)) {
13209727e637SJeff Roberson 		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1321e17c57b1SJeff Roberson 		     PCPU_GET(cpuid));
13229727e637SJeff Roberson 		td = tdcpu;
1323e17c57b1SJeff Roberson 		rq = &runq_pcpu[PCPU_GET(cpuid)];
1324e17c57b1SJeff Roberson 	} else {
13259727e637SJeff Roberson 		CTR1(KTR_RUNQ, "choosing td %p from main runq", td);
1326e17c57b1SJeff Roberson 	}
1327e17c57b1SJeff Roberson 
1328e17c57b1SJeff Roberson #else
1329e17c57b1SJeff Roberson 	rq = &runq;
13309727e637SJeff Roberson 	td = runq_choose(&runq);
1331e17c57b1SJeff Roberson #endif
1332b43179fbSJeff Roberson 
13339727e637SJeff Roberson 	if (td) {
13349727e637SJeff Roberson 		runq_remove(rq, td);
13359727e637SJeff Roberson 		td->td_flags |= TDF_DIDRUN;
1336b43179fbSJeff Roberson 
13379727e637SJeff Roberson 		KASSERT(td->td_flags & TDF_INMEM,
1338b61ce5b0SJeff Roberson 		    ("sched_choose: thread swapped out"));
13399727e637SJeff Roberson 		return (td);
1340b43179fbSJeff Roberson 	}
1341f0393f06SJeff Roberson 	return (PCPU_GET(idlethread));
1342b43179fbSJeff Roberson }
1343b43179fbSJeff Roberson 
1344b43179fbSJeff Roberson void
13451e24c28fSJeff Roberson sched_preempt(struct thread *td)
13461e24c28fSJeff Roberson {
13471e24c28fSJeff Roberson 	thread_lock(td);
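	/* Defer the preemption if td is inside a critical section. */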
13481e24c28fSJeff Roberson 	if (td->td_critnest > 1)
13491e24c28fSJeff Roberson 		td->td_owepreempt = 1;
13501e24c28fSJeff Roberson 	else
13518df78c41SJeff Roberson 		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
13521e24c28fSJeff Roberson 	thread_unlock(td);
13531e24c28fSJeff Roberson }
13541e24c28fSJeff Roberson 
13551e24c28fSJeff Roberson void
1356b43179fbSJeff Roberson sched_userret(struct thread *td)
1357b43179fbSJeff Roberson {
1358b43179fbSJeff Roberson 	/*
1359b43179fbSJeff Roberson 	 * XXX we cheat slightly on the locking here to avoid locking in
1360b43179fbSJeff Roberson 	 * the usual case.  Setting td_priority here is essentially an
1361b43179fbSJeff Roberson 	 * incomplete workaround for not setting it properly elsewhere.
1362b43179fbSJeff Roberson 	 * Now that some interrupt handlers are threads, not setting it
1363b43179fbSJeff Roberson 	 * properly elsewhere can clobber it in the window between setting
1364b43179fbSJeff Roberson 	 * it here and returning to user mode, so don't waste time setting
1365b43179fbSJeff Roberson 	 * it perfectly here.
1366b43179fbSJeff Roberson 	 */
1367f5c157d9SJohn Baldwin 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1368f5c157d9SJohn Baldwin 	    ("thread with borrowed priority returning to userland"));
13698460a577SJohn Birrell 	if (td->td_priority != td->td_user_pri) {
13707b20fb19SJeff Roberson 		thread_lock(td);
13718460a577SJohn Birrell 		td->td_priority = td->td_user_pri;
13728460a577SJohn Birrell 		td->td_base_pri = td->td_user_pri;
13737b20fb19SJeff Roberson 		thread_unlock(td);
13748460a577SJohn Birrell 	}
1375b43179fbSJeff Roberson }
1376de028f5aSJeff Roberson 
1377e17c57b1SJeff Roberson void
1378e17c57b1SJeff Roberson sched_bind(struct thread *td, int cpu)
1379e17c57b1SJeff Roberson {
1380ad1e7d28SJulian Elischer 	struct td_sched *ts;
1381e17c57b1SJeff Roberson 
13827b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1383e17c57b1SJeff Roberson 	KASSERT(TD_IS_RUNNING(td),
1384e17c57b1SJeff Roberson 	    ("sched_bind: cannot bind non-running thread"));
1385e17c57b1SJeff Roberson 
1386ad1e7d28SJulian Elischer 	ts = td->td_sched;
1387e17c57b1SJeff Roberson 
13889727e637SJeff Roberson 	td->td_flags |= TDF_BOUND;
1389e17c57b1SJeff Roberson #ifdef SMP
1390ad1e7d28SJulian Elischer 	ts->ts_runq = &runq_pcpu[cpu];
1391e17c57b1SJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
1392e17c57b1SJeff Roberson 		return;
1393e17c57b1SJeff Roberson 
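	/* Switch out; sched_add() will requeue us on the bound CPU. */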
1394bf0acc27SJohn Baldwin 	mi_switch(SW_VOL, NULL);
1395e17c57b1SJeff Roberson #endif
1396e17c57b1SJeff Roberson }
1397e17c57b1SJeff Roberson 
1398e17c57b1SJeff Roberson void
1399e17c57b1SJeff Roberson sched_unbind(struct thread* td)
1400e17c57b1SJeff Roberson {
14017b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
14029727e637SJeff Roberson 	td->td_flags &= ~TDF_BOUND;
1403e17c57b1SJeff Roberson }
1404e17c57b1SJeff Roberson 
1405de028f5aSJeff Roberson int
1406ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
1407ebccf1e3SJoseph Koshy {
14087b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
14099727e637SJeff Roberson 	return (td->td_flags & TDF_BOUND);
1410ebccf1e3SJoseph Koshy }
1411ebccf1e3SJoseph Koshy 
141236ec198bSDavid Xu void
141336ec198bSDavid Xu sched_relinquish(struct thread *td)
141436ec198bSDavid Xu {
14157b20fb19SJeff Roberson 	thread_lock(td);
14168df78c41SJeff Roberson 	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
14177b20fb19SJeff Roberson 	thread_unlock(td);
141836ec198bSDavid Xu }
141936ec198bSDavid Xu 
1420ebccf1e3SJoseph Koshy int
1421ca59f152SJeff Roberson sched_load(void)
1422ca59f152SJeff Roberson {
1423ca59f152SJeff Roberson 	return (sched_tdcnt);
1424ca59f152SJeff Roberson }
1425ca59f152SJeff Roberson 
1426de028f5aSJeff Roberson int
1427de028f5aSJeff Roberson sched_sizeof_proc(void)
1428de028f5aSJeff Roberson {
1429de028f5aSJeff Roberson 	return (sizeof(struct proc));
1430de028f5aSJeff Roberson }
143136ec198bSDavid Xu 
1432de028f5aSJeff Roberson int
1433de028f5aSJeff Roberson sched_sizeof_thread(void)
1434de028f5aSJeff Roberson {
1435ad1e7d28SJulian Elischer 	return (sizeof(struct thread) + sizeof(struct td_sched));
1436de028f5aSJeff Roberson }
143779acfc49SJeff Roberson 
143879acfc49SJeff Roberson fixpt_t
14397cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
144079acfc49SJeff Roberson {
1441ad1e7d28SJulian Elischer 	struct td_sched *ts;
144255f2099aSJeff Roberson 
1443ad1e7d28SJulian Elischer 	ts = td->td_sched;
1444ad1e7d28SJulian Elischer 	return (ts->ts_pctcpu);
144579acfc49SJeff Roberson }
1446b41f1452SDavid Xu 
1447b41f1452SDavid Xu void
1448b41f1452SDavid Xu sched_tick(void)
1449b41f1452SDavid Xu {
1450b41f1452SDavid Xu }
1451f0393f06SJeff Roberson 
1452f0393f06SJeff Roberson /*
1453f0393f06SJeff Roberson  * The actual idle process.
1454f0393f06SJeff Roberson  */
1455f0393f06SJeff Roberson void
1456f0393f06SJeff Roberson sched_idletd(void *dummy)
1457f0393f06SJeff Roberson {
1458f0393f06SJeff Roberson 
1459f0393f06SJeff Roberson 	for (;;) {
1460f0393f06SJeff Roberson 		mtx_assert(&Giant, MA_NOTOWNED);
1461f0393f06SJeff Roberson 
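		/* Idle the CPU until a thread becomes runnable. */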
1462f0393f06SJeff Roberson 		while (sched_runnable() == 0)
14636c47aaaeSJeff Roberson 			cpu_idle(0);
1464f0393f06SJeff Roberson 
1465f0393f06SJeff Roberson 		mtx_lock_spin(&sched_lock);
14668df78c41SJeff Roberson 		mi_switch(SW_VOL | SWT_IDLE, NULL);
1467f0393f06SJeff Roberson 		mtx_unlock_spin(&sched_lock);
1468f0393f06SJeff Roberson 	}
1469f0393f06SJeff Roberson }
1470f0393f06SJeff Roberson 
14717b20fb19SJeff Roberson /*
14727b20fb19SJeff Roberson  * A CPU is entering for the first time or a thread is exiting.
14737b20fb19SJeff Roberson  */
14747b20fb19SJeff Roberson void
14757b20fb19SJeff Roberson sched_throw(struct thread *td)
14767b20fb19SJeff Roberson {
14777b20fb19SJeff Roberson 	/*
14787b20fb19SJeff Roberson 	 * Correct spinlock nesting.  The idle thread context that we are
14797b20fb19SJeff Roberson 	 * borrowing was created so that it would start out with a single
14807b20fb19SJeff Roberson 	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
14817b20fb19SJeff Roberson 	 * explicitly acquired locks in this function, the nesting count
14827b20fb19SJeff Roberson 	 * is now 2 rather than 1.  Since we are nested, calling
14837b20fb19SJeff Roberson 	 * spinlock_exit() will simply adjust the counts without allowing
14847b20fb19SJeff Roberson 	 * spin lock using code to interrupt us.
14857b20fb19SJeff Roberson 	 */
14867b20fb19SJeff Roberson 	if (td == NULL) {
14877b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
14887b20fb19SJeff Roberson 		spinlock_exit();
14897b20fb19SJeff Roberson 	} else {
1490eea4f254SJeff Roberson 		lock_profile_release_lock(&sched_lock.lock_object);
14917b20fb19SJeff Roberson 		MPASS(td->td_lock == &sched_lock);
14927b20fb19SJeff Roberson 	}
14937b20fb19SJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
14947b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
14957b20fb19SJeff Roberson 	PCPU_SET(switchtime, cpu_ticks());
14967b20fb19SJeff Roberson 	PCPU_SET(switchticks, ticks);
14977b20fb19SJeff Roberson 	cpu_throw(td, choosethread());	/* doesn't return */
14987b20fb19SJeff Roberson }
14997b20fb19SJeff Roberson 
15007b20fb19SJeff Roberson void
1501fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
15027b20fb19SJeff Roberson {
15037b20fb19SJeff Roberson 
15047b20fb19SJeff Roberson 	/*
15057b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
15067b20fb19SJeff Roberson 	 * non-nested critical section with sched_lock held but not recursed.
15077b20fb19SJeff Roberson 	 */
1508fe54587fSJeff Roberson 	td->td_oncpu = PCPU_GET(cpuid);
1509fe54587fSJeff Roberson 	sched_lock.mtx_lock = (uintptr_t)td;
1510eea4f254SJeff Roberson 	lock_profile_obtain_lock_success(&sched_lock.lock_object,
1511eea4f254SJeff Roberson 	    0, 0, __FILE__, __LINE__);
1512fe54587fSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
15137b20fb19SJeff Roberson }
15147b20fb19SJeff Roberson 
1515885d51a3SJeff Roberson void
1516885d51a3SJeff Roberson sched_affinity(struct thread *td)
1517885d51a3SJeff Roberson {
1518885d51a3SJeff Roberson }
1519