/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */

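/*
 * Worked example of the clamp above (illustrative; assumes the stock
 * values PRIO_MIN == -20, PRIO_MAX == 20 and RQ_PPQ == 4 from
 * <sys/resource.h> and <sys/runq.h>).  On a UP kernel:
 *
 *	INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - RQ_PPQ)
 *	    + INVERSE_ESTCPU_WEIGHT - 1 = 8 * (1 * 40 - 4) + 8 - 1 = 295
 *
 * so ESTCPULIM() pins td_estcpu at 295; since priorities move one step
 * per INVERSE_ESTCPU_WEIGHT estcpu points, CPU usage alone can push a
 * thread down at most 36 levels, one run queue (RQ_PPQ) less than the
 * 40 levels spanned by the full nice range.
 */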
/*
 * The schedulable entity that runs a context.
 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
	struct thread	*ts_thread;	/* (*) Active associated thread. */
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ts_rqindex;	/* (j) Run queue index. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	int		ts_slptime;	/* (j) Seconds !RUNNING. */
	struct runq	*ts_runq;	/* runq the thread is currently on */
};

/* flags kept in td_flags */
#define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define TDF_EXIT	TDF_SCHED1	/* thread is being killed. */
#define TDF_BOUND	TDF_SCHED2

#define ts_flags	ts_thread->td_flags
#define TSF_DIDRUN	TDF_DIDRUN /* thread actually ran. */
#define TSF_EXIT	TDF_EXIT /* thread is being killed. */
#define TSF_BOUND	TDF_BOUND /* stuck to one CPU */

#define SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

static struct td_sched td_sched0;
struct mtx sched_lock;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static void	setup_runqs(void);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	forward_wakeup(int cpunum);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}
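/*
 * Usage note for the handler above (illustrative; assumes hz == 1000,
 * so tick == 1000 microseconds): writing 50000 to the quantum sysctl
 * below stores sched_quantum = 50000 / 1000 = 50 ticks and
 * hogticks = 100, i.e. a 50 ms round-robin slice; values smaller than
 * one tick are rejected with EINVAL.
 */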

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");

#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
	   &forward_wakeup_use_single, 0,
	   "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
	   &forward_wakeup_use_htt, 0,
	   "account for htt");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "allow threads to share a quantum");
#endif

static __inline void
sched_load_add(void)
{
	sched_tdcnt++;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{
	sched_tdcnt--;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		td_estcpu *= decay;
 * will compute
 * 	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
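/*
 * Worked example of decay_cpu() (illustrative; loadfac and FSCALE are
 * fixed point): with a load average of 1.0, loadfac == 2.0 and
 *
 *	decay = loadfac / (loadfac + 1) = 2/3
 *
 * so five applications scale an estcpu value by (2/3)^5 =~ 0.13,
 * matching the power table above, which needs 5.68 iterations for an
 * exact decay to 10%.
 */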
/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
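/*
 * Illustrative steady state (assumes the stock FSHIFT == 11, hence
 * FSCALE == 2048, and stathz == 100): schedcpu() below decays
 * ts_pctcpu by ccpu once a second and then credits the statclock
 * ticks, so a thread that never leaves the CPU converges to
 *
 *	p = 0.95122 * p + 100  =>  p = 100 / (1 - 0.95122) =~ 2050
 *
 * which is FSCALE to within rounding, i.e. ts_pctcpu saturates at
 * roughly 100%.
 */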

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_SLOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				ts->ts_flags &= ~TSF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TSF_DIDRUN */
			} else if (ts->ts_flags & TSF_DIDRUN) {
				awake = 1;
				ts->ts_flags &= ~TSF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 * Do it per td_sched, and add them up at the end?
			 * XXXKSE
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		} /* end of thread loop */
		PROC_SUNLOCK(p);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t loadfac;
	unsigned int newcpu;

	ts = td->td_sched;
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}
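/*
 * Example of the decay loop above (illustrative): with a load average
 * of 2.0, loadfac == 4.0 and every counted second of sleep multiplies
 * td_estcpu by 4/5.  From the maximum td_estcpu of 255 it takes about
 * ln(255) / ln(5/4) =~ 25 iterations to reach zero, in line with the
 * "six times the loadfactor" rule of thumb above (6 * 4 == 24).
 */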

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}
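/*
 * Worked example of the formula above (illustrative; assumes the stock
 * PUSER == PRI_MIN_TIMESHARE == 160 and PRI_MAX_TIMESHARE == 223 from
 * <sys/priority.h>): a thread with td_estcpu == 80 and p_nice == 0 gets
 *
 *	newpriority = 160 + 80 / 8 + 1 * (0 - (-20)) = 190
 *
 * which already lies inside [160, 223], so the clamp does not fire.
 */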

/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */
/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	td_sched0.ts_thread = &thread0;
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}

	/*
	 * Force a context switch if the current thread has used up a full
	 * quantum (default quantum is 100ms).
	 */
	if (!TD_IS_IDLETHREAD(td) &&
	    ticks - PCPU_GET(switchticks) >= sched_quantum)
		td->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Charge child's scheduling CPU usage to parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    td, td->td_name, td->td_priority);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_name, child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	mtx_lock_spin(&sched_lock);
	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	mtx_unlock_spin(&sched_lock);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	sched_newthread(childtd);
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{
	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_name, td->td_priority, prio, curthread,
	    curthread->td_name);

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) &&
	    td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_flags |= TDF_UBORROWING;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else {
		sched_lend_user_prio(td, prio);
	}
}

void
sched_sleep(struct thread *td, int pri)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td->td_sched->ts_slptime = 0;
	if (pri)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri <= PSOCK)
		td->td_flags |= TDF_CANSWAP;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct td_sched *ts;
	struct proc *p;

	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_unlock(td);
	}

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_load_rem();

	if (newtd)
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		idle_cpus_mask &= ~PCPU_GET(cpumask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
			("trying to run inhibited thread"));
		newtd->td_sched->ts_flags |= TSF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
	}
	MPASS(newtd->td_lock == &sched_lock);

	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		/* I feel sleepy */
		lock_profile_release_lock(&sched_lock.lock_object);
		cpu_switch(td, newtd, td->td_lock);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    0, 0, __FILE__, __LINE__);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed. All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		idle_cpus_mask |= PCPU_GET(cpumask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptick = ticks;
	ts->ts_slptime = 0;
	sched_add(td, SRQ_BORING);
}

#ifdef SMP
/* enable HTT_2 if you have a 2-way HTT cpu. */
static int
forward_wakeup(int cpunum)
{
	cpumask_t map, me, dontuse;
	cpumask_t map2;
	struct pcpu *pc;
	cpumask_t id, map3;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);
	/*
	 * Don't bother if we should be doing it ourselves.
	 */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n",
						map, map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}
	/* If we only allow a specific CPU, then mask off all the others */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),
		    ("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try choose an idle die. */
		if (forward_wakeup_use_htt) {
			map2 = (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* set only one bit */
		if (forward_wakeup_use_single) {
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}
#endif
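/*
 * Notes on the two mask tricks in forward_wakeup() above (illustrative):
 * "map & ((~map) + 1)" is the usual two's-complement idiom for isolating
 * the lowest set bit (e.g. 0b0110 -> 0b0010), so only one idle CPU gets
 * the IPI.  "(map & (map >> 1)) & 0x5555" keeps a bit only where both
 * logical CPUs of a 2-way HTT pair are idle (e.g. map == 0b1100 ->
 * 0b0100), preferring a fully idle package over a half-busy one.
 */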
102782a1dfc1SJulian Elischer 
1028f3a0f873SStephan Uphoff #ifdef SMP
1029a3f2d842SStephan Uphoff static void kick_other_cpu(int pri,int cpuid);
1030f3a0f873SStephan Uphoff 
1031f3a0f873SStephan Uphoff static void
1032f3a0f873SStephan Uphoff kick_other_cpu(int pri,int cpuid)
1033f3a0f873SStephan Uphoff {
1034f3a0f873SStephan Uphoff 	struct pcpu * pcpu = pcpu_find(cpuid);
1035f3a0f873SStephan Uphoff 	int cpri = pcpu->pc_curthread->td_priority;
1036f3a0f873SStephan Uphoff 
1037f3a0f873SStephan Uphoff 	if (idle_cpus_mask & pcpu->pc_cpumask) {
1038f3a0f873SStephan Uphoff 		forward_wakeups_delivered++;
1039f3a0f873SStephan Uphoff 		ipi_selected(pcpu->pc_cpumask, IPI_AST);
1040f3a0f873SStephan Uphoff 		return;
1041f3a0f873SStephan Uphoff 	}
1042f3a0f873SStephan Uphoff 
1043f3a0f873SStephan Uphoff 	if (pri >= cpri)
1044f3a0f873SStephan Uphoff 		return;
1045f3a0f873SStephan Uphoff 
1046f3a0f873SStephan Uphoff #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
1047f3a0f873SStephan Uphoff #if !defined(FULL_PREEMPTION)
1048f3a0f873SStephan Uphoff 	if (pri <= PRI_MAX_ITHD)
1049f3a0f873SStephan Uphoff #endif /* ! FULL_PREEMPTION */
1050f3a0f873SStephan Uphoff 	{
1051f3a0f873SStephan Uphoff 		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
1052f3a0f873SStephan Uphoff 		return;
1053f3a0f873SStephan Uphoff 	}
1054f3a0f873SStephan Uphoff #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
1055f3a0f873SStephan Uphoff 
1056f3a0f873SStephan Uphoff 	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
1057f3a0f873SStephan Uphoff 	ipi_selected( pcpu->pc_cpumask , IPI_AST);
1058f3a0f873SStephan Uphoff 	return;
1059f3a0f873SStephan Uphoff }
1060f3a0f873SStephan Uphoff #endif /* SMP */
1061f3a0f873SStephan Uphoff 
1062b43179fbSJeff Roberson void
10632630e4c9SJulian Elischer sched_add(struct thread *td, int flags)
10646804a3abSJulian Elischer #ifdef SMP
1065f3a0f873SStephan Uphoff {
1066ad1e7d28SJulian Elischer 	struct td_sched *ts;
10676804a3abSJulian Elischer 	int forwarded = 0;
10686804a3abSJulian Elischer 	int cpu;
1069f3a0f873SStephan Uphoff 	int single_cpu = 0;
10707cf90fb3SJeff Roberson 
1071ad1e7d28SJulian Elischer 	ts = td->td_sched;
10727b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1073f0393f06SJeff Roberson 	KASSERT((td->td_inhibitors == 0),
1074f0393f06SJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
1075f0393f06SJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1076f0393f06SJeff Roberson 	    ("sched_add: bad thread state"));
1077b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1078b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
1079907bdbc2SJeff Roberson 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1080431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1081431f8906SJulian Elischer 	    curthread->td_name);
10827b20fb19SJeff Roberson 	/*
10837b20fb19SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
10847b20fb19SJeff Roberson 	 * to the scheduler's lock.
10857b20fb19SJeff Roberson 	 */
10867b20fb19SJeff Roberson 	if (td->td_lock != &sched_lock) {
10877b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
10887b20fb19SJeff Roberson 		thread_lock_set(td, &sched_lock);
10897b20fb19SJeff Roberson 	}
1090f0393f06SJeff Roberson 	TD_SET_RUNQ(td);
1091f3a0f873SStephan Uphoff 
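	/*
	 * Pick a run queue: pinned and bound threads must use their
	 * CPU's private queue; everything else goes on the global one.
	 */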
1092f3a0f873SStephan Uphoff 	if (td->td_pinned != 0) {
1093f3a0f873SStephan Uphoff 		cpu = td->td_lastcpu;
1094ad1e7d28SJulian Elischer 		ts->ts_runq = &runq_pcpu[cpu];
1095f3a0f873SStephan Uphoff 		single_cpu = 1;
1096f3a0f873SStephan Uphoff 		CTR3(KTR_RUNQ,
1097ad1e7d28SJulian Elischer 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
1098ad1e7d28SJulian Elischer 	} else if (ts->ts_flags & TSF_BOUND) {
1099f3a0f873SStephan Uphoff 		/* Find CPU from bound runq */
1100ad1e7d28SJulian Elischer 		KASSERT(SKE_RUNQ_PCPU(ts), ("sched_add: bound td_sched not on cpu runq"));
1101ad1e7d28SJulian Elischer 		cpu = ts->ts_runq - &runq_pcpu[0];
1102f3a0f873SStephan Uphoff 		single_cpu = 1;
1103f3a0f873SStephan Uphoff 		CTR3(KTR_RUNQ,
1104ad1e7d28SJulian Elischer 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
1105f3a0f873SStephan Uphoff 	} else {
11066804a3abSJulian Elischer 		CTR2(KTR_RUNQ,
1107ad1e7d28SJulian Elischer 		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
11086804a3abSJulian Elischer 		cpu = NOCPU;
1109ad1e7d28SJulian Elischer 		ts->ts_runq = &runq;
1110e17c57b1SJeff Roberson 	}
1111f3a0f873SStephan Uphoff 
1112a3f2d842SStephan Uphoff 	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
1113f3a0f873SStephan Uphoff 		kick_other_cpu(td->td_priority, cpu);
1114f3a0f873SStephan Uphoff 	} else {
1115f3a0f873SStephan Uphoff 
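		/*
		 * Global-runq case: if we are busy and not servicing an
		 * interrupt, try to forward the wakeup to another idle,
		 * non-halted CPU.
		 */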
1116f3a0f873SStephan Uphoff 		if (!single_cpu) {
1117f3a0f873SStephan Uphoff 			cpumask_t me = PCPU_GET(cpumask);
1118f3a0f873SStephan Uphoff 			int idle = idle_cpus_mask & me;
1119f3a0f873SStephan Uphoff 
1120f3a0f873SStephan Uphoff 			if (!idle && ((flags & SRQ_INTR) == 0) &&
1121f3a0f873SStephan Uphoff 			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
1122f3a0f873SStephan Uphoff 				forwarded = forward_wakeup(cpu);
1123f3a0f873SStephan Uphoff 		}
1124f3a0f873SStephan Uphoff 
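		/*
		 * Nobody else took the thread: unless we are yielding,
		 * try to preempt the local CPU, falling back to a
		 * reschedule request via maybe_resched().
		 */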
1125f3a0f873SStephan Uphoff 		if (!forwarded) {
1126a3f2d842SStephan Uphoff 			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
1127f3a0f873SStephan Uphoff 				return;
1128f3a0f873SStephan Uphoff 			else
1129f3a0f873SStephan Uphoff 				maybe_resched(td);
1130f3a0f873SStephan Uphoff 		}
1131f3a0f873SStephan Uphoff 	}
1132f3a0f873SStephan Uphoff 
1133f3a0f873SStephan Uphoff 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1134f3a0f873SStephan Uphoff 		sched_load_add();
1135ad1e7d28SJulian Elischer 	runq_add(ts->ts_runq, ts, flags);
1136f3a0f873SStephan Uphoff }
1137f3a0f873SStephan Uphoff #else /* SMP */
1138f3a0f873SStephan Uphoff {
1139ad1e7d28SJulian Elischer 	struct td_sched *ts;
1140ad1e7d28SJulian Elischer 	ts = td->td_sched;
11417b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1142f0393f06SJeff Roberson 	KASSERT((td->td_inhibitors == 0),
1143f0393f06SJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
1144f0393f06SJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1145f0393f06SJeff Roberson 	    ("sched_add: bad thread state"));
1146b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1147b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
1148f3a0f873SStephan Uphoff 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1149431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1150431f8906SJulian Elischer 	    curthread->td_name);
11517b20fb19SJeff Roberson 	/*
11527b20fb19SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
11537b20fb19SJeff Roberson 	 * to the scheduler's lock.
11547b20fb19SJeff Roberson 	 */
11557b20fb19SJeff Roberson 	if (td->td_lock != &sched_lock) {
11567b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
11577b20fb19SJeff Roberson 		thread_lock_set(td, &sched_lock);
11587b20fb19SJeff Roberson 	}
1159f0393f06SJeff Roberson 	TD_SET_RUNQ(td);
1160ad1e7d28SJulian Elischer 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1161ad1e7d28SJulian Elischer 	ts->ts_runq = &runq;
11626804a3abSJulian Elischer 
11636804a3abSJulian Elischer 	/*
11646804a3abSJulian Elischer 	 * If we are yielding (on the way out anyhow)
11656804a3abSJulian Elischer 	 * or the thread being saved is us,
11666804a3abSJulian Elischer 	 * then don't try to be smart about preemption
11676804a3abSJulian Elischer 	 * or kicking off another CPU,
11686804a3abSJulian Elischer 	 * as it won't help and may hinder.
11696804a3abSJulian Elischer 	 * In the YIELDING case, we are about to run whoever is
11706804a3abSJulian Elischer 	 * being put in the queue anyhow, and in the
11716804a3abSJulian Elischer 	 * OURSELF case, we are putting ourselves on the run queue,
11726804a3abSJulian Elischer 	 * which also only happens when we are about to yield.
11736804a3abSJulian Elischer 	 */
11746804a3abSJulian Elischer 	if ((flags & SRQ_YIELDING) == 0) {
11756804a3abSJulian Elischer 		if (maybe_preempt(td))
11766804a3abSJulian Elischer 			return;
11776804a3abSJulian Elischer 	}
1178f2f51f8aSJeff Roberson 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1179907bdbc2SJeff Roberson 		sched_load_add();
1180ad1e7d28SJulian Elischer 	runq_add(ts->ts_runq, ts, flags);
11816942d433SJohn Baldwin 	maybe_resched(td);
1182b43179fbSJeff Roberson }
1183f3a0f873SStephan Uphoff #endif /* SMP */
1184f3a0f873SStephan Uphoff 
1185b43179fbSJeff Roberson void
11867cf90fb3SJeff Roberson sched_rem(struct thread *td)
1187b43179fbSJeff Roberson {
1188ad1e7d28SJulian Elischer 	struct td_sched *ts;
11897cf90fb3SJeff Roberson 
1190ad1e7d28SJulian Elischer 	ts = td->td_sched;
1191b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1192b61ce5b0SJeff Roberson 	    ("sched_rem: thread swapped out"));
1193f0393f06SJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
1194ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
1195b43179fbSJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
1196907bdbc2SJeff Roberson 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1197431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1198431f8906SJulian Elischer 	    curthread->td_name);
1199b43179fbSJeff Roberson 
1200f2f51f8aSJeff Roberson 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1201907bdbc2SJeff Roberson 		sched_load_rem();
1202ad1e7d28SJulian Elischer 	runq_remove(ts->ts_runq, ts);
1203f0393f06SJeff Roberson 	TD_SET_CAN_RUN(td);
1204b43179fbSJeff Roberson }
1205b43179fbSJeff Roberson 
120614f0e2e9SJulian Elischer /*
120714f0e2e9SJulian Elischer  * Select threads to run.
120814f0e2e9SJulian Elischer  * Notice that the running threads still consume a slot.
120914f0e2e9SJulian Elischer  */
1210f0393f06SJeff Roberson struct thread *
1211b43179fbSJeff Roberson sched_choose(void)
1212b43179fbSJeff Roberson {
1213ad1e7d28SJulian Elischer 	struct td_sched *ts;
1214e17c57b1SJeff Roberson 	struct runq *rq;
1215b43179fbSJeff Roberson 
12167b20fb19SJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
1217e17c57b1SJeff Roberson #ifdef SMP
1218ad1e7d28SJulian Elischer 	struct td_sched *kecpu;
1219e17c57b1SJeff Roberson 
1220e17c57b1SJeff Roberson 	rq = &runq;
1221ad1e7d28SJulian Elischer 	ts = runq_choose(&runq);
1222e17c57b1SJeff Roberson 	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1223e17c57b1SJeff Roberson 
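	/*
	 * Take the head of this CPU's private runq only when it
	 * strictly beats the global runq's best priority; ties go to
	 * the global queue.
	 */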
1224ad1e7d28SJulian Elischer 	if (ts == NULL ||
1225e17c57b1SJeff Roberson 	    (kecpu != NULL &&
1226ad1e7d28SJulian Elischer 	     kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
1227ad1e7d28SJulian Elischer 		CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
1228e17c57b1SJeff Roberson 		     PCPU_GET(cpuid));
1229ad1e7d28SJulian Elischer 		ts = kecpu;
1230e17c57b1SJeff Roberson 		rq = &runq_pcpu[PCPU_GET(cpuid)];
1231e17c57b1SJeff Roberson 	} else {
1232ad1e7d28SJulian Elischer 		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", ts);
1233e17c57b1SJeff Roberson 	}
1234e17c57b1SJeff Roberson 
1235e17c57b1SJeff Roberson #else
1236e17c57b1SJeff Roberson 	rq = &runq;
1237ad1e7d28SJulian Elischer 	ts = runq_choose(&runq);
1238e17c57b1SJeff Roberson #endif
1239b43179fbSJeff Roberson 
1240ad1e7d28SJulian Elischer 	if (ts) {
1241ad1e7d28SJulian Elischer 		runq_remove(rq, ts);
1242f0393f06SJeff Roberson 		ts->ts_flags |= TSF_DIDRUN;
1243b43179fbSJeff Roberson 
1244b61ce5b0SJeff Roberson 		KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
1245b61ce5b0SJeff Roberson 		    ("sched_choose: thread swapped out"));
1246f0393f06SJeff Roberson 		return (ts->ts_thread);
1247b43179fbSJeff Roberson 	}
1248f0393f06SJeff Roberson 	return (PCPU_GET(idlethread));
1249b43179fbSJeff Roberson }
1250b43179fbSJeff Roberson 
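/*
 * Preempt the given thread: switch immediately, unless it is inside a
 * critical section, in which case just record that a preemption is
 * owed (td_owepreempt) for when the section is exited.
 */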
1251b43179fbSJeff Roberson void
12521e24c28fSJeff Roberson sched_preempt(struct thread *td)
12531e24c28fSJeff Roberson {
12541e24c28fSJeff Roberson 	thread_lock(td);
12551e24c28fSJeff Roberson 	if (td->td_critnest > 1)
12561e24c28fSJeff Roberson 		td->td_owepreempt = 1;
12571e24c28fSJeff Roberson 	else
12581e24c28fSJeff Roberson 		mi_switch(SW_INVOL | SW_PREEMPT, NULL);
12591e24c28fSJeff Roberson 	thread_unlock(td);
12601e24c28fSJeff Roberson }
12611e24c28fSJeff Roberson 
12621e24c28fSJeff Roberson void
1263b43179fbSJeff Roberson sched_userret(struct thread *td)
1264b43179fbSJeff Roberson {
1265b43179fbSJeff Roberson 	/*
1266b43179fbSJeff Roberson 	 * XXX we cheat slightly on the locking here to avoid locking in
1267b43179fbSJeff Roberson 	 * the usual case.  Setting td_priority here is essentially an
1268b43179fbSJeff Roberson 	 * incomplete workaround for not setting it properly elsewhere.
1269b43179fbSJeff Roberson 	 * Now that some interrupt handlers are threads, not setting it
1270b43179fbSJeff Roberson 	 * properly elsewhere can clobber it in the window between setting
1271b43179fbSJeff Roberson 	 * it here and returning to user mode, so don't waste time setting
1272b43179fbSJeff Roberson 	 * it perfectly here.
1273b43179fbSJeff Roberson 	 */
1274f5c157d9SJohn Baldwin 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1275f5c157d9SJohn Baldwin 	    ("thread with borrowed priority returning to userland"));
12768460a577SJohn Birrell 	if (td->td_priority != td->td_user_pri) {
12777b20fb19SJeff Roberson 		thread_lock(td);
12788460a577SJohn Birrell 		td->td_priority = td->td_user_pri;
12798460a577SJohn Birrell 		td->td_base_pri = td->td_user_pri;
12807b20fb19SJeff Roberson 		thread_unlock(td);
12818460a577SJohn Birrell 	}
1282b43179fbSJeff Roberson }
1283de028f5aSJeff Roberson 
1284e17c57b1SJeff Roberson void
1285e17c57b1SJeff Roberson sched_bind(struct thread *td, int cpu)
1286e17c57b1SJeff Roberson {
1287ad1e7d28SJulian Elischer 	struct td_sched *ts;
1288e17c57b1SJeff Roberson 
12897b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1290e17c57b1SJeff Roberson 	KASSERT(TD_IS_RUNNING(td),
1291e17c57b1SJeff Roberson 	    ("sched_bind: cannot bind non-running thread"));
1292e17c57b1SJeff Roberson 
1293ad1e7d28SJulian Elischer 	ts = td->td_sched;
1294e17c57b1SJeff Roberson 
1295ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
1296e17c57b1SJeff Roberson #ifdef SMP
1297ad1e7d28SJulian Elischer 	ts->ts_runq = &runq_pcpu[cpu];
1298e17c57b1SJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
1299e17c57b1SJeff Roberson 		return;
1300e17c57b1SJeff Roberson 
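	/* Not on the target CPU yet; switch out so we are rescheduled there. */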
1301bf0acc27SJohn Baldwin 	mi_switch(SW_VOL, NULL);
1302e17c57b1SJeff Roberson #endif
1303e17c57b1SJeff Roberson }
1304e17c57b1SJeff Roberson 
1305e17c57b1SJeff Roberson void
1306e17c57b1SJeff Roberson sched_unbind(struct thread* td)
1307e17c57b1SJeff Roberson {
13087b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1309ad1e7d28SJulian Elischer 	td->td_sched->ts_flags &= ~TSF_BOUND;
1310e17c57b1SJeff Roberson }
1311e17c57b1SJeff Roberson 
1312de028f5aSJeff Roberson int
1313ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
1314ebccf1e3SJoseph Koshy {
13157b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1316ad1e7d28SJulian Elischer 	return (td->td_sched->ts_flags & TSF_BOUND);
1317ebccf1e3SJoseph Koshy }
1318ebccf1e3SJoseph Koshy 
131936ec198bSDavid Xu void
132036ec198bSDavid Xu sched_relinquish(struct thread *td)
132136ec198bSDavid Xu {
13227b20fb19SJeff Roberson 	thread_lock(td);
13237b20fb19SJeff Roberson 	SCHED_STAT_INC(switch_relinquish);
132436ec198bSDavid Xu 	mi_switch(SW_VOL, NULL);
13257b20fb19SJeff Roberson 	thread_unlock(td);
132636ec198bSDavid Xu }
132736ec198bSDavid Xu 
1328ebccf1e3SJoseph Koshy int
1329ca59f152SJeff Roberson sched_load(void)
1330ca59f152SJeff Roberson {
1331ca59f152SJeff Roberson 	return (sched_tdcnt);
1332ca59f152SJeff Roberson }
1333ca59f152SJeff Roberson 
1334de028f5aSJeff Roberson int
1335de028f5aSJeff Roberson sched_sizeof_proc(void)
1336de028f5aSJeff Roberson {
1337de028f5aSJeff Roberson 	return (sizeof(struct proc));
1338de028f5aSJeff Roberson }
133936ec198bSDavid Xu 
1340de028f5aSJeff Roberson int
1341de028f5aSJeff Roberson sched_sizeof_thread(void)
1342de028f5aSJeff Roberson {
1343ad1e7d28SJulian Elischer 	return (sizeof(struct thread) + sizeof(struct td_sched));
1344de028f5aSJeff Roberson }
134579acfc49SJeff Roberson 
134679acfc49SJeff Roberson fixpt_t
13477cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
134879acfc49SJeff Roberson {
1349ad1e7d28SJulian Elischer 	struct td_sched *ts;
135055f2099aSJeff Roberson 
1351ad1e7d28SJulian Elischer 	ts = td->td_sched;
1352ad1e7d28SJulian Elischer 	return (ts->ts_pctcpu);
135379acfc49SJeff Roberson }
1354b41f1452SDavid Xu 
1355b41f1452SDavid Xu void
1356b41f1452SDavid Xu sched_tick(void)
1357b41f1452SDavid Xu {
1358b41f1452SDavid Xu }
1359f0393f06SJeff Roberson 
1360f0393f06SJeff Roberson /*
1361f0393f06SJeff Roberson  * The actual idle process.
1362f0393f06SJeff Roberson  * The body of the idle thread.
1363f0393f06SJeff Roberson void
1364f0393f06SJeff Roberson sched_idletd(void *dummy)
1365f0393f06SJeff Roberson {
1366f0393f06SJeff Roberson 
1367f0393f06SJeff Roberson 	for (;;) {
1368f0393f06SJeff Roberson 		mtx_assert(&Giant, MA_NOTOWNED);
1369f0393f06SJeff Roberson 
1370f0393f06SJeff Roberson 		while (sched_runnable() == 0)
1371f0393f06SJeff Roberson 			cpu_idle();
1372f0393f06SJeff Roberson 
1373f0393f06SJeff Roberson 		mtx_lock_spin(&sched_lock);
1374f0393f06SJeff Roberson 		mi_switch(SW_VOL, NULL);
1375f0393f06SJeff Roberson 		mtx_unlock_spin(&sched_lock);
1376f0393f06SJeff Roberson 	}
1377f0393f06SJeff Roberson }
1378f0393f06SJeff Roberson 
13797b20fb19SJeff Roberson /*
13807b20fb19SJeff Roberson  * A CPU is entering for the first time or a thread is exiting.
13817b20fb19SJeff Roberson  */
13827b20fb19SJeff Roberson void
13837b20fb19SJeff Roberson sched_throw(struct thread *td)
13847b20fb19SJeff Roberson {
13857b20fb19SJeff Roberson 	/*
13867b20fb19SJeff Roberson 	 * Correct spinlock nesting.  The idle thread context that we are
13877b20fb19SJeff Roberson 	 * borrowing was created so that it would start out with a single
13887b20fb19SJeff Roberson 	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
13897b20fb19SJeff Roberson 	 * explicitly acquired locks in this function, the nesting count
13907b20fb19SJeff Roberson 	 * is now 2 rather than 1.  Since we are nested, calling
13917b20fb19SJeff Roberson 	 * spinlock_exit() will simply adjust the counts without allowing
13927b20fb19SJeff Roberson 	 * spin lock using code to interrupt us.
13937b20fb19SJeff Roberson 	 */
13947b20fb19SJeff Roberson 	if (td == NULL) {
13957b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
13967b20fb19SJeff Roberson 		spinlock_exit();
13977b20fb19SJeff Roberson 	} else {
1398eea4f254SJeff Roberson 		lock_profile_release_lock(&sched_lock.lock_object);
13997b20fb19SJeff Roberson 		MPASS(td->td_lock == &sched_lock);
14007b20fb19SJeff Roberson 	}
14017b20fb19SJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
14027b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
14037b20fb19SJeff Roberson 	PCPU_SET(switchtime, cpu_ticks());
14047b20fb19SJeff Roberson 	PCPU_SET(switchticks, ticks);
14057b20fb19SJeff Roberson 	cpu_throw(td, choosethread());	/* doesn't return */
14067b20fb19SJeff Roberson }
14077b20fb19SJeff Roberson 
14087b20fb19SJeff Roberson void
1409fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
14107b20fb19SJeff Roberson {
14117b20fb19SJeff Roberson 
14127b20fb19SJeff Roberson 	/*
14137b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
14147b20fb19SJeff Roberson 	 * non-nested critical section with sched_lock held but not recursed.
14157b20fb19SJeff Roberson 	 */
1416fe54587fSJeff Roberson 	td->td_oncpu = PCPU_GET(cpuid);
1417fe54587fSJeff Roberson 	sched_lock.mtx_lock = (uintptr_t)td;
1418eea4f254SJeff Roberson 	lock_profile_obtain_lock_success(&sched_lock.lock_object,
1419eea4f254SJeff Roberson 	    0, 0, __FILE__, __LINE__);
1420fe54587fSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
14217b20fb19SJeff Roberson }
14227b20fb19SJeff Roberson 
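/* The 4BSD scheduler keeps no per-thread affinity state; nothing to do. */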
1423885d51a3SJeff Roberson void
1424885d51a3SJeff Roberson sched_affinity(struct thread *td)
1425885d51a3SJeff Roberson {
1426885d51a3SJeff Roberson }
1427885d51a3SJeff Roberson 
1428ed062c8dSJulian Elischer #define KERN_SWITCH_INCLUDE 1
1429ed062c8dSJulian Elischer #include "kern/kern_switch.c"
1430