xref: /freebsd/sys/kern/sched_ule.c (revision 167057914b42f2ab587d9e215158d07efb1a9cf4)
/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine grain locking.  It has superior interactive
 * performance under load even on uni-processor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#include <sys/sbuf.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#if defined(__powerpc__) && defined(E500)
#error "This architecture is not currently compatible with ULE"
#endif

#define	KTR_ULE	0

#define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
#define	TDQ_NAME_LEN	(sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU)))
#define	TDQ_LOADNAME_LEN	(sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load"))

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_rltick;	/* Real last tick, for affinity. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_incrtick;	/* Last tick that we incremented on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
#ifdef KTR
	char		ts_name[TS_NAME_LEN];
#endif
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

/*
 * Priority ranges used for interactive and non-interactive timeshare
 * threads.  The timeshare priorities are split up into four ranges.
 * The first range handles interactive threads.  The last three ranges
 * (NHALF, x, and NHALF) handle non-interactive threads with the outer
 * ranges supporting nice values.
 */
#define	PRI_TIMESHARE_RANGE	(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	PRI_INTERACT_RANGE	((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2)
#define	PRI_BATCH_RANGE		(PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE)

#define	PRI_MIN_INTERACT	PRI_MIN_TIMESHARE
#define	PRI_MAX_INTERACT	(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1)
#define	PRI_MIN_BATCH		(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE)
#define	PRI_MAX_BATCH		PRI_MAX_TIMESHARE

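/*
 * For illustration, assuming the stock priority.h values of this era
 * (PRI_MIN_TIMESHARE = 120, PRI_MAX_TIMESHARE = 223):
 * PRI_TIMESHARE_RANGE = 104; with SCHED_PRI_NRESV = 40 nice levels,
 * PRI_INTERACT_RANGE = 32 and PRI_BATCH_RANGE = 72, placing interactive
 * threads at priorities 120..151 and batch threads at 152..223.
 */
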
/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

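/*
 * Worked example, assuming hz = 1000: SCHED_TICK_TARG is 10000 ticks
 * and SCHED_TICK_MAX is 11000.  ts_ticks is stored scaled up by
 * 2^SCHED_TICK_SHIFT (1024x) so the ratios computed from it below do
 * not lose precision to integer truncation.
 */
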
/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_BATCH + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_BATCH - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)

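/*
 * Roughly: a thread that was runnable for the entire averaging window
 * has SCHED_TICK_HZ(ts) ~= SCHED_TICK_TOTAL(ts), so SCHED_PRI_TICKS(ts)
 * ~= SCHED_PRI_RANGE and the thread sits near SCHED_PRI_MAX before the
 * nice adjustment; one that ran for half the window lands near the
 * middle of the utilization range.
 */
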
/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

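/*
 * Note: sched_interact_score() and sched_priority() (declared below,
 * defined later in this file) use these values roughly as follows: a
 * score below SCHED_INTERACT_THRESH yields an interactive-range
 * priority, which tdq_runq_add() below places on the realtime run
 * queue; higher scores fall into the batch range and the circular
 * timeshare queue.
 */
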
/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice = 1;
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
#else
static int preempt_thresh = PRI_MIN_KERN;
#endif
#else
static int preempt_thresh = 0;
#endif
static int static_boost = PRI_MIN_BATCH;
static int sched_idlespins = 10000;
static int sched_idlespinthresh = 16;

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be read without the lock to avoid
 * excess locking in sched_pickcpu().
 */
struct tdq {
	/* Ordered to improve efficiency of cpu_search() and switch(). */
	struct mtx	tdq_lock;		/* run queue lock. */
	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
	volatile int	tdq_load;		/* Aggregate load. */
	volatile int	tdq_cpu_idle;		/* cpu_idle() is active. */
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
	int		tdq_transferable;	/* Transferable thread count. */
	short		tdq_switchcnt;		/* Switches this tick. */
	short		tdq_oldswitchcnt;	/* Switches last tick. */
	u_char		tdq_lowpri;		/* Lowest priority thread. */
	u_char		tdq_ipipending;		/* IPI pending. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	char		tdq_name[TDQ_NAME_LEN];
#ifdef KTR
	char		tdq_loadname[TDQ_LOADNAME_LEN];
#endif
} __aligned(64);
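
/*
 * The 64-byte alignment gives each per-cpu tdq (most importantly
 * tdq_lock) its own cache line on machines with 64-byte lines, so lock
 * traffic and the unlocked tdq_load/tdq_lowpri reads do not false-share
 * with a neighboring cpu's queue.
 */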

/* Idle thread states and config. */
#define	TDQ_RUNNING	1
#define	TDQ_IDLE	2

#ifdef SMP
struct cpu_group *cpu_top;		/* CPU topology */

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
#define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_interval = 128;	/* Default set in sched_initticks(). */
static int affinity;
static int steal_htt = 1;
static int steal_idle = 1;
static int steal_thresh = 2;

/*
 * One thread queue per processor.
 */
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq	*balance_tdq;
static int balance_ticks;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_ID(x)	(0)
#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct thread *tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct thread *);
static void tdq_load_rem(struct tdq *, struct thread *);
static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
static __inline void tdq_runq_rem(struct tdq *, struct thread *);
static inline int sched_shouldpreempt(int, int, int);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, struct thread *);
static struct thread *tdq_steal(struct tdq *, int);
static struct thread *runq_steal(struct runq *, int);
static int sched_pickcpu(struct thread *, int);
static void sched_balance(void);
static int sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct thread *, int, int);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
    struct cpu_group *cg, int indent);
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
    NULL);

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(td, rqh, td_runq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    td, td->td_name, td->td_priority,
					    td->td_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq %d:\n", TDQ_ID(tdq));
	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
	printf("\tLock name:      %s\n", tdq->tdq_name);
	printf("\tload:           %d\n", tdq->tdq_load);
	printf("\tswitch cnt:     %d\n", tdq->tdq_switchcnt);
	printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
}

static inline int
sched_shouldpreempt(int pri, int cpri, int remote)
{
	/*
	 * If the new priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri >= cpri)
		return (0);
	/*
	 * Always preempt idle.
	 */
	if (cpri >= PRI_MIN_IDLE)
		return (1);
	/*
	 * If preemption is disabled don't preempt others.
	 */
	if (preempt_thresh == 0)
		return (0);
	/*
	 * Preempt if we exceed the threshold.
	 */
	if (pri <= preempt_thresh)
		return (1);
	/*
	 * If we're interactive or better and there is non-interactive
	 * or worse running preempt only remote processors.
	 */
	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
		return (1);
	return (0);
}

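/*
 * In short: preempt only for a strictly better priority; always preempt
 * the idle thread; never preempt when preempt_thresh is 0; preempt
 * whenever the new priority clears preempt_thresh; and on remote cpus
 * additionally when an interactive thread would displace a batch or
 * worse one.
 */
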
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
{
	struct td_sched *ts;
	u_char pri;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	pri = td->td_priority;
	ts = td->td_sched;
	TD_SET_RUNQ(td);
	if (THREAD_CAN_MIGRATE(td)) {
		tdq->tdq_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
	if (pri < PRI_MIN_BATCH) {
		ts->ts_runq = &tdq->tdq_realtime;
	} else if (pri <= PRI_MAX_BATCH) {
		ts->ts_runq = &tdq->tdq_timeshare;
		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
			("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between PRI_MIN_BATCH
		 * and PRI_MAX_BATCH.  Use the whole queue to represent these
		 * values.
		 */
		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
			pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, td, pri, flags);
		return;
	} else
		ts->ts_runq = &tdq->tdq_idle;
	runq_add(ts->ts_runq, td, flags);
}

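/*
 * Illustration of the circular timeshare queue (assuming the stock
 * RQ_NQS of 64): a batch priority is scaled into 0..RQ_NQS-1 and then
 * offset by the rotating insert index, so worse priorities land further
 * behind the head; e.g. with tdq_idx = 10, a scaled priority of 5
 * inserts at queue slot 15.
 */
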
/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", td));
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, td, NULL);
	} else
		runq_remove(ts->ts_runq, td);
}

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct thread *td)
{

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	tdq->tdq_load++;
	if ((td->td_flags & TDF_NOLOAD) == 0)
		tdq->tdq_sysload++;
	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
}

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));

	tdq->tdq_load--;
	if ((td->td_flags & TDF_NOLOAD) == 0)
		tdq->tdq_sysload--;
	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
}

/*
 * Set lowpri to its exact value by searching the run-queue and
 * evaluating curthread.  curthread may be passed as an optimization.
 */
static void
tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if (ctd == NULL)
		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
	td = tdq_choose(tdq);
	if (td == NULL || td->td_priority > ctd->td_priority)
		tdq->tdq_lowpri = ctd->td_priority;
	else
		tdq->tdq_lowpri = td->td_priority;
}

#ifdef SMP
struct cpu_search {
	cpuset_t cs_mask;
	u_int	cs_load;
	u_int	cs_cpu;
	int	cs_limit;	/* Min priority for low, min load for high. */
};

#define	CPU_SEARCH_LOWEST	0x1
#define	CPU_SEARCH_HIGHEST	0x2
#define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)

#define	CPUSET_FOREACH(cpu, mask)				\
	for ((cpu) = 0; (cpu) <= mp_maxid; (cpu)++)		\
		if (CPU_ISSET(cpu, &mask))

static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match);
int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low);
int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high);
int cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high);

/*
 * This routine compares according to the match argument and should be
 * reduced in actual instantiations via constant propagation and dead code
 * elimination.
 */
static __inline int
cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high,
    const int match)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);
	if (match & CPU_SEARCH_LOWEST)
		if (CPU_ISSET(cpu, &low->cs_mask) &&
		    tdq->tdq_load < low->cs_load &&
		    tdq->tdq_lowpri > low->cs_limit) {
			low->cs_cpu = cpu;
			low->cs_load = tdq->tdq_load;
		}
	if (match & CPU_SEARCH_HIGHEST)
		if (CPU_ISSET(cpu, &high->cs_mask) &&
		    tdq->tdq_load >= high->cs_limit &&
		    tdq->tdq_load > high->cs_load &&
		    tdq->tdq_transferable) {
			high->cs_cpu = cpu;
			high->cs_load = tdq->tdq_load;
		}
	return (tdq->tdq_load);
}

/*
 * Search the tree of cpu_groups for the lowest or highest loaded cpu
 * according to the match argument.  This routine actually compares the
 * load on all paths through the tree and finds the least loaded cpu on
 * the least loaded path, which may differ from the least loaded cpu in
 * the system.  This balances work among caches and busses.
 *
 * This inline is instantiated in three forms below using constants for the
 * match argument.  It is reduced to the minimum set for each case.  It is
 * also recursive to the depth of the tree.
 */
static __inline int
cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match)
{
	int total;

	total = 0;
	if (cg->cg_children) {
		struct cpu_search lgroup;
		struct cpu_search hgroup;
		struct cpu_group *child;
		u_int lload;
		int hload;
		int load;
		int i;

		lload = -1;
		hload = -1;
		for (i = 0; i < cg->cg_children; i++) {
			child = &cg->cg_child[i];
			if (match & CPU_SEARCH_LOWEST) {
				lgroup = *low;
				lgroup.cs_load = -1;
			}
			if (match & CPU_SEARCH_HIGHEST) {
				hgroup = *high;
				hgroup.cs_load = 0;
			}
			switch (match) {
			case CPU_SEARCH_LOWEST:
				load = cpu_search_lowest(child, &lgroup);
				break;
			case CPU_SEARCH_HIGHEST:
				load = cpu_search_highest(child, &hgroup);
				break;
			case CPU_SEARCH_BOTH:
				load = cpu_search_both(child, &lgroup, &hgroup);
				break;
			}
			total += load;
			if (match & CPU_SEARCH_LOWEST)
				if (load < lload || low->cs_cpu == -1) {
					*low = lgroup;
					lload = load;
				}
			if (match & CPU_SEARCH_HIGHEST)
				if (load > hload || high->cs_cpu == -1) {
					hload = load;
					*high = hgroup;
				}
		}
	} else {
		int cpu;

		CPUSET_FOREACH(cpu, cg->cg_mask)
			total += cpu_compare(cpu, low, high, match);
	}
	return (total);
}

/*
 * cpu_search instantiations must pass constants to maintain the inline
 * optimization.
 */
int
cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low)
{
	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
}

int
cpu_search_highest(struct cpu_group *cg, struct cpu_search *high)
{
	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
}

int
cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high)
{
	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
}

/*
 * Find the cpu with the least load via the least loaded path that has a
 * lowpri greater than pri.  A pri of -1 indicates any priority is
 * acceptable.
 */
static inline int
sched_lowest(struct cpu_group *cg, cpuset_t mask, int pri)
{
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	low.cs_limit = pri;
	cpu_search_lowest(cg, &low);
	return low.cs_cpu;
}

/*
 * Find the cpu with the highest load via the highest loaded path.
 */
static inline int
sched_highest(struct cpu_group *cg, cpuset_t mask, int minload)
{
	struct cpu_search high;

	high.cs_cpu = -1;
	high.cs_load = 0;
	high.cs_mask = mask;
	high.cs_limit = minload;
	cpu_search_highest(cg, &high);
	return high.cs_cpu;
}

/*
 * Simultaneously find the highest and lowest loaded cpu reachable via
 * cg.
 */
static inline void
sched_both(struct cpu_group *cg, cpuset_t mask, int *lowcpu, int *highcpu)
{
	struct cpu_search high;
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_limit = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	high.cs_load = 0;
	high.cs_cpu = -1;
	high.cs_limit = -1;
	high.cs_mask = mask;
	cpu_search_both(cg, &low, &high);
	*lowcpu = low.cs_cpu;
	*highcpu = high.cs_cpu;
	return;
}

static void
sched_balance_group(struct cpu_group *cg)
{
	cpuset_t mask;
	int high;
	int low;
	int i;

	CPU_FILL(&mask);
	for (;;) {
		sched_both(cg, mask, &low, &high);
		if (low == high || low == -1 || high == -1)
			break;
		if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low)))
			break;
		/*
		 * If we failed to move any threads determine which cpu
		 * to kick out of the set and try again.
		 */
		if (TDQ_CPU(high)->tdq_transferable == 0)
			CPU_CLR(high, &mask);
		else
			CPU_CLR(low, &mask);
	}

	for (i = 0; i < cg->cg_children; i++)
		sched_balance_group(&cg->cg_child[i]);
}

static void
sched_balance(void)
{
	struct tdq *tdq;

	/*
	 * Select a random time between .5 * balance_interval and
	 * 1.5 * balance_interval.
	 */
	balance_ticks = max(balance_interval / 2, 1);
	balance_ticks += random() % balance_interval;
	if (smp_started == 0 || rebalance == 0)
		return;
	tdq = TDQ_SELF();
	TDQ_UNLOCK(tdq);
	sched_balance_group(cpu_top);
	TDQ_LOCK(tdq);
}

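/*
 * Example: with the default balance_interval of 128, the code above
 * arms balance_ticks with 64 + (0..127) ticks, i.e. roughly 0.5x to
 * 1.5x the interval, which keeps periodic balancing from running in
 * lockstep across cpus and machines.
 */
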
/*
 * Lock two thread queues using their address to maintain lock order.
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
	if (one < two) {
		TDQ_LOCK(one);
		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
	}
}

/*
 * Unlock two thread queues.  Order is not important here.
 */
static void
tdq_unlock_pair(struct tdq *one, struct tdq *two)
{
	TDQ_UNLOCK(one);
	TDQ_UNLOCK(two);
}

/*
 * Transfer load between two imbalanced thread queues.
 */
static int
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int moved;
	int move;
	int cpu;
	int diff;
	int i;

	tdq_lock_pair(high, low);
	transferable = high->tdq_transferable;
	high_load = high->tdq_load;
	low_load = low->tdq_load;
	moved = 0;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	if (transferable != 0) {
		diff = high_load - low_load;
		move = diff / 2;
		if (diff & 0x1)
			move++;
		move = min(move, transferable);
		for (i = 0; i < move; i++)
			moved += tdq_move(high, low);
		/*
		 * In case the target isn't the current cpu IPI it to force a
		 * reschedule with the new workload.
		 */
		cpu = TDQ_ID(low);
		sched_pin();
		if (cpu != PCPU_GET(cpuid))
			ipi_cpu(cpu, IPI_PREEMPT);
		sched_unpin();
	}
	tdq_unlock_pair(high, low);
	return (moved);
}

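/*
 * Worked example of the split above: high_load = 5 and low_load = 2
 * give diff = 3; the odd remainder rounds move up to 2, which is then
 * capped at the number of transferable threads on the busy queue.
 */
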
/*
 * Move a thread from one thread queue to another.
 */
static int
tdq_move(struct tdq *from, struct tdq *to)
{
	struct td_sched *ts;
	struct thread *td;
	struct tdq *tdq;
	int cpu;

	TDQ_LOCK_ASSERT(from, MA_OWNED);
	TDQ_LOCK_ASSERT(to, MA_OWNED);

	tdq = from;
	cpu = TDQ_ID(to);
	td = tdq_steal(tdq, cpu);
	if (td == NULL)
		return (0);
	ts = td->td_sched;
	/*
	 * Although the run queue is locked the thread may be blocked.  Lock
	 * it to clear this and acquire the run-queue lock.
	 */
	thread_lock(td);
	/* Drop recursive lock on from acquired via thread_lock(). */
	TDQ_UNLOCK(from);
	sched_rem(td);
	ts->ts_cpu = cpu;
	td->td_lock = TDQ_LOCKPTR(to);
	tdq_add(to, td, SRQ_YIELDING);
	return (1);
}

/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
	struct cpu_group *cg;
	struct tdq *steal;
	cpuset_t mask;
	int thresh;
	int cpu;

	if (smp_started == 0 || steal_idle == 0)
		return (1);
	CPU_FILL(&mask);
	CPU_CLR(PCPU_GET(cpuid), &mask);
	/* We don't want to be preempted while we're iterating. */
	spinlock_enter();
	for (cg = tdq->tdq_cg; cg != NULL; ) {
		if ((cg->cg_flags & CG_FLAG_THREAD) == 0)
			thresh = steal_thresh;
		else
			thresh = 1;
		cpu = sched_highest(cg, mask, thresh);
		if (cpu == -1) {
			cg = cg->cg_parent;
			continue;
		}
		steal = TDQ_CPU(cpu);
		CPU_CLR(cpu, &mask);
		tdq_lock_pair(tdq, steal);
		if (steal->tdq_load < thresh || steal->tdq_transferable == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		/*
		 * If a thread was added while interrupts were disabled don't
		 * steal one here.  If we fail to acquire one due to affinity
		 * restrictions loop again with this cpu removed from the
		 * set.
		 */
		if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		spinlock_exit();
		TDQ_UNLOCK(steal);
		mi_switch(SW_VOL | SWT_IDLE, NULL);
		thread_unlock(curthread);

		return (0);
	}
	spinlock_exit();
	return (1);
}

/*
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct tdq *tdq, struct thread *td)
{
	struct thread *ctd;
	int pri;
	int cpu;

	if (tdq->tdq_ipipending)
		return;
	cpu = td->td_sched->ts_cpu;
	pri = td->td_priority;
	ctd = pcpu_find(cpu)->pc_curthread;
	if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
		return;
	if (TD_IS_IDLETHREAD(ctd)) {
		/*
		 * If the MD code has an idle wakeup routine try that before
		 * falling back to IPI.
		 */
		if (!tdq->tdq_cpu_idle || cpu_idle_wakeup(cpu))
			return;
	}
	tdq->tdq_ipipending = 1;
	ipi_cpu(cpu, IPI_PREEMPT);
}

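/*
 * Note: tdq_ipipending limits this path to one outstanding preemption
 * IPI per queue; the flag is cleared when the remote cpu handles the
 * preemption (in code beyond this excerpt).
 */
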
/*
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
static struct thread *
runq_steal_from(struct runq *rq, int cpu, u_char start)
{
	struct rqbits *rqb;
	struct rqhead *rqh;
	struct thread *td;
	int first;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW -1);
	pri = 0;
	first = 0;
again:
	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
		if (rqb->rqb_bits[i] == 0)
			continue;
		if (bit != 0) {
			for (pri = bit; pri < RQB_BPW; pri++)
				if (rqb->rqb_bits[i] & (1ul << pri))
					break;
			if (pri >= RQB_BPW)
				continue;
		} else
			pri = RQB_FFS(rqb->rqb_bits[i]);
		pri += (i << RQB_L2BPW);
		rqh = &rq->rq_queues[pri];
		TAILQ_FOREACH(td, rqh, td_runq) {
			if (first && THREAD_CAN_MIGRATE(td) &&
			    THREAD_CAN_SCHED(td, cpu))
				return (td);
			first = 1;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}

	return (NULL);
}

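/*
 * Note: the 'first' flag above makes the scan skip the very first
 * thread it examines (the head of the first non-empty queue at the
 * rotating index).  The rationale is not documented here; presumably it
 * leaves the victim cpu the thread it would run next.
 */
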
/*
 * Steals load from a standard linear queue.
 */
static struct thread *
runq_steal(struct runq *rq, int cpu)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct thread *td;
	int word;
	int bit;

	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(td, rqh, td_runq)
				if (THREAD_CAN_MIGRATE(td) &&
				    THREAD_CAN_SCHED(td, cpu))
					return (td);
		}
	}
	return (NULL);
}

1074ae7a6b38SJeff Roberson /*
1075ae7a6b38SJeff Roberson  * Attempt to steal a thread in priority order from a thread queue.
1076ae7a6b38SJeff Roberson  */
10779727e637SJeff Roberson static struct thread *
107862fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu)
107922bf7d9aSJeff Roberson {
10809727e637SJeff Roberson 	struct thread *td;
108122bf7d9aSJeff Roberson 
1082ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
10839727e637SJeff Roberson 	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
10849727e637SJeff Roberson 		return (td);
10859727e637SJeff Roberson 	if ((td = runq_steal_from(&tdq->tdq_timeshare,
10869727e637SJeff Roberson 	    cpu, tdq->tdq_ridx)) != NULL)
10879727e637SJeff Roberson 		return (td);
108862fa74d9SJeff Roberson 	return (runq_steal(&tdq->tdq_idle, cpu));
108922bf7d9aSJeff Roberson }
109080f86c9fSJeff Roberson 
1091ae7a6b38SJeff Roberson /*
1092ae7a6b38SJeff Roberson  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
10937fcf154aSJeff Roberson  * current lock and returns with the assigned queue locked.
1094ae7a6b38SJeff Roberson  */
1095ae7a6b38SJeff Roberson static inline struct tdq *
10969727e637SJeff Roberson sched_setcpu(struct thread *td, int cpu, int flags)
109780f86c9fSJeff Roberson {
1099ae7a6b38SJeff Roberson 	struct tdq *tdq;
110080f86c9fSJeff Roberson 
11019727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1102ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
11039727e637SJeff Roberson 	td->td_sched->ts_cpu = cpu;
11049727e637SJeff Roberson 	/*
11059727e637SJeff Roberson 	 * If the lock matches, just return the queue.
11069727e637SJeff Roberson 	 */
1107ae7a6b38SJeff Roberson 	if (td->td_lock == TDQ_LOCKPTR(tdq))
1108ae7a6b38SJeff Roberson 		return (tdq);
1109ae7a6b38SJeff Roberson #ifdef notyet
111080f86c9fSJeff Roberson 	/*
1111a5423ea3SJeff Roberson 	 * If the thread isn't running, its lockptr is a
1112ae7a6b38SJeff Roberson 	 * turnstile or a sleepqueue.  We can just lock_set without
1113ae7a6b38SJeff Roberson 	 * blocking.
1114670c524fSJeff Roberson 	 */
1115ae7a6b38SJeff Roberson 	if (TD_CAN_RUN(td)) {
1116ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
1117ae7a6b38SJeff Roberson 		thread_lock_set(td, TDQ_LOCKPTR(tdq));
1118ae7a6b38SJeff Roberson 		return (tdq);
1119ae7a6b38SJeff Roberson 	}
1120ae7a6b38SJeff Roberson #endif
112180f86c9fSJeff Roberson 	/*
1122ae7a6b38SJeff Roberson 	 * The hard case is migration: we need to block the thread first to
1123ae7a6b38SJeff Roberson 	 * prevent lock order reversals with other cpus' locks.
11247b8bfa0dSJeff Roberson 	 */
1125b0b9dee5SAttilio Rao 	spinlock_enter();
1126ae7a6b38SJeff Roberson 	thread_lock_block(td);
1127ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1128ae7a6b38SJeff Roberson 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1129b0b9dee5SAttilio Rao 	spinlock_exit();
1130ae7a6b38SJeff Roberson 	return (tdq);
113180f86c9fSJeff Roberson }
11322454aaf5SJeff Roberson 
11338df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding");
11348df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity");
11358df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity");
11368df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load");
11378df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu");
11388df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration");
11398df78c41SJeff Roberson 
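/*
 * Select a cpu for a newly runnable thread.  Threads that may not migrate,
 * or that are being switched out, keep ts_cpu.  An interrupt thread woken
 * from interrupt context prefers the current cpu.  Otherwise the last cpu
 * is reused while affinity holds or while it sits idle; failing that, the
 * least loaded cpu is picked from the smallest topology group that still
 * has affinity, then from the whole topology, and the winner is finally
 * weighed against the current cpu.
 */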
1140ae7a6b38SJeff Roberson static int
11419727e637SJeff Roberson sched_pickcpu(struct thread *td, int flags)
1142ae7a6b38SJeff Roberson {
114362fa74d9SJeff Roberson 	struct cpu_group *cg;
11449727e637SJeff Roberson 	struct td_sched *ts;
1145ae7a6b38SJeff Roberson 	struct tdq *tdq;
1146c76ee827SJeff Roberson 	cpuset_t mask;
11477b8bfa0dSJeff Roberson 	int self;
11487b8bfa0dSJeff Roberson 	int pri;
11497b8bfa0dSJeff Roberson 	int cpu;
11507b8bfa0dSJeff Roberson 
115162fa74d9SJeff Roberson 	self = PCPU_GET(cpuid);
11529727e637SJeff Roberson 	ts = td->td_sched;
11537b8bfa0dSJeff Roberson 	if (smp_started == 0)
11547b8bfa0dSJeff Roberson 		return (self);
115528994a58SJeff Roberson 	/*
115628994a58SJeff Roberson 	 * Don't migrate a running thread from sched_switch().
115728994a58SJeff Roberson 	 */
115862fa74d9SJeff Roberson 	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
115962fa74d9SJeff Roberson 		return (ts->ts_cpu);
11607b8bfa0dSJeff Roberson 	/*
116162fa74d9SJeff Roberson 	 * Prefer to run interrupt threads on the processor that generated
116262fa74d9SJeff Roberson 	 * the interrupt.
11637b8bfa0dSJeff Roberson 	 */
116462fa74d9SJeff Roberson 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
11658df78c41SJeff Roberson 	    curthread->td_intr_nesting_level && ts->ts_cpu != self) {
11668df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_intrbind);
116762fa74d9SJeff Roberson 		ts->ts_cpu = self;
11688df78c41SJeff Roberson 	}
116962fa74d9SJeff Roberson 	/*
117062fa74d9SJeff Roberson 	 * If the thread can run on the last cpu and the affinity has not
117162fa74d9SJeff Roberson 	 * expired, or that cpu is idle, run it there.
117262fa74d9SJeff Roberson 	 */
117362fa74d9SJeff Roberson 	pri = td->td_priority;
117462fa74d9SJeff Roberson 	tdq = TDQ_CPU(ts->ts_cpu);
117562fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu)) {
11768df78c41SJeff Roberson 		if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
11778df78c41SJeff Roberson 			SCHED_STAT_INC(pickcpu_idle_affinity);
117862fa74d9SJeff Roberson 			return (ts->ts_cpu);
11798df78c41SJeff Roberson 		}
11808df78c41SJeff Roberson 		if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri) {
11818df78c41SJeff Roberson 			SCHED_STAT_INC(pickcpu_affinity);
11827b8bfa0dSJeff Roberson 			return (ts->ts_cpu);
11837b8bfa0dSJeff Roberson 		}
11848df78c41SJeff Roberson 	}
11857b8bfa0dSJeff Roberson 	/*
118662fa74d9SJeff Roberson 	 * Search for the highest level in the tree that still has affinity.
11877b8bfa0dSJeff Roberson 	 */
118862fa74d9SJeff Roberson 	cg = NULL;
118962fa74d9SJeff Roberson 	for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent)
119062fa74d9SJeff Roberson 		if (SCHED_AFFINITY(ts, cg->cg_level))
119162fa74d9SJeff Roberson 			break;
119262fa74d9SJeff Roberson 	cpu = -1;
1193c76ee827SJeff Roberson 	mask = td->td_cpuset->cs_mask;
119462fa74d9SJeff Roberson 	if (cg)
119562fa74d9SJeff Roberson 		cpu = sched_lowest(cg, mask, pri);
119662fa74d9SJeff Roberson 	if (cpu == -1)
119762fa74d9SJeff Roberson 		cpu = sched_lowest(cpu_top, mask, -1);
119862fa74d9SJeff Roberson 	/*
119962fa74d9SJeff Roberson 	 * Compare the lowest loaded cpu to current cpu.
120062fa74d9SJeff Roberson 	 */
1201ff256d9cSJeff Roberson 	if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri &&
12028df78c41SJeff Roberson 	    TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE) {
12038df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_local);
120462fa74d9SJeff Roberson 		cpu = self;
12058df78c41SJeff Roberson 	} else
12068df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_lowest);
12078df78c41SJeff Roberson 	if (cpu != ts->ts_cpu)
12088df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_migration);
1209ff256d9cSJeff Roberson 	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
1210ae7a6b38SJeff Roberson 	return (cpu);
121180f86c9fSJeff Roberson }
121262fa74d9SJeff Roberson #endif
121322bf7d9aSJeff Roberson 
121422bf7d9aSJeff Roberson /*
121522bf7d9aSJeff Roberson  * Pick the highest priority task we have and return it.
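 * The realtime queue is checked first, then the timeshare queue at its
 * rotating head index, and finally the idle queue.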
12160c0a98b2SJeff Roberson  */
12179727e637SJeff Roberson static struct thread *
1218ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq)
12195d7ef00cSJeff Roberson {
12209727e637SJeff Roberson 	struct thread *td;
12215d7ef00cSJeff Roberson 
1222ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
12239727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_realtime);
12249727e637SJeff Roberson 	if (td != NULL)
12259727e637SJeff Roberson 		return (td);
12269727e637SJeff Roberson 	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
12279727e637SJeff Roberson 	if (td != NULL) {
122812d56c0fSJohn Baldwin 		KASSERT(td->td_priority >= PRI_MIN_BATCH,
1229e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on timeshare queue %d",
12309727e637SJeff Roberson 		    td->td_priority));
12319727e637SJeff Roberson 		return (td);
123215dc847eSJeff Roberson 	}
12339727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_idle);
12349727e637SJeff Roberson 	if (td != NULL) {
12359727e637SJeff Roberson 		KASSERT(td->td_priority >= PRI_MIN_IDLE,
1236e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on idle queue %d",
12379727e637SJeff Roberson 		    td->td_priority));
12389727e637SJeff Roberson 		return (td);
1239e7d50326SJeff Roberson 	}
1240e7d50326SJeff Roberson 
1241e7d50326SJeff Roberson 	return (NULL);
1242245f3abfSJeff Roberson }
12430a016a05SJeff Roberson 
1244ae7a6b38SJeff Roberson /*
1245ae7a6b38SJeff Roberson  * Initialize a thread queue.
1246ae7a6b38SJeff Roberson  */
12470a016a05SJeff Roberson static void
1248ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq)
12490a016a05SJeff Roberson {
1250ae7a6b38SJeff Roberson 
1251c47f202bSJeff Roberson 	if (bootverbose)
1252c47f202bSJeff Roberson 		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1253e7d50326SJeff Roberson 	runq_init(&tdq->tdq_realtime);
1254e7d50326SJeff Roberson 	runq_init(&tdq->tdq_timeshare);
1255d2ad694cSJeff Roberson 	runq_init(&tdq->tdq_idle);
125662fa74d9SJeff Roberson 	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
125762fa74d9SJeff Roberson 	    "sched lock %d", (int)TDQ_ID(tdq));
125862fa74d9SJeff Roberson 	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
125962fa74d9SJeff Roberson 	    MTX_SPIN | MTX_RECURSE);
12608f51ad55SJeff Roberson #ifdef KTR
12618f51ad55SJeff Roberson 	snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
12628f51ad55SJeff Roberson 	    "CPU %d load", (int)TDQ_ID(tdq));
12638f51ad55SJeff Roberson #endif
12640a016a05SJeff Roberson }
12650a016a05SJeff Roberson 
1266c47f202bSJeff Roberson #ifdef SMP
1267c47f202bSJeff Roberson static void
1268c47f202bSJeff Roberson sched_setup_smp(void)
1269c47f202bSJeff Roberson {
1270c47f202bSJeff Roberson 	struct tdq *tdq;
1271c47f202bSJeff Roberson 	int i;
1272c47f202bSJeff Roberson 
127362fa74d9SJeff Roberson 	cpu_top = smp_topo();
12743aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
127562fa74d9SJeff Roberson 		tdq = TDQ_CPU(i);
1276c47f202bSJeff Roberson 		tdq_setup(tdq);
127762fa74d9SJeff Roberson 		tdq->tdq_cg = smp_topo_find(cpu_top, i);
127862fa74d9SJeff Roberson 		if (tdq->tdq_cg == NULL)
127962fa74d9SJeff Roberson 			panic("Can't find cpu group for %d\n", i);
1280c47f202bSJeff Roberson 	}
128162fa74d9SJeff Roberson 	balance_tdq = TDQ_SELF();
128262fa74d9SJeff Roberson 	sched_balance();
1283c47f202bSJeff Roberson }
1284c47f202bSJeff Roberson #endif
1285c47f202bSJeff Roberson 
1286ae7a6b38SJeff Roberson /*
1287ae7a6b38SJeff Roberson  * Setup the thread queues and initialize the topology based on MD
1288ae7a6b38SJeff Roberson  * information.
1289ae7a6b38SJeff Roberson  */
129035e6168fSJeff Roberson static void
129135e6168fSJeff Roberson sched_setup(void *dummy)
129235e6168fSJeff Roberson {
1293ae7a6b38SJeff Roberson 	struct tdq *tdq;
1294c47f202bSJeff Roberson 
1295c47f202bSJeff Roberson 	tdq = TDQ_SELF();
12960ec896fdSJeff Roberson #ifdef SMP
1297c47f202bSJeff Roberson 	sched_setup_smp();
1298749d01b0SJeff Roberson #else
1299c47f202bSJeff Roberson 	tdq_setup(tdq);
1300356500a3SJeff Roberson #endif
1301ae7a6b38SJeff Roberson 	/*
1302ae7a6b38SJeff Roberson 	 * To avoid divide-by-zero, we set realstathz to a dummy value
1303ae7a6b38SJeff Roberson 	 * in case sched_clock() is called before sched_initticks().
1304ae7a6b38SJeff Roberson 	 */
1305ae7a6b38SJeff Roberson 	realstathz = hz;
1306ae7a6b38SJeff Roberson 	sched_slice = (realstathz/10);	/* ~100ms */
1307ae7a6b38SJeff Roberson 	tickincr = 1 << SCHED_TICK_SHIFT;
1308ae7a6b38SJeff Roberson 
1309ae7a6b38SJeff Roberson 	/* Add thread0's load since it's running. */
1310ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1311c47f202bSJeff Roberson 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
13129727e637SJeff Roberson 	tdq_load_add(tdq, &thread0);
131362fa74d9SJeff Roberson 	tdq->tdq_lowpri = thread0.td_priority;
1314ae7a6b38SJeff Roberson 	TDQ_UNLOCK(tdq);
131535e6168fSJeff Roberson }
131635e6168fSJeff Roberson 
1317ae7a6b38SJeff Roberson /*
1318ae7a6b38SJeff Roberson  * This routine determines the tickincr after stathz and hz are setup.
1319ae7a6b38SJeff Roberson  */
1320a1d4fe69SDavid Xu /* ARGSUSED */
1321a1d4fe69SDavid Xu static void
1322a1d4fe69SDavid Xu sched_initticks(void *dummy)
1323a1d4fe69SDavid Xu {
1324ae7a6b38SJeff Roberson 	int incr;
1325ae7a6b38SJeff Roberson 
1326a1d4fe69SDavid Xu 	realstathz = stathz ? stathz : hz;
132714618990SJeff Roberson 	sched_slice = (realstathz/10);	/* ~100ms */
1328a1d4fe69SDavid Xu 
1329a1d4fe69SDavid Xu 	/*
1330e7d50326SJeff Roberson 	 * tickincr is scaled up by SCHED_TICK_SHIFT (10 bits) to avoid rounding
13313f872f85SJeff Roberson 	 * errors, since hz is not evenly divisible by stathz on all platforms.
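	 *
	 * For example, with assumed values hz = 1000 and stathz = 128 this
	 * yields incr = (1000 << 10) / 128 = 8000, so each stathz tick
	 * accounts for 8000 / 1024 ~= 7.8 hz ticks of run time in fixed point.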
1332e7d50326SJeff Roberson 	 */
1333ae7a6b38SJeff Roberson 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1334e7d50326SJeff Roberson 	/*
1335e7d50326SJeff Roberson 	 * This does not work for values of stathz that are more than
1336e7d50326SJeff Roberson 	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
1337a1d4fe69SDavid Xu 	 */
1338ae7a6b38SJeff Roberson 	if (incr == 0)
1339ae7a6b38SJeff Roberson 		incr = 1;
1340ae7a6b38SJeff Roberson 	tickincr = incr;
13417b8bfa0dSJeff Roberson #ifdef SMP
13429862717aSJeff Roberson 	/*
13437fcf154aSJeff Roberson 	 * Set the default balance interval now that we know
13447fcf154aSJeff Roberson 	 * what realstathz is.
13457fcf154aSJeff Roberson 	 */
13467fcf154aSJeff Roberson 	balance_interval = realstathz;
13477fcf154aSJeff Roberson 	/*
134853a6c8b3SJeff Roberson 	 * Set steal_thresh to roughly log2(mp_ncpus) but no greater than 3.
134953a6c8b3SJeff Roberson 	 * This prevents excess thrashing on large machines and excess idle
135053a6c8b3SJeff Roberson 	 * on smaller machines.
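	 * For example: fls(1) - 1 = 0, fls(2) - 1 = 1, fls(8) - 1 = 3, and
	 * 16 or more cpus clamp at the maximum of 3.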
13519862717aSJeff Roberson 	 */
135253a6c8b3SJeff Roberson 	steal_thresh = min(fls(mp_ncpus) - 1, 3);
13537b8bfa0dSJeff Roberson 	affinity = SCHED_AFFINITY_DEFAULT;
13547b8bfa0dSJeff Roberson #endif
1355a1d4fe69SDavid Xu }
1356a1d4fe69SDavid Xu 
1357a1d4fe69SDavid Xu 
135835e6168fSJeff Roberson /*
1359ae7a6b38SJeff Roberson  * This is the core of the interactivity algorithm.  Determines a score based
1360ae7a6b38SJeff Roberson  * on past behavior.  It is the ratio of sleep time to run time scaled to
1361ae7a6b38SJeff Roberson  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1362ae7a6b38SJeff Roberson  * differs from the cpu usage because it does not account for time spent
1363ae7a6b38SJeff Roberson  * waiting on a run-queue.  Would be prettier if we had floating point.
1364ae7a6b38SJeff Roberson  */
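 *
 * As a worked example, assuming SCHED_INTERACT_HALF is 50 (half of the
 * [0, 100] range above): a thread sleeping three times as long as it runs
 * scores roughly 50 * runtime / slptime ~= 17, well inside the interactive
 * half, while one running three times as long as it sleeps scores roughly
 * 100 - 50 * slptime / runtime ~= 83.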
1365ae7a6b38SJeff Roberson static int
1366ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1367ae7a6b38SJeff Roberson {
1368ae7a6b38SJeff Roberson 	struct td_sched *ts;
1369ae7a6b38SJeff Roberson 	int div;
1370ae7a6b38SJeff Roberson 
1371ae7a6b38SJeff Roberson 	ts = td->td_sched;
1372ae7a6b38SJeff Roberson 	/*
1373ae7a6b38SJeff Roberson 	 * The score is only needed if this is likely to be an interactive
1374ae7a6b38SJeff Roberson 	 * task.  Don't go through the expense of computing it if there's
1375ae7a6b38SJeff Roberson 	 * no chance.
1376ae7a6b38SJeff Roberson 	 */
1377ae7a6b38SJeff Roberson 	if (sched_interact <= SCHED_INTERACT_HALF &&
1378ae7a6b38SJeff Roberson 		ts->ts_runtime >= ts->ts_slptime)
1379ae7a6b38SJeff Roberson 			return (SCHED_INTERACT_HALF);
1380ae7a6b38SJeff Roberson 
1381ae7a6b38SJeff Roberson 	if (ts->ts_runtime > ts->ts_slptime) {
1382ae7a6b38SJeff Roberson 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1383ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF +
1384ae7a6b38SJeff Roberson 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1385ae7a6b38SJeff Roberson 	}
1386ae7a6b38SJeff Roberson 	if (ts->ts_slptime > ts->ts_runtime) {
1387ae7a6b38SJeff Roberson 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1388ae7a6b38SJeff Roberson 		return (ts->ts_runtime / div);
1389ae7a6b38SJeff Roberson 	}
1390ae7a6b38SJeff Roberson 	/* runtime == slptime */
1391ae7a6b38SJeff Roberson 	if (ts->ts_runtime)
1392ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF);
1393ae7a6b38SJeff Roberson 
1394ae7a6b38SJeff Roberson 	/*
1395ae7a6b38SJeff Roberson 	 * This can happen if slptime and runtime are 0.
1396ae7a6b38SJeff Roberson 	 */
1397ae7a6b38SJeff Roberson 	return (0);
1398ae7a6b38SJeff Roberson 
1399ae7a6b38SJeff Roberson }
1400ae7a6b38SJeff Roberson 
1401ae7a6b38SJeff Roberson /*
140235e6168fSJeff Roberson  * Scale the scheduling priority according to the "interactivity" of this
140335e6168fSJeff Roberson  * process.
140435e6168fSJeff Roberson  */
140515dc847eSJeff Roberson static void
14068460a577SJohn Birrell sched_priority(struct thread *td)
140735e6168fSJeff Roberson {
1408e7d50326SJeff Roberson 	int score;
140935e6168fSJeff Roberson 	int pri;
141035e6168fSJeff Roberson 
1411c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
141215dc847eSJeff Roberson 		return;
1413e7d50326SJeff Roberson 	/*
1414e7d50326SJeff Roberson 	 * If the score is interactive we place the thread in the realtime
1415e7d50326SJeff Roberson 	 * queue with a priority that is less than kernel and interrupt
1416e7d50326SJeff Roberson 	 * priorities.  These threads are not subject to nice restrictions.
1417e7d50326SJeff Roberson 	 *
1418ae7a6b38SJeff Roberson 	 * Scores greater than this are placed on the normal timeshare queue
1419e7d50326SJeff Roberson 	 * where the priority is partially decided by the most recent cpu
1420e7d50326SJeff Roberson 	 * utilization and the rest is decided by nice value.
1421a5423ea3SJeff Roberson 	 *
1422a5423ea3SJeff Roberson 	 * The nice value of the process has a linear effect on the calculated
1423a5423ea3SJeff Roberson 	 * score.  Negative nice values make it easier for a thread to be
1424a5423ea3SJeff Roberson 	 * considered interactive.
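	 *
	 * As a sketch, assuming the default sched_interact of 30: a score of
	 * 0 maps to PRI_MIN_INTERACT, each additional score point adds
	 * (PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) / 30 priority levels, and
	 * a score of 29 lands just below PRI_MAX_INTERACT.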
1425e7d50326SJeff Roberson 	 */
1426a0f15352SJohn Baldwin 	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1427e7d50326SJeff Roberson 	if (score < sched_interact) {
142812d56c0fSJohn Baldwin 		pri = PRI_MIN_INTERACT;
142912d56c0fSJohn Baldwin 		pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) /
143078920008SJohn Baldwin 		    sched_interact) * score;
143112d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
14329a93305aSJeff Roberson 		    ("sched_priority: invalid interactive priority %d score %d",
14339a93305aSJeff Roberson 		    pri, score));
1434e7d50326SJeff Roberson 	} else {
1435e7d50326SJeff Roberson 		pri = SCHED_PRI_MIN;
1436e7d50326SJeff Roberson 		if (td->td_sched->ts_ticks)
1437e7d50326SJeff Roberson 			pri += SCHED_PRI_TICKS(td->td_sched);
1438e7d50326SJeff Roberson 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
143912d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
1440ae7a6b38SJeff Roberson 		    ("sched_priority: invalid priority %d: nice %d, "
1441ae7a6b38SJeff Roberson 		    "ticks %d ftick %d ltick %d tick pri %d",
1442ae7a6b38SJeff Roberson 		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1443ae7a6b38SJeff Roberson 		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1444ae7a6b38SJeff Roberson 		    SCHED_PRI_TICKS(td->td_sched)));
1445e7d50326SJeff Roberson 	}
14468460a577SJohn Birrell 	sched_user_prio(td, pri);
144735e6168fSJeff Roberson 
144815dc847eSJeff Roberson 	return;
144935e6168fSJeff Roberson }
145035e6168fSJeff Roberson 
145135e6168fSJeff Roberson /*
1452d322132cSJeff Roberson  * This routine enforces a maximum limit on the amount of scheduling history
1453ae7a6b38SJeff Roberson  * kept.  It is called after either the slptime or runtime is adjusted.  This
1454ae7a6b38SJeff Roberson  * function is ugly due to integer math.
1455d322132cSJeff Roberson  */
14564b60e324SJeff Roberson static void
14578460a577SJohn Birrell sched_interact_update(struct thread *td)
14584b60e324SJeff Roberson {
1459155b6ca1SJeff Roberson 	struct td_sched *ts;
14609a93305aSJeff Roberson 	u_int sum;
14613f741ca1SJeff Roberson 
1462155b6ca1SJeff Roberson 	ts = td->td_sched;
1463ae7a6b38SJeff Roberson 	sum = ts->ts_runtime + ts->ts_slptime;
1464d322132cSJeff Roberson 	if (sum < SCHED_SLP_RUN_MAX)
1465d322132cSJeff Roberson 		return;
1466d322132cSJeff Roberson 	/*
1467155b6ca1SJeff Roberson 	 * This only happens from two places:
1468155b6ca1SJeff Roberson 	 * 1) We have added an unusual amount of run time from fork_exit.
1469155b6ca1SJeff Roberson 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1470155b6ca1SJeff Roberson 	 */
1471155b6ca1SJeff Roberson 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1472ae7a6b38SJeff Roberson 		if (ts->ts_runtime > ts->ts_slptime) {
1473ae7a6b38SJeff Roberson 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1474ae7a6b38SJeff Roberson 			ts->ts_slptime = 1;
1475155b6ca1SJeff Roberson 		} else {
1476ae7a6b38SJeff Roberson 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1477ae7a6b38SJeff Roberson 			ts->ts_runtime = 1;
1478155b6ca1SJeff Roberson 		}
1479155b6ca1SJeff Roberson 		return;
1480155b6ca1SJeff Roberson 	}
1481155b6ca1SJeff Roberson 	/*
1482d322132cSJeff Roberson 	 * If we have exceeded the cap by more than 1/5th then the 4/5
1483d322132cSJeff Roberson 	 * scaling below will not bring us back into range.  Dividing by two
14842454aaf5SJeff Roberson 	 * here forces the sum back within SCHED_SLP_RUN_MAX.
1485d322132cSJeff Roberson 	 */
148637a35e4aSJeff Roberson 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1487ae7a6b38SJeff Roberson 		ts->ts_runtime /= 2;
1488ae7a6b38SJeff Roberson 		ts->ts_slptime /= 2;
1489d322132cSJeff Roberson 		return;
1490d322132cSJeff Roberson 	}
1491ae7a6b38SJeff Roberson 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1492ae7a6b38SJeff Roberson 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1493d322132cSJeff Roberson }
1494d322132cSJeff Roberson 
1495ae7a6b38SJeff Roberson /*
1496ae7a6b38SJeff Roberson  * Scale back the interactivity history when a child thread is created.  The
1497ae7a6b38SJeff Roberson  * history is inherited from the parent but the thread may behave totally
1498ae7a6b38SJeff Roberson  * differently.  For example, a shell spawning a compiler process.  We want
1499ae7a6b38SJeff Roberson  * to learn that the compiler is behaving badly very quickly.
1500ae7a6b38SJeff Roberson  */
1501d322132cSJeff Roberson static void
15028460a577SJohn Birrell sched_interact_fork(struct thread *td)
1503d322132cSJeff Roberson {
1504d322132cSJeff Roberson 	int ratio;
1505d322132cSJeff Roberson 	int sum;
1506d322132cSJeff Roberson 
1507ae7a6b38SJeff Roberson 	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1508d322132cSJeff Roberson 	if (sum > SCHED_SLP_RUN_FORK) {
1509d322132cSJeff Roberson 		ratio = sum / SCHED_SLP_RUN_FORK;
1510ae7a6b38SJeff Roberson 		td->td_sched->ts_runtime /= ratio;
1511ae7a6b38SJeff Roberson 		td->td_sched->ts_slptime /= ratio;
15124b60e324SJeff Roberson 	}
15134b60e324SJeff Roberson }
15144b60e324SJeff Roberson 
151515dc847eSJeff Roberson /*
1516ae7a6b38SJeff Roberson  * Called from proc0_init() to setup the scheduler fields.
1517ed062c8dSJulian Elischer  */
1518ed062c8dSJulian Elischer void
1519ed062c8dSJulian Elischer schedinit(void)
1520ed062c8dSJulian Elischer {
1521e7d50326SJeff Roberson 
1522ed062c8dSJulian Elischer 	/*
1523ed062c8dSJulian Elischer 	 * Set up the scheduler specific parts of proc0.
1524ed062c8dSJulian Elischer 	 */
1525ed062c8dSJulian Elischer 	proc0.p_sched = NULL; /* XXX */
1526ad1e7d28SJulian Elischer 	thread0.td_sched = &td_sched0;
1527e7d50326SJeff Roberson 	td_sched0.ts_ltick = ticks;
15288ab80cf0SJeff Roberson 	td_sched0.ts_ftick = ticks;
152973daf66fSJeff Roberson 	td_sched0.ts_slice = sched_slice;
1530ed062c8dSJulian Elischer }
1531ed062c8dSJulian Elischer 
1532ed062c8dSJulian Elischer /*
153315dc847eSJeff Roberson  * This is only somewhat accurate: given many processes of the same
153415dc847eSJeff Roberson  * priority, they will switch when their slices run out, which will be
1535e7d50326SJeff Roberson  * at most sched_slice stathz ticks.
153615dc847eSJeff Roberson  */
153735e6168fSJeff Roberson int
153835e6168fSJeff Roberson sched_rr_interval(void)
153935e6168fSJeff Roberson {
1540e7d50326SJeff Roberson 
1541e7d50326SJeff Roberson 	/* Convert sched_slice to hz */
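	/*
	 * A sketch with assumed values hz = 1000 and realstathz = 127:
	 * sched_slice = 127 / 10 = 12, so this returns
	 * 1000 / (127 / 12) = 1000 / 10 = 100 ticks, i.e. roughly 100ms.
	 */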
1542e7d50326SJeff Roberson 	return (hz/(realstathz/sched_slice));
154335e6168fSJeff Roberson }
154435e6168fSJeff Roberson 
1545ae7a6b38SJeff Roberson /*
1546ae7a6b38SJeff Roberson  * Update the percent cpu tracking information when it is requested or
1547ae7a6b38SJeff Roberson  * the total history exceeds the maximum.  We keep a sliding history of
1548ae7a6b38SJeff Roberson  * tick counts that slowly decays.  This is less precise than the 4BSD
1549ae7a6b38SJeff Roberson  * mechanism since it happens with less regular and frequent events.
1550ae7a6b38SJeff Roberson  */
155122bf7d9aSJeff Roberson static void
1552ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts)
155335e6168fSJeff Roberson {
1554e7d50326SJeff Roberson 
1555e7d50326SJeff Roberson 	if (ts->ts_ticks == 0)
1556e7d50326SJeff Roberson 		return;
15578ab80cf0SJeff Roberson 	if (ticks - (hz / 10) < ts->ts_ltick &&
15588ab80cf0SJeff Roberson 	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
15598ab80cf0SJeff Roberson 		return;
156035e6168fSJeff Roberson 	/*
156135e6168fSJeff Roberson 	 * Adjust counters and watermark for pctcpu calc.
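	 * If the thread was last charged within SCHED_TICK_TARG ticks of now,
	 * rescale ts_ticks from the elapsed window onto a SCHED_TICK_TARG
	 * window; otherwise the history has aged out entirely and the count
	 * restarts at zero.  ts_ftick is then pulled forward so the window
	 * always spans exactly SCHED_TICK_TARG ticks.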
1562210491d3SJeff Roberson 	 */
1563e7d50326SJeff Roberson 	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1564ad1e7d28SJulian Elischer 		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1565e7d50326SJeff Roberson 			    SCHED_TICK_TARG;
1566e7d50326SJeff Roberson 	else
1567ad1e7d28SJulian Elischer 		ts->ts_ticks = 0;
1568ad1e7d28SJulian Elischer 	ts->ts_ltick = ticks;
1569e7d50326SJeff Roberson 	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
157035e6168fSJeff Roberson }
157135e6168fSJeff Roberson 
1572ae7a6b38SJeff Roberson /*
1573ae7a6b38SJeff Roberson  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1574ae7a6b38SJeff Roberson  * if necessary.  This is the back-end for several priority related
1575ae7a6b38SJeff Roberson  * functions.
1576ae7a6b38SJeff Roberson  */
1577e7d50326SJeff Roberson static void
1578f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio)
157935e6168fSJeff Roberson {
1580ad1e7d28SJulian Elischer 	struct td_sched *ts;
158173daf66fSJeff Roberson 	struct tdq *tdq;
158273daf66fSJeff Roberson 	int oldpri;
158335e6168fSJeff Roberson 
15848f51ad55SJeff Roberson 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
15858f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, "new prio:%d", prio,
15868f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(curthread));
15878f51ad55SJeff Roberson 	if (td != curthread && prio > td->td_priority) {
15888f51ad55SJeff Roberson 		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
15898f51ad55SJeff Roberson 		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
15908f51ad55SJeff Roberson 		    prio, KTR_ATTR_LINKED, sched_tdname(td));
15918f51ad55SJeff Roberson 	}
1592ad1e7d28SJulian Elischer 	ts = td->td_sched;
15937b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1594f5c157d9SJohn Baldwin 	if (td->td_priority == prio)
1595f5c157d9SJohn Baldwin 		return;
15963f741ca1SJeff Roberson 	/*
15973f741ca1SJeff Roberson 	 * If the priority has been elevated due to priority
15983f741ca1SJeff Roberson 	 * propagation, we may have to move ourselves to a new
1599e7d50326SJeff Roberson 	 * queue.  This could be optimized to not re-add in some
1600e7d50326SJeff Roberson 	 * cases.
1601f2b74cbfSJeff Roberson 	 */
16026d55b3ecSJeff Roberson 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1603e7d50326SJeff Roberson 		sched_rem(td);
1604e7d50326SJeff Roberson 		td->td_priority = prio;
1605ae7a6b38SJeff Roberson 		sched_add(td, SRQ_BORROWING);
160673daf66fSJeff Roberson 		return;
160773daf66fSJeff Roberson 	}
16086d55b3ecSJeff Roberson 	/*
16096d55b3ecSJeff Roberson 	 * If the thread is currently running we may have to adjust the lowpri
16106d55b3ecSJeff Roberson 	 * information so other cpus are aware of our current priority.
16116d55b3ecSJeff Roberson 	 */
16126d55b3ecSJeff Roberson 	if (TD_IS_RUNNING(td)) {
1613ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(ts->ts_cpu);
161462fa74d9SJeff Roberson 		oldpri = td->td_priority;
16153f741ca1SJeff Roberson 		td->td_priority = prio;
161662fa74d9SJeff Roberson 		if (prio < tdq->tdq_lowpri)
161762fa74d9SJeff Roberson 			tdq->tdq_lowpri = prio;
161862fa74d9SJeff Roberson 		else if (tdq->tdq_lowpri == oldpri)
161962fa74d9SJeff Roberson 			tdq_setlowpri(tdq, td);
16206d55b3ecSJeff Roberson 		return;
162173daf66fSJeff Roberson 	}
16226d55b3ecSJeff Roberson 	td->td_priority = prio;
1623ae7a6b38SJeff Roberson }
162435e6168fSJeff Roberson 
1625f5c157d9SJohn Baldwin /*
1626f5c157d9SJohn Baldwin  * Update a thread's priority when it is lent another thread's
1627f5c157d9SJohn Baldwin  * priority.
1628f5c157d9SJohn Baldwin  */
1629f5c157d9SJohn Baldwin void
1630f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1631f5c157d9SJohn Baldwin {
1632f5c157d9SJohn Baldwin 
1633f5c157d9SJohn Baldwin 	td->td_flags |= TDF_BORROWING;
1634f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1635f5c157d9SJohn Baldwin }
1636f5c157d9SJohn Baldwin 
1637f5c157d9SJohn Baldwin /*
1638f5c157d9SJohn Baldwin  * Restore a thread's priority when priority propagation is
1639f5c157d9SJohn Baldwin  * over.  The prio argument is the minimum priority the thread
1640f5c157d9SJohn Baldwin  * needs to have to satisfy other possible priority lending
1641f5c157d9SJohn Baldwin  * requests.  If the thread's regular priority is less
1642f5c157d9SJohn Baldwin  * important than prio, the thread will keep a priority boost
1643f5c157d9SJohn Baldwin  * of prio.
1644f5c157d9SJohn Baldwin  */
1645f5c157d9SJohn Baldwin void
1646f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1647f5c157d9SJohn Baldwin {
1648f5c157d9SJohn Baldwin 	u_char base_pri;
1649f5c157d9SJohn Baldwin 
1650f5c157d9SJohn Baldwin 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1651f5c157d9SJohn Baldwin 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
16528460a577SJohn Birrell 		base_pri = td->td_user_pri;
1653f5c157d9SJohn Baldwin 	else
1654f5c157d9SJohn Baldwin 		base_pri = td->td_base_pri;
1655f5c157d9SJohn Baldwin 	if (prio >= base_pri) {
1656f5c157d9SJohn Baldwin 		td->td_flags &= ~TDF_BORROWING;
1657f5c157d9SJohn Baldwin 		sched_thread_priority(td, base_pri);
1658f5c157d9SJohn Baldwin 	} else
1659f5c157d9SJohn Baldwin 		sched_lend_prio(td, prio);
1660f5c157d9SJohn Baldwin }
1661f5c157d9SJohn Baldwin 
1662ae7a6b38SJeff Roberson /*
1663ae7a6b38SJeff Roberson  * Standard entry for setting the priority to an absolute value.
1664ae7a6b38SJeff Roberson  */
1665f5c157d9SJohn Baldwin void
1666f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1667f5c157d9SJohn Baldwin {
1668f5c157d9SJohn Baldwin 	u_char oldprio;
1669f5c157d9SJohn Baldwin 
1670f5c157d9SJohn Baldwin 	/* First, update the base priority. */
1671f5c157d9SJohn Baldwin 	td->td_base_pri = prio;
1672f5c157d9SJohn Baldwin 
1673f5c157d9SJohn Baldwin 	/*
167450aaa791SJohn Baldwin 	 * If the thread is borrowing another thread's priority, don't
1675f5c157d9SJohn Baldwin 	 * ever lower the priority.
1676f5c157d9SJohn Baldwin 	 */
1677f5c157d9SJohn Baldwin 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1678f5c157d9SJohn Baldwin 		return;
1679f5c157d9SJohn Baldwin 
1680f5c157d9SJohn Baldwin 	/* Change the real priority. */
1681f5c157d9SJohn Baldwin 	oldprio = td->td_priority;
1682f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1683f5c157d9SJohn Baldwin 
1684f5c157d9SJohn Baldwin 	/*
1685f5c157d9SJohn Baldwin 	 * If the thread is on a turnstile, then let the turnstile update
1686f5c157d9SJohn Baldwin 	 * its state.
1687f5c157d9SJohn Baldwin 	 */
1688f5c157d9SJohn Baldwin 	if (TD_ON_LOCK(td) && oldprio != prio)
1689f5c157d9SJohn Baldwin 		turnstile_adjust(td, oldprio);
1690f5c157d9SJohn Baldwin }
1691f5c157d9SJohn Baldwin 
1692ae7a6b38SJeff Roberson /*
1693ae7a6b38SJeff Roberson  * Set the base user priority; does not affect the current running priority.
1694ae7a6b38SJeff Roberson  */
169535e6168fSJeff Roberson void
16968460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
16973db720fdSDavid Xu {
16983db720fdSDavid Xu 
16998460a577SJohn Birrell 	td->td_base_user_pri = prio;
1700acbe332aSDavid Xu 	if (td->td_lend_user_pri <= prio)
1701fc6c30f6SJulian Elischer 		return;
17028460a577SJohn Birrell 	td->td_user_pri = prio;
17033db720fdSDavid Xu }
17043db720fdSDavid Xu 
17053db720fdSDavid Xu void
17063db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
17073db720fdSDavid Xu {
17083db720fdSDavid Xu 
1709435806d3SDavid Xu 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1710acbe332aSDavid Xu 	td->td_lend_user_pri = prio;
1711c8e368a9SDavid Xu 	td->td_user_pri = min(prio, td->td_base_user_pri);
1712c8e368a9SDavid Xu 	if (td->td_priority > td->td_user_pri)
1713c8e368a9SDavid Xu 		sched_prio(td, td->td_user_pri);
1714c8e368a9SDavid Xu 	else if (td->td_priority != td->td_user_pri)
1715c8e368a9SDavid Xu 		td->td_flags |= TDF_NEEDRESCHED;
1716435806d3SDavid Xu }
17173db720fdSDavid Xu 
1718ae7a6b38SJeff Roberson /*
1719c47f202bSJeff Roberson  * Handle migration from sched_switch().  This happens only for
1720c47f202bSJeff Roberson  * cpu binding.
1721c47f202bSJeff Roberson  */
1722c47f202bSJeff Roberson static struct mtx *
1723c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1724c47f202bSJeff Roberson {
1725c47f202bSJeff Roberson 	struct tdq *tdn;
1726c47f202bSJeff Roberson 
1727c47f202bSJeff Roberson 	tdn = TDQ_CPU(td->td_sched->ts_cpu);
1728c47f202bSJeff Roberson #ifdef SMP
17299727e637SJeff Roberson 	tdq_load_rem(tdq, td);
1730c47f202bSJeff Roberson 	/*
1731c47f202bSJeff Roberson 	 * Do the lock dance required to avoid LOR.  We grab an extra
1732c47f202bSJeff Roberson 	 * spinlock nesting to prevent preemption while we're
1733c47f202bSJeff Roberson 	 * not holding either run-queue lock.
1734c47f202bSJeff Roberson 	 */
1735c47f202bSJeff Roberson 	spinlock_enter();
1736b0b9dee5SAttilio Rao 	thread_lock_block(td);	/* This releases the lock on tdq. */
1737435068aaSAttilio Rao 
1738435068aaSAttilio Rao 	/*
1739435068aaSAttilio Rao 	 * Acquire both run-queue locks before placing the thread on the new
1740435068aaSAttilio Rao 	 * run-queue to avoid deadlocks created by placing a thread with a
1741435068aaSAttilio Rao 	 * blocked lock on the run-queue of a remote processor.  The deadlock
1742435068aaSAttilio Rao 	 * occurs when a third processor attempts to lock the two queues in
1743435068aaSAttilio Rao 	 * question while the target processor is spinning with its own
1744435068aaSAttilio Rao 	 * run-queue lock held while waiting for the blocked lock to clear.
1745435068aaSAttilio Rao 	 */
1746435068aaSAttilio Rao 	tdq_lock_pair(tdn, tdq);
1747c47f202bSJeff Roberson 	tdq_add(tdn, td, flags);
17489727e637SJeff Roberson 	tdq_notify(tdn, td);
1749c47f202bSJeff Roberson 	TDQ_UNLOCK(tdn);
1750c47f202bSJeff Roberson 	spinlock_exit();
1751c47f202bSJeff Roberson #endif
1752c47f202bSJeff Roberson 	return (TDQ_LOCKPTR(tdn));
1753c47f202bSJeff Roberson }
1754c47f202bSJeff Roberson 
1755c47f202bSJeff Roberson /*
1756b0b9dee5SAttilio Rao  * Variant of thread_lock_unblock() that does not assume td_lock
1757b0b9dee5SAttilio Rao  * is blocked.
1758ae7a6b38SJeff Roberson  */
1759ae7a6b38SJeff Roberson static inline void
1760ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx)
1761ae7a6b38SJeff Roberson {
1762ae7a6b38SJeff Roberson 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1763ae7a6b38SJeff Roberson 	    (uintptr_t)mtx);
1764ae7a6b38SJeff Roberson }
1765ae7a6b38SJeff Roberson 
1766ae7a6b38SJeff Roberson /*
1767ae7a6b38SJeff Roberson  * Switch threads.  This function has to handle threads coming in while
1768ae7a6b38SJeff Roberson  * blocked for some reason, running, or idle.  It also must deal with
1769ae7a6b38SJeff Roberson  * migrating a thread from one queue to another as running threads may
1770ae7a6b38SJeff Roberson  * be assigned elsewhere via binding.
1771ae7a6b38SJeff Roberson  */
17723db720fdSDavid Xu void
17733389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags)
177435e6168fSJeff Roberson {
1775c02bbb43SJeff Roberson 	struct tdq *tdq;
1776ad1e7d28SJulian Elischer 	struct td_sched *ts;
1777ae7a6b38SJeff Roberson 	struct mtx *mtx;
1778c47f202bSJeff Roberson 	int srqflag;
1779ae7a6b38SJeff Roberson 	int cpuid;
178035e6168fSJeff Roberson 
17817b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
17826d55b3ecSJeff Roberson 	KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument"));
178335e6168fSJeff Roberson 
1784ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
1785ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpuid);
1786e7d50326SJeff Roberson 	ts = td->td_sched;
1787c47f202bSJeff Roberson 	mtx = td->td_lock;
1788ae7a6b38SJeff Roberson 	ts->ts_rltick = ticks;
1789060563ecSJulian Elischer 	td->td_lastcpu = td->td_oncpu;
1790060563ecSJulian Elischer 	td->td_oncpu = NOCPU;
1791586cb6ecSFabien Thomas 	if (!(flags & SW_PREEMPT))
179252eb8464SJohn Baldwin 		td->td_flags &= ~TDF_NEEDRESCHED;
179377918643SStephan Uphoff 	td->td_owepreempt = 0;
17941690c6c1SJeff Roberson 	tdq->tdq_switchcnt++;
1795b11fdad0SJeff Roberson 	/*
1796ae7a6b38SJeff Roberson 	 * The lock pointer in an idle thread should never change.  Reset the
1797ae7a6b38SJeff Roberson 	 * thread state to CAN_RUN as well.
1798b11fdad0SJeff Roberson 	 */
1799486a9414SJulian Elischer 	if (TD_IS_IDLETHREAD(td)) {
1800ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1801bf0acc27SJohn Baldwin 		TD_SET_CAN_RUN(td);
18027b20fb19SJeff Roberson 	} else if (TD_IS_RUNNING(td)) {
1803ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1804c47f202bSJeff Roberson 		srqflag = (flags & SW_PREEMPT) ?
1805598b368dSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1806c47f202bSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING;
1807ba4932b5SMatthew D Fleming #ifdef SMP
18080f7a0ebdSMatthew D Fleming 		if (THREAD_CAN_MIGRATE(td) && !THREAD_CAN_SCHED(td, ts->ts_cpu))
18090f7a0ebdSMatthew D Fleming 			ts->ts_cpu = sched_pickcpu(td, 0);
1810ba4932b5SMatthew D Fleming #endif
1811c47f202bSJeff Roberson 		if (ts->ts_cpu == cpuid)
18129727e637SJeff Roberson 			tdq_runq_add(tdq, td, srqflag);
18130f7a0ebdSMatthew D Fleming 		else {
18140f7a0ebdSMatthew D Fleming 			KASSERT(THREAD_CAN_MIGRATE(td) ||
18150f7a0ebdSMatthew D Fleming 			    (ts->ts_flags & TSF_BOUND) != 0,
18160f7a0ebdSMatthew D Fleming 			    ("Thread %p shouldn't migrate", td));
1817c47f202bSJeff Roberson 			mtx = sched_switch_migrate(tdq, td, srqflag);
18180f7a0ebdSMatthew D Fleming 		}
1819ae7a6b38SJeff Roberson 	} else {
1820ae7a6b38SJeff Roberson 		/* This thread must be going to sleep. */
1821ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
1822b0b9dee5SAttilio Rao 		mtx = thread_lock_block(td);
18239727e637SJeff Roberson 		tdq_load_rem(tdq, td);
1824ae7a6b38SJeff Roberson 	}
1825ae7a6b38SJeff Roberson 	/*
1826ae7a6b38SJeff Roberson 	 * We enter here with the thread blocked and assigned to the
1827ae7a6b38SJeff Roberson 	 * appropriate cpu run-queue or sleep-queue and with the current
1828ae7a6b38SJeff Roberson 	 * thread-queue locked.
1829ae7a6b38SJeff Roberson 	 */
1830ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
18312454aaf5SJeff Roberson 	newtd = choosethread();
1832ae7a6b38SJeff Roberson 	/*
1833ae7a6b38SJeff Roberson 	 * Call the MD code to switch contexts if necessary.
1834ae7a6b38SJeff Roberson 	 */
1835ebccf1e3SJoseph Koshy 	if (td != newtd) {
1836ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
1837ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1838ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1839ebccf1e3SJoseph Koshy #endif
1840eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
184159c68134SJeff Roberson 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
18426f5f25e5SJohn Birrell 
18436f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
18446f5f25e5SJohn Birrell 		/*
18456f5f25e5SJohn Birrell 		 * If DTrace has set the active vtime enum to anything
18466f5f25e5SJohn Birrell 		 * other than INACTIVE (0), then it should have set the
18476f5f25e5SJohn Birrell 		 * function to call.
18486f5f25e5SJohn Birrell 		 */
18496f5f25e5SJohn Birrell 		if (dtrace_vtime_active)
18506f5f25e5SJohn Birrell 			(*dtrace_vtime_switch_func)(newtd);
18516f5f25e5SJohn Birrell #endif
18526f5f25e5SJohn Birrell 
1853ae7a6b38SJeff Roberson 		cpu_switch(td, newtd, mtx);
1854ae7a6b38SJeff Roberson 		/*
1855ae7a6b38SJeff Roberson 		 * We may return from cpu_switch on a different cpu.  However,
1856ae7a6b38SJeff Roberson 		 * we always return with td_lock pointing to the current cpu's
1857ae7a6b38SJeff Roberson 		 * run queue lock.
1858ae7a6b38SJeff Roberson 		 */
1859ae7a6b38SJeff Roberson 		cpuid = PCPU_GET(cpuid);
1860ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(cpuid);
1861eea4f254SJeff Roberson 		lock_profile_obtain_lock_success(
1862eea4f254SJeff Roberson 		    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
1863ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
1864ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1865ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1866ebccf1e3SJoseph Koshy #endif
1867ae7a6b38SJeff Roberson 	} else
1868ae7a6b38SJeff Roberson 		thread_unblock_switch(td, mtx);
1869ae7a6b38SJeff Roberson 	/*
1870ae7a6b38SJeff Roberson 	 * Assert that all went well and return.
1871ae7a6b38SJeff Roberson 	 */
1872ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1873ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1874ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
187535e6168fSJeff Roberson }
187635e6168fSJeff Roberson 
1877ae7a6b38SJeff Roberson /*
1878ae7a6b38SJeff Roberson  * Adjust thread priorities as a result of a nice request.
1879ae7a6b38SJeff Roberson  */
188035e6168fSJeff Roberson void
1881fa885116SJulian Elischer sched_nice(struct proc *p, int nice)
188235e6168fSJeff Roberson {
188335e6168fSJeff Roberson 	struct thread *td;
188435e6168fSJeff Roberson 
1885fa885116SJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
1886e7d50326SJeff Roberson 
1887fa885116SJulian Elischer 	p->p_nice = nice;
18888460a577SJohn Birrell 	FOREACH_THREAD_IN_PROC(p, td) {
18897b20fb19SJeff Roberson 		thread_lock(td);
18908460a577SJohn Birrell 		sched_priority(td);
1891e7d50326SJeff Roberson 		sched_prio(td, td->td_base_user_pri);
18927b20fb19SJeff Roberson 		thread_unlock(td);
189335e6168fSJeff Roberson 	}
1894fa885116SJulian Elischer }
189535e6168fSJeff Roberson 
1896ae7a6b38SJeff Roberson /*
1897ae7a6b38SJeff Roberson  * Record the sleep time for the interactivity scorer.
1898ae7a6b38SJeff Roberson  */
189935e6168fSJeff Roberson void
1900c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int prio)
190135e6168fSJeff Roberson {
1902e7d50326SJeff Roberson 
19037b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
190435e6168fSJeff Roberson 
190554b0e65fSJeff Roberson 	td->td_slptick = ticks;
190617c4c356SKonstantin Belousov 	if (TD_IS_SUSPENDED(td) || prio >= PSOCK)
1907c5aa6b58SJeff Roberson 		td->td_flags |= TDF_CANSWAP;
19082dc29adbSJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
19092dc29adbSJohn Baldwin 		return;
19100502fe2eSJeff Roberson 	if (static_boost == 1 && prio)
1911c5aa6b58SJeff Roberson 		sched_prio(td, prio);
19120502fe2eSJeff Roberson 	else if (static_boost && td->td_priority > static_boost)
19130502fe2eSJeff Roberson 		sched_prio(td, static_boost);
191435e6168fSJeff Roberson }
191535e6168fSJeff Roberson 
1916ae7a6b38SJeff Roberson /*
1917ae7a6b38SJeff Roberson  * Schedule a thread to resume execution and record how long it voluntarily
1918ae7a6b38SJeff Roberson  * slept.  We also update the pctcpu, interactivity, and priority.
1919ae7a6b38SJeff Roberson  */
192035e6168fSJeff Roberson void
192135e6168fSJeff Roberson sched_wakeup(struct thread *td)
192235e6168fSJeff Roberson {
192314618990SJeff Roberson 	struct td_sched *ts;
1924ae7a6b38SJeff Roberson 	int slptick;
1925e7d50326SJeff Roberson 
19267b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
192714618990SJeff Roberson 	ts = td->td_sched;
1928c5aa6b58SJeff Roberson 	td->td_flags &= ~TDF_CANSWAP;
192935e6168fSJeff Roberson 	/*
1930e7d50326SJeff Roberson 	 * If we slept for more than a tick, update our interactivity and
1931e7d50326SJeff Roberson 	 * priority.
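	 * The sleep interval is shifted up by SCHED_TICK_SHIFT so that it is
	 * in the same fixed-point units as ts_runtime, which grows by
	 * tickincr on every stathz tick.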
193235e6168fSJeff Roberson 	 */
193354b0e65fSJeff Roberson 	slptick = td->td_slptick;
193454b0e65fSJeff Roberson 	td->td_slptick = 0;
1935ae7a6b38SJeff Roberson 	if (slptick && slptick != ticks) {
19369a93305aSJeff Roberson 		u_int hzticks;
1937f1e8dc4aSJeff Roberson 
1938ae7a6b38SJeff Roberson 		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1939ae7a6b38SJeff Roberson 		ts->ts_slptime += hzticks;
19408460a577SJohn Birrell 		sched_interact_update(td);
194114618990SJeff Roberson 		sched_pctcpu_update(ts);
1942f1e8dc4aSJeff Roberson 	}
194314618990SJeff Roberson 	/* Reset the slice value after we sleep. */
194414618990SJeff Roberson 	ts->ts_slice = sched_slice;
19457a5e5e2aSJeff Roberson 	sched_add(td, SRQ_BORING);
194635e6168fSJeff Roberson }
194735e6168fSJeff Roberson 
194835e6168fSJeff Roberson /*
194935e6168fSJeff Roberson  * Penalize the parent for creating a new child and initialize the child's
195035e6168fSJeff Roberson  * priority.
195135e6168fSJeff Roberson  */
195235e6168fSJeff Roberson void
19538460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
195415dc847eSJeff Roberson {
19557b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1956ad1e7d28SJulian Elischer 	sched_fork_thread(td, child);
1957e7d50326SJeff Roberson 	/*
1958e7d50326SJeff Roberson 	 * Penalize the parent and child for forking.
1959e7d50326SJeff Roberson 	 */
1960e7d50326SJeff Roberson 	sched_interact_fork(child);
1961e7d50326SJeff Roberson 	sched_priority(child);
1962ae7a6b38SJeff Roberson 	td->td_sched->ts_runtime += tickincr;
1963e7d50326SJeff Roberson 	sched_interact_update(td);
1964e7d50326SJeff Roberson 	sched_priority(td);
1965ad1e7d28SJulian Elischer }
1966ad1e7d28SJulian Elischer 
1967ae7a6b38SJeff Roberson /*
1968ae7a6b38SJeff Roberson  * Fork a new thread, may be within the same process.
1969ae7a6b38SJeff Roberson  */
1970ad1e7d28SJulian Elischer void
1971ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
1972ad1e7d28SJulian Elischer {
1973ad1e7d28SJulian Elischer 	struct td_sched *ts;
1974ad1e7d28SJulian Elischer 	struct td_sched *ts2;
19758460a577SJohn Birrell 
19768b16c208SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1977e7d50326SJeff Roberson 	/*
1978e7d50326SJeff Roberson 	 * Initialize child.
1979e7d50326SJeff Roberson 	 */
1980ad1e7d28SJulian Elischer 	ts = td->td_sched;
1981ad1e7d28SJulian Elischer 	ts2 = child->td_sched;
19828b16c208SJeff Roberson 	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
19838b16c208SJeff Roberson 	child->td_cpuset = cpuset_ref(td->td_cpuset);
1984ad1e7d28SJulian Elischer 	ts2->ts_cpu = ts->ts_cpu;
19858b16c208SJeff Roberson 	ts2->ts_flags = 0;
1986e7d50326SJeff Roberson 	/*
198722d19207SJohn Baldwin 	 * Grab our parent's cpu estimation information.
1988e7d50326SJeff Roberson 	 */
1989ad1e7d28SJulian Elischer 	ts2->ts_ticks = ts->ts_ticks;
1990ad1e7d28SJulian Elischer 	ts2->ts_ltick = ts->ts_ltick;
1991cbc4ea28SIvan Voras 	ts2->ts_incrtick = ts->ts_incrtick;
1992ad1e7d28SJulian Elischer 	ts2->ts_ftick = ts->ts_ftick;
199322d19207SJohn Baldwin 	/*
199422d19207SJohn Baldwin 	 * Do not inherit any borrowed priority from the parent.
199522d19207SJohn Baldwin 	 */
199622d19207SJohn Baldwin 	child->td_priority = child->td_base_pri;
1997e7d50326SJeff Roberson 	/*
1998e7d50326SJeff Roberson 	 * And update interactivity score.
1999e7d50326SJeff Roberson 	 */
2000ae7a6b38SJeff Roberson 	ts2->ts_slptime = ts->ts_slptime;
2001ae7a6b38SJeff Roberson 	ts2->ts_runtime = ts->ts_runtime;
2002e7d50326SJeff Roberson 	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
20038f51ad55SJeff Roberson #ifdef KTR
20048f51ad55SJeff Roberson 	bzero(ts2->ts_name, sizeof(ts2->ts_name));
20058f51ad55SJeff Roberson #endif
200615dc847eSJeff Roberson }
200715dc847eSJeff Roberson 
2008ae7a6b38SJeff Roberson /*
2009ae7a6b38SJeff Roberson  * Adjust the priority class of a thread.
2010ae7a6b38SJeff Roberson  */
201115dc847eSJeff Roberson void
20128460a577SJohn Birrell sched_class(struct thread *td, int class)
201315dc847eSJeff Roberson {
201415dc847eSJeff Roberson 
20157b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
20168460a577SJohn Birrell 	if (td->td_pri_class == class)
201715dc847eSJeff Roberson 		return;
20188460a577SJohn Birrell 	td->td_pri_class = class;
201935e6168fSJeff Roberson }
202035e6168fSJeff Roberson 
202135e6168fSJeff Roberson /*
202235e6168fSJeff Roberson  * Return some of the child's priority and interactivity to the parent.
202335e6168fSJeff Roberson  */
202435e6168fSJeff Roberson void
2025fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
202635e6168fSJeff Roberson {
2027e7d50326SJeff Roberson 	struct thread *td;
2028141ad61cSJeff Roberson 
20298f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
2030cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2031374ae2a3SJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
2032e7d50326SJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
2033e7d50326SJeff Roberson 	sched_exit_thread(td, child);
2034ad1e7d28SJulian Elischer }
2035ad1e7d28SJulian Elischer 
2036ae7a6b38SJeff Roberson /*
2037ae7a6b38SJeff Roberson  * Penalize another thread for the time spent on this one.  This helps to
2038ae7a6b38SJeff Roberson  * worsen the priority and interactivity of processes which schedule batch
2039ae7a6b38SJeff Roberson  * jobs such as make.  This has little effect on the make process itself but
2040ae7a6b38SJeff Roberson  * causes new processes spawned by it to receive worse scores immediately.
2041ae7a6b38SJeff Roberson  */
2042ad1e7d28SJulian Elischer void
2043fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child)
2044ad1e7d28SJulian Elischer {
2045fc6c30f6SJulian Elischer 
20468f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
2047cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2048e7d50326SJeff Roberson 	/*
2049e7d50326SJeff Roberson 	 * Give the child's runtime to the parent without returning the
2050e7d50326SJeff Roberson 	 * sleep time as a penalty to the parent.  This causes shells that
2051e7d50326SJeff Roberson 	 * launch expensive things to mark their children as expensive.
2052e7d50326SJeff Roberson 	 */
20537b20fb19SJeff Roberson 	thread_lock(td);
2054ae7a6b38SJeff Roberson 	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2055fc6c30f6SJulian Elischer 	sched_interact_update(td);
2056e7d50326SJeff Roberson 	sched_priority(td);
20577b20fb19SJeff Roberson 	thread_unlock(td);
2058ad1e7d28SJulian Elischer }
2059ad1e7d28SJulian Elischer 
2060ff256d9cSJeff Roberson void
2061ff256d9cSJeff Roberson sched_preempt(struct thread *td)
2062ff256d9cSJeff Roberson {
2063ff256d9cSJeff Roberson 	struct tdq *tdq;
2064ff256d9cSJeff Roberson 
2065ff256d9cSJeff Roberson 	thread_lock(td);
2066ff256d9cSJeff Roberson 	tdq = TDQ_SELF();
2067ff256d9cSJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2068ff256d9cSJeff Roberson 	tdq->tdq_ipipending = 0;
2069ff256d9cSJeff Roberson 	if (td->td_priority > tdq->tdq_lowpri) {
20708df78c41SJeff Roberson 		int flags;
20718df78c41SJeff Roberson 
20728df78c41SJeff Roberson 		flags = SW_INVOL | SW_PREEMPT;
2073ff256d9cSJeff Roberson 		if (td->td_critnest > 1)
2074ff256d9cSJeff Roberson 			td->td_owepreempt = 1;
20758df78c41SJeff Roberson 		else if (TD_IS_IDLETHREAD(td))
20768df78c41SJeff Roberson 			mi_switch(flags | SWT_REMOTEWAKEIDLE, NULL);
2077ff256d9cSJeff Roberson 		else
20788df78c41SJeff Roberson 			mi_switch(flags | SWT_REMOTEPREEMPT, NULL);
2079ff256d9cSJeff Roberson 	}
2080ff256d9cSJeff Roberson 	thread_unlock(td);
2081ff256d9cSJeff Roberson }
2082ff256d9cSJeff Roberson 
2083ae7a6b38SJeff Roberson /*
2084ae7a6b38SJeff Roberson  * Fix priorities on return to user-space.  Priorities may be elevated due
2085ae7a6b38SJeff Roberson  * to static priorities in msleep() or similar.
2086ae7a6b38SJeff Roberson  */
2087ad1e7d28SJulian Elischer void
2088ad1e7d28SJulian Elischer sched_userret(struct thread *td)
2089ad1e7d28SJulian Elischer {
2090ad1e7d28SJulian Elischer 	/*
2091ad1e7d28SJulian Elischer 	 * XXX we cheat slightly on the locking here to avoid locking in
2092ad1e7d28SJulian Elischer 	 * the usual case.  Setting td_priority here is essentially an
2093ad1e7d28SJulian Elischer 	 * incomplete workaround for not setting it properly elsewhere.
2094ad1e7d28SJulian Elischer 	 * Now that some interrupt handlers are threads, not setting it
2095ad1e7d28SJulian Elischer 	 * properly elsewhere can clobber it in the window between setting
2096ad1e7d28SJulian Elischer 	 * it here and returning to user mode, so don't waste time setting
2097ad1e7d28SJulian Elischer 	 * it perfectly here.
2098ad1e7d28SJulian Elischer 	 */
2099ad1e7d28SJulian Elischer 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
2100ad1e7d28SJulian Elischer 	    ("thread with borrowed priority returning to userland"));
2101ad1e7d28SJulian Elischer 	if (td->td_priority != td->td_user_pri) {
21027b20fb19SJeff Roberson 		thread_lock(td);
2103ad1e7d28SJulian Elischer 		td->td_priority = td->td_user_pri;
2104ad1e7d28SJulian Elischer 		td->td_base_pri = td->td_user_pri;
210562fa74d9SJeff Roberson 		tdq_setlowpri(TDQ_SELF(), td);
21067b20fb19SJeff Roberson 		thread_unlock(td);
2107ad1e7d28SJulian Elischer         }
210835e6168fSJeff Roberson }
210935e6168fSJeff Roberson 
2110ae7a6b38SJeff Roberson /*
2111ae7a6b38SJeff Roberson  * Handle a stathz tick.  This is really only relevant for timeshare
2112ae7a6b38SJeff Roberson  * threads.
2113ae7a6b38SJeff Roberson  */
211435e6168fSJeff Roberson void
21157cf90fb3SJeff Roberson sched_clock(struct thread *td)
211635e6168fSJeff Roberson {
2117ad1e7d28SJulian Elischer 	struct tdq *tdq;
2118ad1e7d28SJulian Elischer 	struct td_sched *ts;
211935e6168fSJeff Roberson 
2120ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
21213f872f85SJeff Roberson 	tdq = TDQ_SELF();
21227fcf154aSJeff Roberson #ifdef SMP
21237fcf154aSJeff Roberson 	/*
21247fcf154aSJeff Roberson 	 * We run the long term load balancer infrequently on the first cpu.
21257fcf154aSJeff Roberson 	 */
21267fcf154aSJeff Roberson 	if (balance_tdq == tdq) {
21277fcf154aSJeff Roberson 		if (balance_ticks && --balance_ticks == 0)
21287fcf154aSJeff Roberson 			sched_balance();
21297fcf154aSJeff Roberson 	}
21307fcf154aSJeff Roberson #endif
21313f872f85SJeff Roberson 	/*
21321690c6c1SJeff Roberson 	 * Save the old switch count so we have a record of the last tick's
21331690c6c1SJeff Roberson 	 * activity.  Initialize the new switch count based on our load:
21341690c6c1SJeff Roberson 	 * if there is some activity, seed it to reflect that.
21351690c6c1SJeff Roberson 	 */
21361690c6c1SJeff Roberson 	tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
21376c47aaaeSJeff Roberson 	tdq->tdq_switchcnt = tdq->tdq_load;
21381690c6c1SJeff Roberson 	/*
21393f872f85SJeff Roberson 	 * Advance the insert index once for each tick to ensure that all
21403f872f85SJeff Roberson 	 * threads get a chance to run.
21413f872f85SJeff Roberson 	 */
21423f872f85SJeff Roberson 	if (tdq->tdq_idx == tdq->tdq_ridx) {
21433f872f85SJeff Roberson 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
21443f872f85SJeff Roberson 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
21453f872f85SJeff Roberson 			tdq->tdq_ridx = tdq->tdq_idx;
21463f872f85SJeff Roberson 	}
21473f872f85SJeff Roberson 	ts = td->td_sched;
2148fd0b8c78SJeff Roberson 	if (td->td_pri_class & PRI_FIFO_BIT)
2149a8949de2SJeff Roberson 		return;
2150c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
2151a8949de2SJeff Roberson 		/*
2152fd0b8c78SJeff Roberson 		 * We used a tick; charge it to the thread so
2153fd0b8c78SJeff Roberson 		 * that we can compute our interactivity.
215415dc847eSJeff Roberson 		 */
2155ae7a6b38SJeff Roberson 		ts->ts_runtime += tickincr;
21568460a577SJohn Birrell 		sched_interact_update(td);
215773daf66fSJeff Roberson 		sched_priority(td);
2158fd0b8c78SJeff Roberson 	}
215935e6168fSJeff Roberson 	/*
216035e6168fSJeff Roberson 	 * We used up one time slice.
216135e6168fSJeff Roberson 	 */
2162ad1e7d28SJulian Elischer 	if (--ts->ts_slice > 0)
216315dc847eSJeff Roberson 		return;
216435e6168fSJeff Roberson 	/*
216573daf66fSJeff Roberson 	 * We're out of time; force a requeue at userret().
216635e6168fSJeff Roberson 	 */
216773daf66fSJeff Roberson 	ts->ts_slice = sched_slice;
21684a338afdSJulian Elischer 	td->td_flags |= TDF_NEEDRESCHED;
216935e6168fSJeff Roberson }
217035e6168fSJeff Roberson 
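/*
 * Illustrative sketch (userland model with hypothetical names, not
 * kernel code): the timeshare calendar-queue rotation performed in
 * sched_clock() above.  Advancing the insert index at most one bucket
 * per tick bounds how long a queued thread waits before its bucket
 * becomes the removal index:
 *
 *	static int idx, ridx;		/* model tdq_idx and tdq_ridx */
 *	static int qlen[RQ_NQS];	/* stand-in for the bucket queues */
 *
 *	static void
 *	model_clock_tick(void)
 *	{
 *		if (idx == ridx) {
 *			idx = (idx + 1) % RQ_NQS;
 *			if (qlen[ridx] == 0)
 *				ridx = idx;
 *		}
 *	}
 */
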
2171ae7a6b38SJeff Roberson /*
2172ae7a6b38SJeff Roberson  * Called once per hz tick.  Used for cpu utilization information.  This
2173ae7a6b38SJeff Roberson  * is easier than trying to scale based on stathz.
2174ae7a6b38SJeff Roberson  */
2175ae7a6b38SJeff Roberson void
2176a157e425SAlexander Motin sched_tick(int cnt)
2177ae7a6b38SJeff Roberson {
2178ae7a6b38SJeff Roberson 	struct td_sched *ts;
2179ae7a6b38SJeff Roberson 
2180ae7a6b38SJeff Roberson 	ts = curthread->td_sched;
2181e980fff6SJeff Roberson 	/*
2182e980fff6SJeff Roberson 	 * Ticks is updated asynchronously on a single cpu.  Check here to
2183e980fff6SJeff Roberson 	 * avoid incrementing ts_ticks multiple times in a single tick.
2184e980fff6SJeff Roberson 	 */
2185cbc4ea28SIvan Voras 	if (ts->ts_incrtick == ticks)
2186e980fff6SJeff Roberson 		return;
2187ae7a6b38SJeff Roberson 	/* Adjust ticks for pctcpu */
2188a157e425SAlexander Motin 	ts->ts_ticks += cnt << SCHED_TICK_SHIFT;
2189ae7a6b38SJeff Roberson 	ts->ts_ltick = ticks;
2190cbc4ea28SIvan Voras 	ts->ts_incrtick = ticks;
2191ae7a6b38SJeff Roberson 	/*
21929f518f20SAttilio Rao 	 * Update if we've exceeded our desired tick threshold by over one
2193ae7a6b38SJeff Roberson 	 * second.
2194ae7a6b38SJeff Roberson 	 */
2195ae7a6b38SJeff Roberson 	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2196ae7a6b38SJeff Roberson 		sched_pctcpu_update(ts);
2197ae7a6b38SJeff Roberson }
2198ae7a6b38SJeff Roberson 
2199ae7a6b38SJeff Roberson /*
2200ae7a6b38SJeff Roberson  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2201ae7a6b38SJeff Roberson  * cooperative idle threads.
2202ae7a6b38SJeff Roberson  */
220335e6168fSJeff Roberson int
220435e6168fSJeff Roberson sched_runnable(void)
220535e6168fSJeff Roberson {
2206ad1e7d28SJulian Elischer 	struct tdq *tdq;
2207b90816f1SJeff Roberson 	int load;
220835e6168fSJeff Roberson 
2209b90816f1SJeff Roberson 	load = 1;
2210b90816f1SJeff Roberson 
2211ad1e7d28SJulian Elischer 	tdq = TDQ_SELF();
22123f741ca1SJeff Roberson 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2213d2ad694cSJeff Roberson 		if (tdq->tdq_load > 0)
22143f741ca1SJeff Roberson 			goto out;
22153f741ca1SJeff Roberson 	} else
2216d2ad694cSJeff Roberson 		if (tdq->tdq_load - 1 > 0)
2217b90816f1SJeff Roberson 			goto out;
2218b90816f1SJeff Roberson 	load = 0;
2219b90816f1SJeff Roberson out:
2220b90816f1SJeff Roberson 	return (load);
222135e6168fSJeff Roberson }
222235e6168fSJeff Roberson 
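/*
 * Sketch (hedged): machine-dependent idle-spin loops consume this
 * interface roughly as follows, polling until work appears:
 *
 *	while (sched_runnable() == 0)
 *		cpu_spinwait();
 */
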
2223ae7a6b38SJeff Roberson /*
2224ae7a6b38SJeff Roberson  * Choose the highest-priority thread to run.  The thread is removed from
2225ae7a6b38SJeff Roberson  * the run-queue while running; however, the load remains.  For SMP we set
2226ae7a6b38SJeff Roberson  * the tdq in the global idle bitmask if it goes idle here.
2227ae7a6b38SJeff Roberson  */
22287a5e5e2aSJeff Roberson struct thread *
2229c9f25d8fSJeff Roberson sched_choose(void)
2230c9f25d8fSJeff Roberson {
22319727e637SJeff Roberson 	struct thread *td;
2232ae7a6b38SJeff Roberson 	struct tdq *tdq;
2233ae7a6b38SJeff Roberson 
2234ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2235ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
22369727e637SJeff Roberson 	td = tdq_choose(tdq);
22379727e637SJeff Roberson 	if (td) {
22389727e637SJeff Roberson 		td->td_sched->ts_ltick = ticks;
22399727e637SJeff Roberson 		tdq_runq_rem(tdq, td);
22400502fe2eSJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
22419727e637SJeff Roberson 		return (td);
224235e6168fSJeff Roberson 	}
22430502fe2eSJeff Roberson 	tdq->tdq_lowpri = PRI_MAX_IDLE;
224462fa74d9SJeff Roberson 	return (PCPU_GET(idlethread));
22457a5e5e2aSJeff Roberson }
22467a5e5e2aSJeff Roberson 
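/*
 * Sketch of the caller contract (hypothetical caller, modeled on
 * choosethread()): the switch path holds the local tdq lock and marks
 * the returned thread running:
 *
 *	TDQ_LOCK_ASSERT(TDQ_SELF(), MA_OWNED);
 *	td = sched_choose();	/* never NULL; may be the idle thread */
 *	TD_SET_RUNNING(td);
 */
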
2247ae7a6b38SJeff Roberson /*
2248ae7a6b38SJeff Roberson  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2249ae7a6b38SJeff Roberson  * we always request it once we exit a critical section.
2250ae7a6b38SJeff Roberson  */
2251ae7a6b38SJeff Roberson static inline void
2252ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
22537a5e5e2aSJeff Roberson {
22547a5e5e2aSJeff Roberson 	struct thread *ctd;
22557a5e5e2aSJeff Roberson 	int cpri;
22567a5e5e2aSJeff Roberson 	int pri;
22577a5e5e2aSJeff Roberson 
2258ff256d9cSJeff Roberson 	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2259ff256d9cSJeff Roberson 
22607a5e5e2aSJeff Roberson 	ctd = curthread;
22617a5e5e2aSJeff Roberson 	pri = td->td_priority;
22627a5e5e2aSJeff Roberson 	cpri = ctd->td_priority;
2263ff256d9cSJeff Roberson 	if (pri < cpri)
2264ff256d9cSJeff Roberson 		ctd->td_flags |= TDF_NEEDRESCHED;
22657a5e5e2aSJeff Roberson 	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2266ae7a6b38SJeff Roberson 		return;
2267ff256d9cSJeff Roberson 	if (!sched_shouldpreempt(pri, cpri, 0))
2268ae7a6b38SJeff Roberson 		return;
22697a5e5e2aSJeff Roberson 	ctd->td_owepreempt = 1;
227035e6168fSJeff Roberson }
227135e6168fSJeff Roberson 
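/*
 * Worked example (illustrative numbers only): with lower values meaning
 * higher priority, a thread at pri 80 waking on a CPU whose current
 * thread runs at cpri 120 always gets TDF_NEEDRESCHED set above;
 * td_owepreempt is set in addition only when sched_shouldpreempt()
 * agrees, which depends on how pri compares with preempt_thresh.  A
 * wakeup at pri 140 on the same CPU sets neither, since 140 >= 120.
 */
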
2272ae7a6b38SJeff Roberson /*
227373daf66fSJeff Roberson  * Add a thread to a thread queue.  Select the appropriate runq and add the
227473daf66fSJeff Roberson  * thread to it.  This is the internal function called when the tdq is
227573daf66fSJeff Roberson  * predetermined.
2276ae7a6b38SJeff Roberson  */
227735e6168fSJeff Roberson void
2278ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
227935e6168fSJeff Roberson {
2280c9f25d8fSJeff Roberson 
2281ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
22827a5e5e2aSJeff Roberson 	KASSERT((td->td_inhibitors == 0),
22837a5e5e2aSJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
22847a5e5e2aSJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
22857a5e5e2aSJeff Roberson 	    ("sched_add: bad thread state"));
2286b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
2287b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
2288ae7a6b38SJeff Roberson 
2289ae7a6b38SJeff Roberson 	if (td->td_priority < tdq->tdq_lowpri)
2290ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
22919727e637SJeff Roberson 	tdq_runq_add(tdq, td, flags);
22929727e637SJeff Roberson 	tdq_load_add(tdq, td);
2293ae7a6b38SJeff Roberson }
2294ae7a6b38SJeff Roberson 
2295ae7a6b38SJeff Roberson /*
2296ae7a6b38SJeff Roberson  * Select the target thread queue and add a thread to it.  Request
2297ae7a6b38SJeff Roberson  * preemption or IPI a remote processor if required.
2298ae7a6b38SJeff Roberson  */
2299ae7a6b38SJeff Roberson void
2300ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags)
2301ae7a6b38SJeff Roberson {
2302ae7a6b38SJeff Roberson 	struct tdq *tdq;
23037b8bfa0dSJeff Roberson #ifdef SMP
2304ae7a6b38SJeff Roberson 	int cpu;
2305ae7a6b38SJeff Roberson #endif
23068f51ad55SJeff Roberson 
23078f51ad55SJeff Roberson 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
23088f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
23098f51ad55SJeff Roberson 	    sched_tdname(curthread));
23108f51ad55SJeff Roberson 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
23118f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(td));
2312ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2313ae7a6b38SJeff Roberson 	/*
2314ae7a6b38SJeff Roberson 	 * Recalculate the priority before we select the target cpu or
2315ae7a6b38SJeff Roberson 	 * run-queue.
2316ae7a6b38SJeff Roberson 	 */
2317ae7a6b38SJeff Roberson 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2318ae7a6b38SJeff Roberson 		sched_priority(td);
2319ae7a6b38SJeff Roberson #ifdef SMP
2320ae7a6b38SJeff Roberson 	/*
2321ae7a6b38SJeff Roberson 	 * Pick the destination cpu and if it isn't ours transfer to the
2322ae7a6b38SJeff Roberson 	 * target cpu.
2323ae7a6b38SJeff Roberson 	 */
23249727e637SJeff Roberson 	cpu = sched_pickcpu(td, flags);
23259727e637SJeff Roberson 	tdq = sched_setcpu(td, cpu, flags);
2326ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
232773daf66fSJeff Roberson 	if (cpu != PCPU_GET(cpuid)) {
23289727e637SJeff Roberson 		tdq_notify(tdq, td);
23297b8bfa0dSJeff Roberson 		return;
23307b8bfa0dSJeff Roberson 	}
2331ae7a6b38SJeff Roberson #else
2332ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2333ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
2334ae7a6b38SJeff Roberson 	/*
2335ae7a6b38SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
2336ae7a6b38SJeff Roberson 	 * to the scheduler's lock.
2337ae7a6b38SJeff Roberson 	 */
2338ae7a6b38SJeff Roberson 	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2339ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
23407b8bfa0dSJeff Roberson #endif
2341ae7a6b38SJeff Roberson 	if (!(flags & SRQ_YIELDING))
2342ae7a6b38SJeff Roberson 		sched_setpreempt(td);
234335e6168fSJeff Roberson }
234435e6168fSJeff Roberson 
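/*
 * Sketch (hedged): a typical wakeup-style caller holds the thread lock
 * and lets sched_add() pick the CPU and request preemption:
 *
 *	thread_lock(td);
 *	if (TD_CAN_RUN(td))
 *		sched_add(td, SRQ_BORING);
 *	thread_unlock(td);
 */
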
2345ae7a6b38SJeff Roberson /*
2346ae7a6b38SJeff Roberson  * Remove a thread from a run-queue without running it.  This is used
2347ae7a6b38SJeff Roberson  * when we're stealing a thread from a remote queue.  Otherwise all threads
2348ae7a6b38SJeff Roberson  * exit by calling sched_exit_thread() and sched_throw() themselves.
2349ae7a6b38SJeff Roberson  */
235035e6168fSJeff Roberson void
23517cf90fb3SJeff Roberson sched_rem(struct thread *td)
235235e6168fSJeff Roberson {
2353ad1e7d28SJulian Elischer 	struct tdq *tdq;
23547cf90fb3SJeff Roberson 
23558f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
23568f51ad55SJeff Roberson 	    "prio:%d", td->td_priority);
23579727e637SJeff Roberson 	tdq = TDQ_CPU(td->td_sched->ts_cpu);
2358ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2359ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
23607a5e5e2aSJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
2361ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
23629727e637SJeff Roberson 	tdq_runq_rem(tdq, td);
23639727e637SJeff Roberson 	tdq_load_rem(tdq, td);
23647a5e5e2aSJeff Roberson 	TD_SET_CAN_RUN(td);
236562fa74d9SJeff Roberson 	if (td->td_priority == tdq->tdq_lowpri)
236662fa74d9SJeff Roberson 		tdq_setlowpri(tdq, NULL);
236735e6168fSJeff Roberson }
236835e6168fSJeff Roberson 
2369ae7a6b38SJeff Roberson /*
2370ae7a6b38SJeff Roberson  * Fetch cpu utilization information.  Updates on demand.
2371ae7a6b38SJeff Roberson  */
237235e6168fSJeff Roberson fixpt_t
23737cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
237435e6168fSJeff Roberson {
237535e6168fSJeff Roberson 	fixpt_t pctcpu;
2376ad1e7d28SJulian Elischer 	struct td_sched *ts;
237735e6168fSJeff Roberson 
237835e6168fSJeff Roberson 	pctcpu = 0;
2379ad1e7d28SJulian Elischer 	ts = td->td_sched;
2380ad1e7d28SJulian Elischer 	if (ts == NULL)
2381484288deSJeff Roberson 		return (0);
238235e6168fSJeff Roberson 
23833da35a0aSJohn Baldwin 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2384ad1e7d28SJulian Elischer 	if (ts->ts_ticks) {
238535e6168fSJeff Roberson 		int rtick;
238635e6168fSJeff Roberson 
2387ad1e7d28SJulian Elischer 		sched_pctcpu_update(ts);
238835e6168fSJeff Roberson 		/* How many rtick per second ? */
2389e7d50326SJeff Roberson 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2390e7d50326SJeff Roberson 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
239135e6168fSJeff Roberson 	}
239235e6168fSJeff Roberson 
239335e6168fSJeff Roberson 	return (pctcpu);
239435e6168fSJeff Roberson }
239535e6168fSJeff Roberson 
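/*
 * Sketch: consumers scale the returned fixed-point fraction to a
 * percentage with FSHIFT/FSCALE from <sys/param.h>, roughly as ps(1)
 * does:
 *
 *	fixpt_t pct = sched_pctcpu(td);
 *	int whole = (pct * 100) >> FSHIFT;	/* integer percent */
 */
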
239662fa74d9SJeff Roberson /*
239762fa74d9SJeff Roberson  * Enforce affinity settings for a thread.  Called after adjustments to
239862fa74d9SJeff Roberson  * cpumask.
239962fa74d9SJeff Roberson  */
2400885d51a3SJeff Roberson void
2401885d51a3SJeff Roberson sched_affinity(struct thread *td)
2402885d51a3SJeff Roberson {
240362fa74d9SJeff Roberson #ifdef SMP
240462fa74d9SJeff Roberson 	struct td_sched *ts;
240562fa74d9SJeff Roberson 
240662fa74d9SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
240762fa74d9SJeff Roberson 	ts = td->td_sched;
240862fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
240962fa74d9SJeff Roberson 		return;
241053a6c8b3SJeff Roberson 	if (TD_ON_RUNQ(td)) {
241153a6c8b3SJeff Roberson 		sched_rem(td);
241253a6c8b3SJeff Roberson 		sched_add(td, SRQ_BORING);
241353a6c8b3SJeff Roberson 		return;
241453a6c8b3SJeff Roberson 	}
241562fa74d9SJeff Roberson 	if (!TD_IS_RUNNING(td))
241662fa74d9SJeff Roberson 		return;
241762fa74d9SJeff Roberson 	/*
24180f7a0ebdSMatthew D Fleming 	 * Force a switch before returning to userspace.  If the
24190f7a0ebdSMatthew D Fleming 	 * target thread is not running locally, send an IPI to
24200f7a0ebdSMatthew D Fleming 	 * force the issue.
242162fa74d9SJeff Roberson 	 */
2422a8103ae8SJohn Baldwin 	td->td_flags |= TDF_NEEDRESCHED;
24230f7a0ebdSMatthew D Fleming 	if (td != curthread)
24240f7a0ebdSMatthew D Fleming 		ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
242562fa74d9SJeff Roberson #endif
2426885d51a3SJeff Roberson }
2427885d51a3SJeff Roberson 
2428ae7a6b38SJeff Roberson /*
2429ae7a6b38SJeff Roberson  * Bind a thread to a target cpu.
2430ae7a6b38SJeff Roberson  */
24319bacd788SJeff Roberson void
24329bacd788SJeff Roberson sched_bind(struct thread *td, int cpu)
24339bacd788SJeff Roberson {
2434ad1e7d28SJulian Elischer 	struct td_sched *ts;
24359bacd788SJeff Roberson 
2436c47f202bSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
24371d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
2438ad1e7d28SJulian Elischer 	ts = td->td_sched;
24396b2f763fSJeff Roberson 	if (ts->ts_flags & TSF_BOUND)
2440c95d2db2SJeff Roberson 		sched_unbind(td);
24410f7a0ebdSMatthew D Fleming 	KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
2442ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
24436b2f763fSJeff Roberson 	sched_pin();
244480f86c9fSJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
24459bacd788SJeff Roberson 		return;
24466b2f763fSJeff Roberson 	ts->ts_cpu = cpu;
24479bacd788SJeff Roberson 	/* When we return from mi_switch we'll be on the correct cpu. */
2448279f949eSPoul-Henning Kamp 	mi_switch(SW_VOL, NULL);
24499bacd788SJeff Roberson }
24509bacd788SJeff Roberson 
2451ae7a6b38SJeff Roberson /*
2452ae7a6b38SJeff Roberson  * Release a bound thread.
2453ae7a6b38SJeff Roberson  */
24549bacd788SJeff Roberson void
24559bacd788SJeff Roberson sched_unbind(struct thread *td)
24569bacd788SJeff Roberson {
2457e7d50326SJeff Roberson 	struct td_sched *ts;
2458e7d50326SJeff Roberson 
24597b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
24601d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
2461e7d50326SJeff Roberson 	ts = td->td_sched;
24626b2f763fSJeff Roberson 	if ((ts->ts_flags & TSF_BOUND) == 0)
24636b2f763fSJeff Roberson 		return;
2464e7d50326SJeff Roberson 	ts->ts_flags &= ~TSF_BOUND;
2465e7d50326SJeff Roberson 	sched_unpin();
24669bacd788SJeff Roberson }
24679bacd788SJeff Roberson 
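/*
 * Sketch (hedged): the usual pattern for temporarily pinning the
 * current thread with the two calls above:
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);	/* migrates us to 'cpu' if needed */
 *	thread_unlock(curthread);
 *	... per-cpu work ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */
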
246835e6168fSJeff Roberson int
2469ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
2470ebccf1e3SJoseph Koshy {
24717b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2472ad1e7d28SJulian Elischer 	return (td->td_sched->ts_flags & TSF_BOUND);
2473ebccf1e3SJoseph Koshy }
2474ebccf1e3SJoseph Koshy 
2475ae7a6b38SJeff Roberson /*
2476ae7a6b38SJeff Roberson  * Basic yield call.
2477ae7a6b38SJeff Roberson  */
247836ec198bSDavid Xu void
247936ec198bSDavid Xu sched_relinquish(struct thread *td)
248036ec198bSDavid Xu {
24817b20fb19SJeff Roberson 	thread_lock(td);
24828df78c41SJeff Roberson 	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
24837b20fb19SJeff Roberson 	thread_unlock(td);
248436ec198bSDavid Xu }
248536ec198bSDavid Xu 
2486ae7a6b38SJeff Roberson /*
2487ae7a6b38SJeff Roberson  * Return the total system load.
2488ae7a6b38SJeff Roberson  */
2489ebccf1e3SJoseph Koshy int
249033916c36SJeff Roberson sched_load(void)
249133916c36SJeff Roberson {
249233916c36SJeff Roberson #ifdef SMP
249333916c36SJeff Roberson 	int total;
249433916c36SJeff Roberson 	int i;
249533916c36SJeff Roberson 
249633916c36SJeff Roberson 	total = 0;
24973aa6d94eSJohn Baldwin 	CPU_FOREACH(i)
249862fa74d9SJeff Roberson 		total += TDQ_CPU(i)->tdq_sysload;
249933916c36SJeff Roberson 	return (total);
250033916c36SJeff Roberson #else
2501d2ad694cSJeff Roberson 	return (TDQ_SELF()->tdq_sysload);
250233916c36SJeff Roberson #endif
250333916c36SJeff Roberson }
250433916c36SJeff Roberson 
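/*
 * Sketch (hedged; modeled on the load-average computation in
 * kern_synch.c): this total is sampled periodically and folded into
 * the exponentially decayed load averages, approximately:
 *
 *	nrun = sched_load();
 *	avg[i] = (cexp[i] * avg[i] +
 *	    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
 */
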
250533916c36SJeff Roberson int
250635e6168fSJeff Roberson sched_sizeof_proc(void)
250735e6168fSJeff Roberson {
250835e6168fSJeff Roberson 	return (sizeof(struct proc));
250935e6168fSJeff Roberson }
251035e6168fSJeff Roberson 
251135e6168fSJeff Roberson int
251235e6168fSJeff Roberson sched_sizeof_thread(void)
251335e6168fSJeff Roberson {
251435e6168fSJeff Roberson 	return (sizeof(struct thread) + sizeof(struct td_sched));
251535e6168fSJeff Roberson }
2516b41f1452SDavid Xu 
251709c8a4ccSJeff Roberson #ifdef SMP
251809c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)						\
251909c8a4ccSJeff Roberson     ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0)
252009c8a4ccSJeff Roberson #else
252109c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)	1
252209c8a4ccSJeff Roberson #endif
252309c8a4ccSJeff Roberson 
25247a5e5e2aSJeff Roberson /*
25257a5e5e2aSJeff Roberson  * The actual idle process.
25267a5e5e2aSJeff Roberson  */
25277a5e5e2aSJeff Roberson void
25287a5e5e2aSJeff Roberson sched_idletd(void *dummy)
25297a5e5e2aSJeff Roberson {
25307a5e5e2aSJeff Roberson 	struct thread *td;
2531ae7a6b38SJeff Roberson 	struct tdq *tdq;
25321690c6c1SJeff Roberson 	int switchcnt;
25331690c6c1SJeff Roberson 	int i;
25347a5e5e2aSJeff Roberson 
25357b55ab05SJeff Roberson 	mtx_assert(&Giant, MA_NOTOWNED);
25367a5e5e2aSJeff Roberson 	td = curthread;
2537ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2538ae7a6b38SJeff Roberson 	for (;;) {
2539ae7a6b38SJeff Roberson #ifdef SMP
25401690c6c1SJeff Roberson 		if (tdq_idled(tdq) == 0)
25411690c6c1SJeff Roberson 			continue;
2542ae7a6b38SJeff Roberson #endif
25431690c6c1SJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
25441690c6c1SJeff Roberson 		/*
25451690c6c1SJeff Roberson 		 * If we're switching very frequently, spin while checking
25461690c6c1SJeff Roberson 		 * for load rather than entering a low-power state that
25477b55ab05SJeff Roberson 		 * may require an IPI.  However, don't busy-loop on SMT
25487b55ab05SJeff Roberson 		 * machines, as this simply steals cycles from cores
25491690c6c1SJeff Roberson 		 * doing useful work.
25501690c6c1SJeff Roberson 		 */
255109c8a4ccSJeff Roberson 		if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) {
25521690c6c1SJeff Roberson 			for (i = 0; i < sched_idlespins; i++) {
25531690c6c1SJeff Roberson 				if (tdq->tdq_load)
25541690c6c1SJeff Roberson 					break;
25551690c6c1SJeff Roberson 				cpu_spinwait();
25561690c6c1SJeff Roberson 			}
25571690c6c1SJeff Roberson 		}
25586c47aaaeSJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
25599f9ad565SAlexander Motin 		if (tdq->tdq_load == 0) {
25609f9ad565SAlexander Motin 			tdq->tdq_cpu_idle = 1;
25619f9ad565SAlexander Motin 			if (tdq->tdq_load == 0) {
2562a157e425SAlexander Motin 				cpu_idle(switchcnt > sched_idlespinthresh * 4);
25639f9ad565SAlexander Motin 				tdq->tdq_switchcnt++;
25649f9ad565SAlexander Motin 			}
25659f9ad565SAlexander Motin 			tdq->tdq_cpu_idle = 0;
25669f9ad565SAlexander Motin 		}
25671690c6c1SJeff Roberson 		if (tdq->tdq_load) {
25681690c6c1SJeff Roberson 			thread_lock(td);
25691690c6c1SJeff Roberson 			mi_switch(SW_VOL | SWT_IDLE, NULL);
25701690c6c1SJeff Roberson 			thread_unlock(td);
25711690c6c1SJeff Roberson 		}
2572ae7a6b38SJeff Roberson 	}
2573b41f1452SDavid Xu }
2574e7d50326SJeff Roberson 
25757b20fb19SJeff Roberson /*
25767b20fb19SJeff Roberson  * A CPU is entering for the first time or a thread is exiting.
25777b20fb19SJeff Roberson  */
25787b20fb19SJeff Roberson void
25797b20fb19SJeff Roberson sched_throw(struct thread *td)
25807b20fb19SJeff Roberson {
258159c68134SJeff Roberson 	struct thread *newtd;
2582ae7a6b38SJeff Roberson 	struct tdq *tdq;
2583ae7a6b38SJeff Roberson 
2584ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
25857b20fb19SJeff Roberson 	if (td == NULL) {
2586ae7a6b38SJeff Roberson 		/* Correct spinlock nesting and acquire the correct lock. */
2587ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
25887b20fb19SJeff Roberson 		spinlock_exit();
25897b20fb19SJeff Roberson 	} else {
2590ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
25919727e637SJeff Roberson 		tdq_load_rem(tdq, td);
2592eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
25937b20fb19SJeff Roberson 	}
25947b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
259559c68134SJeff Roberson 	newtd = choosethread();
259659c68134SJeff Roberson 	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
25977b20fb19SJeff Roberson 	PCPU_SET(switchtime, cpu_ticks());
25987b20fb19SJeff Roberson 	PCPU_SET(switchticks, ticks);
259959c68134SJeff Roberson 	cpu_throw(td, newtd);		/* doesn't return */
26007b20fb19SJeff Roberson }
26017b20fb19SJeff Roberson 
2602ae7a6b38SJeff Roberson /*
2603ae7a6b38SJeff Roberson  * This is called from fork_exit().  Just acquire the correct locks and
2604ae7a6b38SJeff Roberson  * let fork do the rest of the work.
2605ae7a6b38SJeff Roberson  */
26067b20fb19SJeff Roberson void
2607fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
26087b20fb19SJeff Roberson {
2609ae7a6b38SJeff Roberson 	struct td_sched *ts;
2610ae7a6b38SJeff Roberson 	struct tdq *tdq;
2611ae7a6b38SJeff Roberson 	int cpuid;
26127b20fb19SJeff Roberson 
26137b20fb19SJeff Roberson 	/*
26147b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
2615ae7a6b38SJeff Roberson 	 * non-nested critical section with the scheduler lock held.
26167b20fb19SJeff Roberson 	 */
2617ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2618ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpuid);
2619ae7a6b38SJeff Roberson 	ts = td->td_sched;
2620ae7a6b38SJeff Roberson 	if (TD_IS_IDLETHREAD(td))
2621ae7a6b38SJeff Roberson 		td->td_lock = TDQ_LOCKPTR(tdq);
2622ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2623ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
262459c68134SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2625eea4f254SJeff Roberson 	lock_profile_obtain_lock_success(
2626eea4f254SJeff Roberson 	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
26277b20fb19SJeff Roberson }
26287b20fb19SJeff Roberson 
26298f51ad55SJeff Roberson /*
26308f51ad55SJeff Roberson  * Create the thread name string on first use to catch odd startup conditions.
26318f51ad55SJeff Roberson  */
26328f51ad55SJeff Roberson char *
26338f51ad55SJeff Roberson sched_tdname(struct thread *td)
26348f51ad55SJeff Roberson {
26358f51ad55SJeff Roberson #ifdef KTR
26368f51ad55SJeff Roberson 	struct td_sched *ts;
26378f51ad55SJeff Roberson 
26388f51ad55SJeff Roberson 	ts = td->td_sched;
26398f51ad55SJeff Roberson 	if (ts->ts_name[0] == '\0')
26408f51ad55SJeff Roberson 		snprintf(ts->ts_name, sizeof(ts->ts_name),
26418f51ad55SJeff Roberson 		    "%s tid %d", td->td_name, td->td_tid);
26428f51ad55SJeff Roberson 	return (ts->ts_name);
26438f51ad55SJeff Roberson #else
26448f51ad55SJeff Roberson 	return (td->td_name);
26458f51ad55SJeff Roberson #endif
26468f51ad55SJeff Roberson }
26478f51ad55SJeff Roberson 
264807095abfSIvan Voras #ifdef SMP
264907095abfSIvan Voras 
265007095abfSIvan Voras /*
265107095abfSIvan Voras  * Build the CPU topology dump string.  It is called recursively to
265207095abfSIvan Voras  * collect the topology tree.
265307095abfSIvan Voras  */
265407095abfSIvan Voras static int
265507095abfSIvan Voras sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
265607095abfSIvan Voras     int indent)
265707095abfSIvan Voras {
265871a19bdcSAttilio Rao 	char cpusetbuf[CPUSETBUFSIZ];
265907095abfSIvan Voras 	int i, first;
266007095abfSIvan Voras 
266107095abfSIvan Voras 	sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
266219b8a6dbSAndriy Gapon 	    "", 1 + indent / 2, cg->cg_level);
266371a19bdcSAttilio Rao 	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
266471a19bdcSAttilio Rao 	    cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
266507095abfSIvan Voras 	first = TRUE;
266607095abfSIvan Voras 	for (i = 0; i < MAXCPU; i++) {
266771a19bdcSAttilio Rao 		if (CPU_ISSET(i, &cg->cg_mask)) {
266807095abfSIvan Voras 			if (!first)
266907095abfSIvan Voras 				sbuf_printf(sb, ", ");
267007095abfSIvan Voras 			else
267107095abfSIvan Voras 				first = FALSE;
267207095abfSIvan Voras 			sbuf_printf(sb, "%d", i);
267307095abfSIvan Voras 		}
267407095abfSIvan Voras 	}
267507095abfSIvan Voras 	sbuf_printf(sb, "</cpu>\n");
267607095abfSIvan Voras 
267707095abfSIvan Voras 	if (cg->cg_flags != 0) {
2678611daf7eSIvan Voras 		sbuf_printf(sb, "%*s <flags>", indent, "");
267907095abfSIvan Voras 		if ((cg->cg_flags & CG_FLAG_HTT) != 0)
26805368befbSIvan Voras 			sbuf_printf(sb, "<flag name=\"HTT\">HTT group</flag>");
2681a401f2d0SIvan Voras 		if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
2682a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"THREAD\">THREAD group</flag>");
26837b55ab05SJeff Roberson 		if ((cg->cg_flags & CG_FLAG_SMT) != 0)
2684a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"SMT\">SMT group</flag>");
268507095abfSIvan Voras 		sbuf_printf(sb, "</flags>\n");
2686611daf7eSIvan Voras 	}
268707095abfSIvan Voras 
268807095abfSIvan Voras 	if (cg->cg_children > 0) {
268907095abfSIvan Voras 		sbuf_printf(sb, "%*s <children>\n", indent, "");
269007095abfSIvan Voras 		for (i = 0; i < cg->cg_children; i++)
269107095abfSIvan Voras 			sysctl_kern_sched_topology_spec_internal(sb,
269207095abfSIvan Voras 			    &cg->cg_child[i], indent+2);
269307095abfSIvan Voras 		sbuf_printf(sb, "%*s </children>\n", indent, "");
269407095abfSIvan Voras 	}
269507095abfSIvan Voras 	sbuf_printf(sb, "%*s</group>\n", indent, "");
269607095abfSIvan Voras 	return (0);
269707095abfSIvan Voras }
269807095abfSIvan Voras 
269907095abfSIvan Voras /*
270007095abfSIvan Voras  * Sysctl handler for retrieving the topology dump.  It is a wrapper for
270107095abfSIvan Voras  * the recursive sysctl_kern_sched_topology_spec_internal().
270207095abfSIvan Voras  */
270307095abfSIvan Voras static int
270407095abfSIvan Voras sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
270507095abfSIvan Voras {
270607095abfSIvan Voras 	struct sbuf *topo;
270707095abfSIvan Voras 	int err;
270807095abfSIvan Voras 
270907095abfSIvan Voras 	KASSERT(cpu_top != NULL, ("cpu_top isn't initialized"));
271007095abfSIvan Voras 
2711aa880b90SIvan Voras 	topo = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
271207095abfSIvan Voras 	if (topo == NULL)
271307095abfSIvan Voras 		return (ENOMEM);
271407095abfSIvan Voras 
271507095abfSIvan Voras 	sbuf_printf(topo, "<groups>\n");
271607095abfSIvan Voras 	err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1);
271707095abfSIvan Voras 	sbuf_printf(topo, "</groups>\n");
271807095abfSIvan Voras 
271907095abfSIvan Voras 	if (err == 0) {
272007095abfSIvan Voras 		sbuf_finish(topo);
272107095abfSIvan Voras 		err = SYSCTL_OUT(req, sbuf_data(topo), sbuf_len(topo));
272207095abfSIvan Voras 	}
272307095abfSIvan Voras 	sbuf_delete(topo);
272407095abfSIvan Voras 	return (err);
272507095abfSIvan Voras }
2726b67cc292SDavid Xu 
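/*
 * Illustrative output (hypothetical dual-core machine sharing an L2
 * cache; exact mask formatting comes from cpusetobj_strprint()):
 *
 *	$ sysctl -n kern.sched.topology_spec
 *	<groups>
 *	 <group level="1" cache-level="0">
 *	  <cpu count="2" mask="3">0, 1</cpu>
 *	  <children>
 *	   <group level="2" cache-level="2">
 *	    <cpu count="2" mask="3">0, 1</cpu>
 *	   </group>
 *	  </children>
 *	 </group>
 *	</groups>
 */
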
272707095abfSIvan Voras #endif
272807095abfSIvan Voras 
27299727e637SJeff Roberson SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
2730ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2731e7d50326SJeff Roberson     "Scheduler name");
2732ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2733ae7a6b38SJeff Roberson     "Slice size for timeshare threads");
2734ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2735ae7a6b38SJeff Roberson      "Interactivity score threshold");
2736ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2737ae7a6b38SJeff Roberson     0, "Min priority for preemption; lower priorities have greater precedence");
2738c5aa6b58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost,
2739c5aa6b58SJeff Roberson     0, "Controls whether static kernel priorities are assigned to sleeping threads.");
27401690c6c1SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins,
27411690c6c1SJeff Roberson     0, "Number of times idle will spin waiting for new work.");
27421690c6c1SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW, &sched_idlespinthresh,
27431690c6c1SJeff Roberson     0, "Threshold before we will permit idle spinning.");
27447b8bfa0dSJeff Roberson #ifdef SMP
2745ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2746ae7a6b38SJeff Roberson     "Number of hz ticks to keep thread affinity for");
2747ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2748ae7a6b38SJeff Roberson     "Enables the long-term load balancer");
27497fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
27507fcf154aSJeff Roberson     &balance_interval, 0,
27517fcf154aSJeff Roberson     "Average frequency in stathz ticks to run the long-term balancer");
2752ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2753ae7a6b38SJeff Roberson     "Steals work from another hyper-threaded core on idle");
2754ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2755ae7a6b38SJeff Roberson     "Attempts to steal work from other cores before idling");
275628994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
275728994a58SJeff Roberson     "Minimum load on remote cpu before we'll steal");
275807095abfSIvan Voras 
275907095abfSIvan Voras /* Retrieve SMP topology */
276007095abfSIvan Voras SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
276107095abfSIvan Voras     CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
276207095abfSIvan Voras     "XML dump of detected CPU topology");
2763b67cc292SDavid Xu 
27647b8bfa0dSJeff Roberson #endif
2765e7d50326SJeff Roberson 
276654b0e65fSJeff Roberson /* ps compat.  All cpu percentages from ULE are weighted. */
2767a5423ea3SJeff Roberson static int ccpu = 0;
2768e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2769