xref: /freebsd/sys/kern/sched_ule.c (revision 73daf66f4185d1278c91f4a5afd5f83b5cbfacca)
135e6168fSJeff Roberson /*-
2e7d50326SJeff Roberson  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
335e6168fSJeff Roberson  * All rights reserved.
435e6168fSJeff Roberson  *
535e6168fSJeff Roberson  * Redistribution and use in source and binary forms, with or without
635e6168fSJeff Roberson  * modification, are permitted provided that the following conditions
735e6168fSJeff Roberson  * are met:
835e6168fSJeff Roberson  * 1. Redistributions of source code must retain the above copyright
935e6168fSJeff Roberson  *    notice unmodified, this list of conditions, and the following
1035e6168fSJeff Roberson  *    disclaimer.
1135e6168fSJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
1235e6168fSJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
1335e6168fSJeff Roberson  *    documentation and/or other materials provided with the distribution.
1435e6168fSJeff Roberson  *
1535e6168fSJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1635e6168fSJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1735e6168fSJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1835e6168fSJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1935e6168fSJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2035e6168fSJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2135e6168fSJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2235e6168fSJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2335e6168fSJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2435e6168fSJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2535e6168fSJeff Roberson  */
2635e6168fSJeff Roberson 
27ae7a6b38SJeff Roberson /*
28ae7a6b38SJeff Roberson  * This file implements the ULE scheduler.  ULE supports independent CPU
29ae7a6b38SJeff Roberson  * run queues and fine grain locking.  It has superior interactive
30ae7a6b38SJeff Roberson  * performance under load even on uni-processor systems.
31ae7a6b38SJeff Roberson  *
32ae7a6b38SJeff Roberson  * etymology:
33a5423ea3SJeff Roberson  *   ULE is the last three letters in schedule.  It owes its name to a
34ae7a6b38SJeff Roberson  * generic user created for a scheduling system by Paul Mikesell at
35ae7a6b38SJeff Roberson  * Isilon Systems and a general lack of creativity on the part of the author.
36ae7a6b38SJeff Roberson  */
37ae7a6b38SJeff Roberson 
38677b542eSDavid E. O'Brien #include <sys/cdefs.h>
39677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
40677b542eSDavid E. O'Brien 
414da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
424da0d332SPeter Wemm #include "opt_sched.h"
439923b511SScott Long 
4435e6168fSJeff Roberson #include <sys/param.h>
4535e6168fSJeff Roberson #include <sys/systm.h>
462c3490b1SMarcel Moolenaar #include <sys/kdb.h>
4735e6168fSJeff Roberson #include <sys/kernel.h>
4835e6168fSJeff Roberson #include <sys/ktr.h>
4935e6168fSJeff Roberson #include <sys/lock.h>
5035e6168fSJeff Roberson #include <sys/mutex.h>
5135e6168fSJeff Roberson #include <sys/proc.h>
52245f3abfSJeff Roberson #include <sys/resource.h>
539bacd788SJeff Roberson #include <sys/resourcevar.h>
5435e6168fSJeff Roberson #include <sys/sched.h>
5535e6168fSJeff Roberson #include <sys/smp.h>
5635e6168fSJeff Roberson #include <sys/sx.h>
5735e6168fSJeff Roberson #include <sys/sysctl.h>
5835e6168fSJeff Roberson #include <sys/sysproto.h>
59f5c157d9SJohn Baldwin #include <sys/turnstile.h>
603db720fdSDavid Xu #include <sys/umtx.h>
6135e6168fSJeff Roberson #include <sys/vmmeter.h>
6262fa74d9SJeff Roberson #include <sys/cpuset.h>
6335e6168fSJeff Roberson #ifdef KTRACE
6435e6168fSJeff Roberson #include <sys/uio.h>
6535e6168fSJeff Roberson #include <sys/ktrace.h>
6635e6168fSJeff Roberson #endif
6735e6168fSJeff Roberson 
68ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS
69ebccf1e3SJoseph Koshy #include <sys/pmckern.h>
70ebccf1e3SJoseph Koshy #endif
71ebccf1e3SJoseph Koshy 
7235e6168fSJeff Roberson #include <machine/cpu.h>
7322bf7d9aSJeff Roberson #include <machine/smp.h>
7435e6168fSJeff Roberson 
75cbdd62adSPeter Grehan #if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__)
7602e2d6b4SJeff Roberson #error "This architecture is not currently compatible with ULE"
777a5e5e2aSJeff Roberson #endif
787a5e5e2aSJeff Roberson 
79ae7a6b38SJeff Roberson #define	KTR_ULE	0
8014618990SJeff Roberson 
816b2f763fSJeff Roberson /*
82ae7a6b38SJeff Roberson  * Thread scheduler specific section.  All fields are protected
83ae7a6b38SJeff Roberson  * by the thread lock.
84ed062c8dSJulian Elischer  */
85ad1e7d28SJulian Elischer struct td_sched {
86ae7a6b38SJeff Roberson 	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
87ae7a6b38SJeff Roberson 	struct thread	*ts_thread;	/* Active associated thread. */
88ae7a6b38SJeff Roberson 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
89ae7a6b38SJeff Roberson 	short		ts_flags;	/* TSF_* flags. */
90ae7a6b38SJeff Roberson 	u_char		ts_rqindex;	/* Run queue index. */
91ad1e7d28SJulian Elischer 	u_char		ts_cpu;		/* CPU that we have affinity for. */
9273daf66fSJeff Roberson 	int		ts_rltick;	/* Real last tick, for affinity. */
93ae7a6b38SJeff Roberson 	int		ts_slice;	/* Ticks of slice remaining. */
94ae7a6b38SJeff Roberson 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
95ae7a6b38SJeff Roberson 	u_int		ts_runtime;	/* Number of ticks we were running */
96ad1e7d28SJulian Elischer 	int		ts_ltick;	/* Last tick that we were running on */
97ad1e7d28SJulian Elischer 	int		ts_ftick;	/* First tick that we were running on */
98ad1e7d28SJulian Elischer 	int		ts_ticks;	/* Tick count */
99ed062c8dSJulian Elischer };
100ad1e7d28SJulian Elischer /* flags kept in ts_flags */
1017b8bfa0dSJeff Roberson #define	TSF_BOUND	0x0001		/* Thread can not migrate. */
1027b8bfa0dSJeff Roberson #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
10335e6168fSJeff Roberson 
104ad1e7d28SJulian Elischer static struct td_sched td_sched0;
10535e6168fSJeff Roberson 
10662fa74d9SJeff Roberson #define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
10762fa74d9SJeff Roberson #define	THREAD_CAN_SCHED(td, cpu)	\
10862fa74d9SJeff Roberson     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
10962fa74d9SJeff Roberson 
11035e6168fSJeff Roberson /*
111e7d50326SJeff Roberson  * Cpu percentage computation macros and defines.
112e1f89c22SJeff Roberson  *
113e7d50326SJeff Roberson  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
114e7d50326SJeff Roberson  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
1158ab80cf0SJeff Roberson  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
116e7d50326SJeff Roberson  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
117e7d50326SJeff Roberson  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
118e7d50326SJeff Roberson  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
11935e6168fSJeff Roberson  */
120e7d50326SJeff Roberson #define	SCHED_TICK_SECS		10
121e7d50326SJeff Roberson #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
1228ab80cf0SJeff Roberson #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
123e7d50326SJeff Roberson #define	SCHED_TICK_SHIFT	10
124e7d50326SJeff Roberson #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
125eddb4efaSJeff Roberson #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
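
/*
 * Worked example (an illustrative sketch, not from the source): with
 * hz = 1000, a thread that ran for 4 of the last 10 seconds has
 * accumulated roughly (4 * hz) << SCHED_TICK_SHIFT in ts_ticks over a
 * ts_ltick - ts_ftick window of about 10000 hz ticks, so:
 *
 *	SCHED_TICK_HZ(ts)	~= 4000
 *	SCHED_TICK_TOTAL(ts)	~= 10000
 *
 * giving an estimated cpu utilization of 4000 / 10000 = 40%.
 */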
12635e6168fSJeff Roberson 
12735e6168fSJeff Roberson /*
128e7d50326SJeff Roberson  * These macros determine priorities for non-interactive threads.  They are
129e7d50326SJeff Roberson  * assigned a priority based on their recent cpu utilization as expressed
130e7d50326SJeff Roberson  * by the ratio of ticks to the tick total.  NHALF priorities at the start
131e7d50326SJeff Roberson  * and end of the MIN to MAX timeshare range are only reachable with negative
132e7d50326SJeff Roberson  * or positive nice respectively.
133e7d50326SJeff Roberson  *
134e7d50326SJeff Roberson  * PRI_RANGE:	Priority range for utilization dependent priorities.
135e7d50326SJeff Roberson  * PRI_NRESV:	Number of nice values.
136e7d50326SJeff Roberson  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
137e7d50326SJeff Roberson  * PRI_NICE:	Determines the part of the priority inherited from nice.
138e7d50326SJeff Roberson  */
139e7d50326SJeff Roberson #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
140e7d50326SJeff Roberson #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
141e7d50326SJeff Roberson #define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
142e7d50326SJeff Roberson #define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
143dda713dfSJeff Roberson #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
144e7d50326SJeff Roberson #define	SCHED_PRI_TICKS(ts)						\
145e7d50326SJeff Roberson     (SCHED_TICK_HZ((ts)) /						\
1461e516cf5SJeff Roberson     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
147e7d50326SJeff Roberson #define	SCHED_PRI_NICE(nice)	(nice)
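
/*
 * Worked example (a sketch; sched_priority() holds the authoritative
 * assignment): a non-interactive thread's priority is derived roughly
 * as
 *
 *	pri = SCHED_PRI_MIN + SCHED_PRI_TICKS(ts) + SCHED_PRI_NICE(nice)
 *
 * so a nice 0 thread that consumed ~50% of its measured window lands
 * near the middle of the timeshare range, and heavier cpu use or a
 * positive nice pushes it toward SCHED_PRI_MAX.
 */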
148e7d50326SJeff Roberson 
149e7d50326SJeff Roberson /*
150e7d50326SJeff Roberson  * These determine the interactivity of a process.  Interactivity differs from
151e7d50326SJeff Roberson  * cpu utilization in that it expresses the voluntary time slept vs time ran
152e7d50326SJeff Roberson  * while cpu utilization includes all time not running.  This more accurately
153e7d50326SJeff Roberson  * models the intent of the thread.
15435e6168fSJeff Roberson  *
155407b0157SJeff Roberson  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
156407b0157SJeff Roberson  *		before throttling back.
157d322132cSJeff Roberson  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
158210491d3SJeff Roberson  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
159e1f89c22SJeff Roberson  * INTERACT_THRESH:	Threshold for placement on the current runq.
16035e6168fSJeff Roberson  */
161e7d50326SJeff Roberson #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
162e7d50326SJeff Roberson #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
163210491d3SJeff Roberson #define	SCHED_INTERACT_MAX	(100)
164210491d3SJeff Roberson #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
1654c9612c6SJeff Roberson #define	SCHED_INTERACT_THRESH	(30)
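
/*
 * Illustrative sketch of how these constants combine into a score.
 * This is an assumption for exposition; the kernel's
 * sched_interact_score() is the authoritative version.  Threads that
 * sleep more than they run score below SCHED_INTERACT_HALF; threads
 * that run more score above it (modulo integer rounding); only scores
 * at or below the interactivity threshold are treated as interactive.
 */
static __inline int
sched_interact_score_sketch(u_int runtime, u_int slptime)
{
	u_int div;

	if (runtime > slptime) {
		/* Mostly running: map into the upper half of the range. */
		div = max(1, runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (int)(slptime / div)));
	}
	if (slptime > runtime) {
		/* Mostly sleeping: map into the lower half of the range. */
		div = max(1, slptime / SCHED_INTERACT_HALF);
		return (runtime / div);
	}
	/* Equal time in both states sits exactly in the middle. */
	return (runtime ? SCHED_INTERACT_HALF : 0);
}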
166e1f89c22SJeff Roberson 
16735e6168fSJeff Roberson /*
168e7d50326SJeff Roberson  * tickincr:		Converts a stathz tick into a hz domain scaled by
169e7d50326SJeff Roberson  *			the shift factor.  Without the shift the error rate
170e7d50326SJeff Roberson  *			due to rounding would be unacceptably high.
171e7d50326SJeff Roberson  * realstathz:		stathz is sometimes 0, in which case we run off of hz.
172e7d50326SJeff Roberson  * sched_slice:		Runtime of each thread before rescheduling.
173ae7a6b38SJeff Roberson  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
17435e6168fSJeff Roberson  */
175e7d50326SJeff Roberson static int sched_interact = SCHED_INTERACT_THRESH;
176e7d50326SJeff Roberson static int realstathz;
177e7d50326SJeff Roberson static int tickincr;
17873daf66fSJeff Roberson static int sched_slice = 1;
17902e2d6b4SJeff Roberson #ifdef PREEMPTION
18002e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION
18102e2d6b4SJeff Roberson static int preempt_thresh = PRI_MAX_IDLE;
18202e2d6b4SJeff Roberson #else
183ae7a6b38SJeff Roberson static int preempt_thresh = PRI_MIN_KERN;
18402e2d6b4SJeff Roberson #endif
18502e2d6b4SJeff Roberson #else
18602e2d6b4SJeff Roberson static int preempt_thresh = 0;
18702e2d6b4SJeff Roberson #endif
188ae7a6b38SJeff Roberson 
18935e6168fSJeff Roberson /*
190ae7a6b38SJeff Roberson  * tdq - per processor runqs and statistics.  All fields are protected by the
191ae7a6b38SJeff Roberson  * tdq_lock.  The load and lowpri may be accessed without the lock to avoid
192ae7a6b38SJeff Roberson  * excess locking in sched_pickcpu().
19335e6168fSJeff Roberson  */
194ad1e7d28SJulian Elischer struct tdq {
19573daf66fSJeff Roberson 	/* Ordered to improve efficiency of cpu_search() and switch(). */
19662fa74d9SJeff Roberson 	struct mtx	tdq_lock;		/* run queue lock. */
19773daf66fSJeff Roberson 	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
19873daf66fSJeff Roberson 	int		tdq_load;		/* Aggregate load. */
19973daf66fSJeff Roberson 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
20073daf66fSJeff Roberson 	int		tdq_transferable;	/* Transferable thread count. */
20173daf66fSJeff Roberson 	u_char		tdq_lowpri;		/* Lowest priority thread. */
20273daf66fSJeff Roberson 	u_char		tdq_ipipending;		/* IPI pending. */
20373daf66fSJeff Roberson 	u_char		tdq_idx;		/* Current insert index. */
20473daf66fSJeff Roberson 	u_char		tdq_ridx;		/* Current removal index. */
205e7d50326SJeff Roberson 	struct runq	tdq_realtime;		/* real-time run queue. */
206ae7a6b38SJeff Roberson 	struct runq	tdq_timeshare;		/* timeshare run queue. */
207ae7a6b38SJeff Roberson 	struct runq	tdq_idle;		/* Queue of IDLE threads. */
20862fa74d9SJeff Roberson 	char		tdq_name[sizeof("sched lock") + 6];
209ae7a6b38SJeff Roberson } __aligned(64);
21035e6168fSJeff Roberson 
2117b8bfa0dSJeff Roberson 
21280f86c9fSJeff Roberson #ifdef SMP
21362fa74d9SJeff Roberson struct cpu_group *cpu_top;
2147b8bfa0dSJeff Roberson 
21562fa74d9SJeff Roberson #define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
21662fa74d9SJeff Roberson #define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))
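
/*
 * Example (a sketch): with hz = 1000 the default affinity is 1 tick,
 * so SCHED_AFFINITY(ts, t) is true when the thread last ran on its
 * cpu within the past (t) ticks.  Callers pass a cache sharing level
 * such as CG_SHARE_L2 (see sched_pickcpu() below) to scale the window
 * to how long cached state is expected to survive.
 */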
2177b8bfa0dSJeff Roberson 
2187b8bfa0dSJeff Roberson /*
2197b8bfa0dSJeff Roberson  * Run-time tunables.
2207b8bfa0dSJeff Roberson  */
22128994a58SJeff Roberson static int rebalance = 1;
2227fcf154aSJeff Roberson static int balance_interval = 128;	/* Default set in sched_initticks(). */
2237b8bfa0dSJeff Roberson static int affinity;
2247fcf154aSJeff Roberson static int steal_htt = 1;
22528994a58SJeff Roberson static int steal_idle = 1;
22628994a58SJeff Roberson static int steal_thresh = 2;
22780f86c9fSJeff Roberson 
22835e6168fSJeff Roberson /*
229d2ad694cSJeff Roberson  * One thread queue per processor.
23035e6168fSJeff Roberson  */
231ad1e7d28SJulian Elischer static struct tdq	tdq_cpu[MAXCPU];
2327fcf154aSJeff Roberson static struct tdq	*balance_tdq;
2337fcf154aSJeff Roberson static int balance_ticks;
234dc03363dSJeff Roberson 
235ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
236ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu[(x)])
237c47f202bSJeff Roberson #define	TDQ_ID(x)	((int)((x) - tdq_cpu))
23880f86c9fSJeff Roberson #else	/* !SMP */
239ad1e7d28SJulian Elischer static struct tdq	tdq_cpu;
240dc03363dSJeff Roberson 
24136b36916SJeff Roberson #define	TDQ_ID(x)	(0)
242ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu)
243ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu)
2440a016a05SJeff Roberson #endif
24535e6168fSJeff Roberson 
246ae7a6b38SJeff Roberson #define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
247ae7a6b38SJeff Roberson #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
248ae7a6b38SJeff Roberson #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
249ae7a6b38SJeff Roberson #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
25062fa74d9SJeff Roberson #define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)
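
/*
 * Usage sketch (illustrative only): a thread queue is examined or
 * modified with its spin lock held, e.g.:
 *
 *	struct tdq *tdq = TDQ_SELF();
 *
 *	TDQ_LOCK(tdq);
 *	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 *	...				update tdq_load, run queues, etc.
 *	TDQ_UNLOCK(tdq);
 */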
251ae7a6b38SJeff Roberson 
2528460a577SJohn Birrell static void sched_priority(struct thread *);
25321381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char);
2548460a577SJohn Birrell static int sched_interact_score(struct thread *);
2558460a577SJohn Birrell static void sched_interact_update(struct thread *);
2568460a577SJohn Birrell static void sched_interact_fork(struct thread *);
257ad1e7d28SJulian Elischer static void sched_pctcpu_update(struct td_sched *);
25835e6168fSJeff Roberson 
2595d7ef00cSJeff Roberson /* Operations on per processor queues */
260ad1e7d28SJulian Elischer static struct td_sched * tdq_choose(struct tdq *);
261ad1e7d28SJulian Elischer static void tdq_setup(struct tdq *);
262ad1e7d28SJulian Elischer static void tdq_load_add(struct tdq *, struct td_sched *);
263ad1e7d28SJulian Elischer static void tdq_load_rem(struct tdq *, struct td_sched *);
264ad1e7d28SJulian Elischer static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
265ad1e7d28SJulian Elischer static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
266ff256d9cSJeff Roberson static inline int sched_shouldpreempt(int, int, int);
267ad1e7d28SJulian Elischer void tdq_print(int cpu);
268e7d50326SJeff Roberson static void runq_print(struct runq *rq);
269ae7a6b38SJeff Roberson static void tdq_add(struct tdq *, struct thread *, int);
2705d7ef00cSJeff Roberson #ifdef SMP
27162fa74d9SJeff Roberson static int tdq_move(struct tdq *, struct tdq *);
272ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *);
273ff256d9cSJeff Roberson static void tdq_notify(struct tdq *, struct td_sched *);
27462fa74d9SJeff Roberson static struct td_sched *tdq_steal(struct tdq *, int);
27562fa74d9SJeff Roberson static struct td_sched *runq_steal(struct runq *, int);
276ae7a6b38SJeff Roberson static int sched_pickcpu(struct td_sched *, int);
2777fcf154aSJeff Roberson static void sched_balance(void);
27862fa74d9SJeff Roberson static int sched_balance_pair(struct tdq *, struct tdq *);
279ae7a6b38SJeff Roberson static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
280ae7a6b38SJeff Roberson static inline struct mtx *thread_block_switch(struct thread *);
281ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *);
282c47f202bSJeff Roberson static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
2835d7ef00cSJeff Roberson #endif
2845d7ef00cSJeff Roberson 
285e7d50326SJeff Roberson static void sched_setup(void *dummy);
286e7d50326SJeff Roberson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
287e7d50326SJeff Roberson 
288e7d50326SJeff Roberson static void sched_initticks(void *dummy);
289e7d50326SJeff Roberson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
290e7d50326SJeff Roberson 
291ae7a6b38SJeff Roberson /*
292ae7a6b38SJeff Roberson  * Print the threads waiting on a run-queue.
293ae7a6b38SJeff Roberson  */
294e7d50326SJeff Roberson static void
295e7d50326SJeff Roberson runq_print(struct runq *rq)
296e7d50326SJeff Roberson {
297e7d50326SJeff Roberson 	struct rqhead *rqh;
298e7d50326SJeff Roberson 	struct td_sched *ts;
299e7d50326SJeff Roberson 	int pri;
300e7d50326SJeff Roberson 	int j;
301e7d50326SJeff Roberson 	int i;
302e7d50326SJeff Roberson 
303e7d50326SJeff Roberson 	for (i = 0; i < RQB_LEN; i++) {
304e7d50326SJeff Roberson 		printf("\t\trunq bits %d 0x%zx\n",
305e7d50326SJeff Roberson 		    i, rq->rq_status.rqb_bits[i]);
306e7d50326SJeff Roberson 		for (j = 0; j < RQB_BPW; j++)
307e7d50326SJeff Roberson 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
308e7d50326SJeff Roberson 				pri = j + (i << RQB_L2BPW);
309e7d50326SJeff Roberson 				rqh = &rq->rq_queues[pri];
310e7d50326SJeff Roberson 				TAILQ_FOREACH(ts, rqh, ts_procq) {
311e7d50326SJeff Roberson 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
312431f8906SJulian Elischer 					    ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
313e7d50326SJeff Roberson 				}
314e7d50326SJeff Roberson 			}
315e7d50326SJeff Roberson 	}
316e7d50326SJeff Roberson }
317e7d50326SJeff Roberson 
318ae7a6b38SJeff Roberson /*
319ae7a6b38SJeff Roberson  * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
320ae7a6b38SJeff Roberson  */
32115dc847eSJeff Roberson void
322ad1e7d28SJulian Elischer tdq_print(int cpu)
32315dc847eSJeff Roberson {
324ad1e7d28SJulian Elischer 	struct tdq *tdq;
32515dc847eSJeff Roberson 
326ad1e7d28SJulian Elischer 	tdq = TDQ_CPU(cpu);
32715dc847eSJeff Roberson 
328c47f202bSJeff Roberson 	printf("tdq %d:\n", TDQ_ID(tdq));
32962fa74d9SJeff Roberson 	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
33062fa74d9SJeff Roberson 	printf("\tLock name:      %s\n", tdq->tdq_name);
331d2ad694cSJeff Roberson 	printf("\tload:           %d\n", tdq->tdq_load);
332e7d50326SJeff Roberson 	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
3333f872f85SJeff Roberson 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
334e7d50326SJeff Roberson 	printf("\trealtime runq:\n");
335e7d50326SJeff Roberson 	runq_print(&tdq->tdq_realtime);
336e7d50326SJeff Roberson 	printf("\ttimeshare runq:\n");
337e7d50326SJeff Roberson 	runq_print(&tdq->tdq_timeshare);
338e7d50326SJeff Roberson 	printf("\tidle runq:\n");
339e7d50326SJeff Roberson 	runq_print(&tdq->tdq_idle);
340d2ad694cSJeff Roberson 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
341ae7a6b38SJeff Roberson 	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
34215dc847eSJeff Roberson }
34315dc847eSJeff Roberson 
344ff256d9cSJeff Roberson static inline int
345ff256d9cSJeff Roberson sched_shouldpreempt(int pri, int cpri, int remote)
346ff256d9cSJeff Roberson {
347ff256d9cSJeff Roberson 	/*
348ff256d9cSJeff Roberson 	 * If the new priority is not better than the current priority there is
349ff256d9cSJeff Roberson 	 * nothing to do.
350ff256d9cSJeff Roberson 	 */
351ff256d9cSJeff Roberson 	if (pri >= cpri)
352ff256d9cSJeff Roberson 		return (0);
353ff256d9cSJeff Roberson 	/*
354ff256d9cSJeff Roberson 	 * Always preempt idle.
355ff256d9cSJeff Roberson 	 */
356ff256d9cSJeff Roberson 	if (cpri >= PRI_MIN_IDLE)
357ff256d9cSJeff Roberson 		return (1);
358ff256d9cSJeff Roberson 	/*
359ff256d9cSJeff Roberson 	 * If preemption is disabled don't preempt others.
360ff256d9cSJeff Roberson 	 */
361ff256d9cSJeff Roberson 	if (preempt_thresh == 0)
362ff256d9cSJeff Roberson 		return (0);
363ff256d9cSJeff Roberson 	/*
364ff256d9cSJeff Roberson 	 * Preempt if we exceed the threshold.
365ff256d9cSJeff Roberson 	 */
366ff256d9cSJeff Roberson 	if (pri <= preempt_thresh)
367ff256d9cSJeff Roberson 		return (1);
368ff256d9cSJeff Roberson 	/*
369ff256d9cSJeff Roberson 	 * If we're realtime or better and there is timeshare or worse running
370ff256d9cSJeff Roberson 	 * preempt only remote processors.
371ff256d9cSJeff Roberson 	 */
372ff256d9cSJeff Roberson 	if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
373ff256d9cSJeff Roberson 		return (1);
374ff256d9cSJeff Roberson 	return (0);
375ff256d9cSJeff Roberson }
376ff256d9cSJeff Roberson 
377ae7a6b38SJeff Roberson #define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
378ae7a6b38SJeff Roberson /*
379ae7a6b38SJeff Roberson  * Add a thread to the actual run-queue.  Keeps transferable counts up to
380ae7a6b38SJeff Roberson  * date with what is actually on the run-queue.  Selects the correct
381ae7a6b38SJeff Roberson  * queue position for timeshare threads.
382ae7a6b38SJeff Roberson  */
383155b9987SJeff Roberson static __inline void
384ad1e7d28SJulian Elischer tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
385155b9987SJeff Roberson {
386ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
387ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
38873daf66fSJeff Roberson 
38973daf66fSJeff Roberson 	TD_SET_RUNQ(ts->ts_thread);
390e7d50326SJeff Roberson 	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
391d2ad694cSJeff Roberson 		tdq->tdq_transferable++;
392ad1e7d28SJulian Elischer 		ts->ts_flags |= TSF_XFERABLE;
39380f86c9fSJeff Roberson 	}
394e7d50326SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
395ed0e8f2fSJeff Roberson 		u_char pri;
396e7d50326SJeff Roberson 
397e7d50326SJeff Roberson 		pri = ts->ts_thread->td_priority;
398e7d50326SJeff Roberson 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
399e7d50326SJeff Roberson 			("Invalid priority %d on timeshare runq", pri));
400e7d50326SJeff Roberson 		/*
401e7d50326SJeff Roberson 		 * This queue contains only priorities between MIN and MAX
402e7d50326SJeff Roberson 		 * timeshare.  Use the whole queue to represent these values.
403e7d50326SJeff Roberson 		 */
404c47f202bSJeff Roberson 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
405e7d50326SJeff Roberson 			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
406e7d50326SJeff Roberson 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
4073f872f85SJeff Roberson 			/*
4083f872f85SJeff Roberson 			 * This effectively shortens the queue by one so we
4093f872f85SJeff Roberson 			 * can have a one slot difference between idx and
4103f872f85SJeff Roberson 			 * ridx while we wait for threads to drain.
4113f872f85SJeff Roberson 			 */
4123f872f85SJeff Roberson 			if (tdq->tdq_ridx != tdq->tdq_idx &&
4133f872f85SJeff Roberson 			    pri == tdq->tdq_ridx)
4144499aff6SJeff Roberson 				pri = (unsigned char)(pri - 1) % RQ_NQS;
415e7d50326SJeff Roberson 		} else
4163f872f85SJeff Roberson 			pri = tdq->tdq_ridx;
417e7d50326SJeff Roberson 		runq_add_pri(ts->ts_runq, ts, pri, flags);
418e7d50326SJeff Roberson 	} else
419ad1e7d28SJulian Elischer 		runq_add(ts->ts_runq, ts, flags);
420155b9987SJeff Roberson }
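
/*
 * Worked example (a sketch assuming the usual 64-queue runq, where
 * TS_RQ_PPQ == 1): a thread 8 priorities above PRI_MIN_TIMESHARE
 * enqueued while tdq_idx is 60 maps to queue (8 + 60) % 64 = 4.
 * Because inserts are made relative to the advancing tdq_idx while
 * removals drain from tdq_ridx, every queued timeshare thread gets a
 * turn before the head wraps back around to newer, higher-priority
 * arrivals.
 */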
421155b9987SJeff Roberson 
422ae7a6b38SJeff Roberson /*
42373daf66fSJeff Roberson  * Pick the run queue based on priority.
42473daf66fSJeff Roberson  */
42573daf66fSJeff Roberson static __inline void
42673daf66fSJeff Roberson tdq_runq_pick(struct tdq *tdq, struct td_sched *ts)
42773daf66fSJeff Roberson {
42873daf66fSJeff Roberson 	int pri;
42973daf66fSJeff Roberson 
43073daf66fSJeff Roberson 	pri = ts->ts_thread->td_priority;
43173daf66fSJeff Roberson 	if (pri <= PRI_MAX_REALTIME)
43273daf66fSJeff Roberson 		ts->ts_runq = &tdq->tdq_realtime;
43373daf66fSJeff Roberson 	else if (pri <= PRI_MAX_TIMESHARE)
43473daf66fSJeff Roberson 		ts->ts_runq = &tdq->tdq_timeshare;
43573daf66fSJeff Roberson 	else
43673daf66fSJeff Roberson 		ts->ts_runq = &tdq->tdq_idle;
43773daf66fSJeff Roberson }
43873daf66fSJeff Roberson 
43973daf66fSJeff Roberson /*
440ae7a6b38SJeff Roberson  * Remove a thread from a run-queue.  This typically happens when a thread
441ae7a6b38SJeff Roberson  * is selected to run.  Running threads are not on the queue and the
442ae7a6b38SJeff Roberson  * transferable count does not reflect them.
443ae7a6b38SJeff Roberson  */
444155b9987SJeff Roberson static __inline void
445ad1e7d28SJulian Elischer tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
446155b9987SJeff Roberson {
447ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
448ae7a6b38SJeff Roberson 	KASSERT(ts->ts_runq != NULL,
449ae7a6b38SJeff Roberson 	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
450ad1e7d28SJulian Elischer 	if (ts->ts_flags & TSF_XFERABLE) {
451d2ad694cSJeff Roberson 		tdq->tdq_transferable--;
452ad1e7d28SJulian Elischer 		ts->ts_flags &= ~TSF_XFERABLE;
45380f86c9fSJeff Roberson 	}
4543f872f85SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
4553f872f85SJeff Roberson 		if (tdq->tdq_idx != tdq->tdq_ridx)
4563f872f85SJeff Roberson 			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
457e7d50326SJeff Roberson 		else
4583f872f85SJeff Roberson 			runq_remove_idx(ts->ts_runq, ts, NULL);
4598ab80cf0SJeff Roberson 		ts->ts_ltick = ticks;
4603f872f85SJeff Roberson 	} else
461ad1e7d28SJulian Elischer 		runq_remove(ts->ts_runq, ts);
462155b9987SJeff Roberson }
463155b9987SJeff Roberson 
464ae7a6b38SJeff Roberson /*
465ae7a6b38SJeff Roberson  * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
466ae7a6b38SJeff Roberson  * for this thread to the referenced thread queue.
467ae7a6b38SJeff Roberson  */
468a8949de2SJeff Roberson static void
469ad1e7d28SJulian Elischer tdq_load_add(struct tdq *tdq, struct td_sched *ts)
4705d7ef00cSJeff Roberson {
471ef1134c9SJeff Roberson 	int class;
472ae7a6b38SJeff Roberson 
473ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
474ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
475ad1e7d28SJulian Elischer 	class = PRI_BASE(ts->ts_thread->td_pri_class);
476d2ad694cSJeff Roberson 	tdq->tdq_load++;
477c47f202bSJeff Roberson 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
4787b8bfa0dSJeff Roberson 	if (class != PRI_ITHD &&
4797b8bfa0dSJeff Roberson 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
480d2ad694cSJeff Roberson 		tdq->tdq_sysload++;
4815d7ef00cSJeff Roberson }
48215dc847eSJeff Roberson 
483ae7a6b38SJeff Roberson /*
484ae7a6b38SJeff Roberson  * Remove the load from a thread that is transitioning to a sleep state or
485ae7a6b38SJeff Roberson  * exiting.
486ae7a6b38SJeff Roberson  */
487a8949de2SJeff Roberson static void
488ad1e7d28SJulian Elischer tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
4895d7ef00cSJeff Roberson {
490ef1134c9SJeff Roberson 	int class;
491ae7a6b38SJeff Roberson 
492ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
493ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
494ad1e7d28SJulian Elischer 	class = PRI_BASE(ts->ts_thread->td_pri_class);
4957b8bfa0dSJeff Roberson 	if (class != PRI_ITHD &&
4967b8bfa0dSJeff Roberson 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
497d2ad694cSJeff Roberson 		tdq->tdq_sysload--;
498ae7a6b38SJeff Roberson 	KASSERT(tdq->tdq_load != 0,
499c47f202bSJeff Roberson 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
500d2ad694cSJeff Roberson 	tdq->tdq_load--;
501d2ad694cSJeff Roberson 	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
502ad1e7d28SJulian Elischer 	ts->ts_runq = NULL;
50315dc847eSJeff Roberson }
50415dc847eSJeff Roberson 
505356500a3SJeff Roberson /*
50662fa74d9SJeff Roberson  * Set lowpri to its exact value by searching the run-queue and
50762fa74d9SJeff Roberson  * evaluating curthread.  curthread may be passed as an optimization.
508356500a3SJeff Roberson  */
50922bf7d9aSJeff Roberson static void
51062fa74d9SJeff Roberson tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
51162fa74d9SJeff Roberson {
51262fa74d9SJeff Roberson 	struct td_sched *ts;
51362fa74d9SJeff Roberson 	struct thread *td;
51462fa74d9SJeff Roberson 
51562fa74d9SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
51662fa74d9SJeff Roberson 	if (ctd == NULL)
51762fa74d9SJeff Roberson 		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
51862fa74d9SJeff Roberson 	ts = tdq_choose(tdq);
51962fa74d9SJeff Roberson 	if (ts)
52062fa74d9SJeff Roberson 		td = ts->ts_thread;
52162fa74d9SJeff Roberson 	if (ts == NULL || td->td_priority > ctd->td_priority)
52262fa74d9SJeff Roberson 		tdq->tdq_lowpri = ctd->td_priority;
52362fa74d9SJeff Roberson 	else
52462fa74d9SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
52562fa74d9SJeff Roberson }
52662fa74d9SJeff Roberson 
52762fa74d9SJeff Roberson #ifdef SMP
52862fa74d9SJeff Roberson struct cpu_search {
52962fa74d9SJeff Roberson 	cpumask_t cs_mask;	/* Mask of valid cpus. */
53062fa74d9SJeff Roberson 	u_int	cs_load;
53162fa74d9SJeff Roberson 	u_int	cs_cpu;
53262fa74d9SJeff Roberson 	int	cs_limit;	/* Min priority for low, min load for high. */
53362fa74d9SJeff Roberson };
53462fa74d9SJeff Roberson 
53562fa74d9SJeff Roberson #define	CPU_SEARCH_LOWEST	0x1
53662fa74d9SJeff Roberson #define	CPU_SEARCH_HIGHEST	0x2
53762fa74d9SJeff Roberson #define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)
53862fa74d9SJeff Roberson 
53962fa74d9SJeff Roberson #define	CPUMASK_FOREACH(cpu, mask)				\
54062fa74d9SJeff Roberson 	for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++)	\
54162fa74d9SJeff Roberson 		if ((mask) & 1 << (cpu))
54262fa74d9SJeff Roberson 
54362fa74d9SJeff Roberson __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
54462fa74d9SJeff Roberson     struct cpu_search *high, const int match);
54562fa74d9SJeff Roberson int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low);
54662fa74d9SJeff Roberson int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high);
54762fa74d9SJeff Roberson int cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
54862fa74d9SJeff Roberson     struct cpu_search *high);
54962fa74d9SJeff Roberson 
55062fa74d9SJeff Roberson /*
55162fa74d9SJeff Roberson  * This routine compares according to the match argument and should be
55262fa74d9SJeff Roberson  * reduced in actual instantiations via constant propagation and dead code
55362fa74d9SJeff Roberson  * elimination.
55462fa74d9SJeff Roberson  */
55562fa74d9SJeff Roberson static __inline int
55662fa74d9SJeff Roberson cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high,
55762fa74d9SJeff Roberson     const int match)
55862fa74d9SJeff Roberson {
55962fa74d9SJeff Roberson 	struct tdq *tdq;
56062fa74d9SJeff Roberson 
56162fa74d9SJeff Roberson 	tdq = TDQ_CPU(cpu);
56262fa74d9SJeff Roberson 	if (match & CPU_SEARCH_LOWEST)
56362fa74d9SJeff Roberson 		if (low->cs_mask & (1 << cpu) &&
56462fa74d9SJeff Roberson 		    tdq->tdq_load < low->cs_load &&
56562fa74d9SJeff Roberson 		    tdq->tdq_lowpri > low->cs_limit) {
56662fa74d9SJeff Roberson 			low->cs_cpu = cpu;
56762fa74d9SJeff Roberson 			low->cs_load = tdq->tdq_load;
56862fa74d9SJeff Roberson 		}
56962fa74d9SJeff Roberson 	if (match & CPU_SEARCH_HIGHEST)
57062fa74d9SJeff Roberson 		if (high->cs_mask & (1 << cpu) &&
57162fa74d9SJeff Roberson 		    tdq->tdq_load >= high->cs_limit &&
57262fa74d9SJeff Roberson 		    tdq->tdq_load > high->cs_load &&
57362fa74d9SJeff Roberson 		    tdq->tdq_transferable) {
57462fa74d9SJeff Roberson 			high->cs_cpu = cpu;
57562fa74d9SJeff Roberson 			high->cs_load = tdq->tdq_load;
57662fa74d9SJeff Roberson 		}
57762fa74d9SJeff Roberson 	return (tdq->tdq_load);
57862fa74d9SJeff Roberson }
57962fa74d9SJeff Roberson 
58062fa74d9SJeff Roberson /*
58162fa74d9SJeff Roberson  * Search the tree of cpu_groups for the lowest or highest loaded cpu
58262fa74d9SJeff Roberson  * according to the match argument.  This routine actually compares the
58362fa74d9SJeff Roberson  * load on all paths through the tree and finds the least loaded cpu on
58462fa74d9SJeff Roberson  * the least loaded path, which may differ from the least loaded cpu in
58562fa74d9SJeff Roberson  * the system.  This balances work among caches and busses.
58662fa74d9SJeff Roberson  *
58762fa74d9SJeff Roberson  * This inline is instantiated in three forms below using constants for the
58862fa74d9SJeff Roberson  * match argument.  It is reduced to the minimum set for each case.  It is
58962fa74d9SJeff Roberson  * also recursive to the depth of the tree.
59062fa74d9SJeff Roberson  */
59162fa74d9SJeff Roberson static inline int
59262fa74d9SJeff Roberson cpu_search(struct cpu_group *cg, struct cpu_search *low,
59362fa74d9SJeff Roberson     struct cpu_search *high, const int match)
59462fa74d9SJeff Roberson {
59562fa74d9SJeff Roberson 	int total;
59662fa74d9SJeff Roberson 
59762fa74d9SJeff Roberson 	total = 0;
59862fa74d9SJeff Roberson 	if (cg->cg_children) {
59962fa74d9SJeff Roberson 		struct cpu_search lgroup;
60062fa74d9SJeff Roberson 		struct cpu_search hgroup;
60162fa74d9SJeff Roberson 		struct cpu_group *child;
60262fa74d9SJeff Roberson 		u_int lload;
60362fa74d9SJeff Roberson 		int hload;
60462fa74d9SJeff Roberson 		int load;
60562fa74d9SJeff Roberson 		int i;
60662fa74d9SJeff Roberson 
60762fa74d9SJeff Roberson 		lload = -1;
60862fa74d9SJeff Roberson 		hload = -1;
60962fa74d9SJeff Roberson 		for (i = 0; i < cg->cg_children; i++) {
61062fa74d9SJeff Roberson 			child = &cg->cg_child[i];
61162fa74d9SJeff Roberson 			if (match & CPU_SEARCH_LOWEST) {
61262fa74d9SJeff Roberson 				lgroup = *low;
61362fa74d9SJeff Roberson 				lgroup.cs_load = -1;
61462fa74d9SJeff Roberson 			}
61562fa74d9SJeff Roberson 			if (match & CPU_SEARCH_HIGHEST) {
61662fa74d9SJeff Roberson 				hgroup = *high;
61762fa74d9SJeff Roberson 				hgroup.cs_load = 0;
61862fa74d9SJeff Roberson 			}
61962fa74d9SJeff Roberson 			switch (match) {
62062fa74d9SJeff Roberson 			case CPU_SEARCH_LOWEST:
62162fa74d9SJeff Roberson 				load = cpu_search_lowest(child, &lgroup);
62262fa74d9SJeff Roberson 				break;
62362fa74d9SJeff Roberson 			case CPU_SEARCH_HIGHEST:
62462fa74d9SJeff Roberson 				load = cpu_search_highest(child, &hgroup);
62562fa74d9SJeff Roberson 				break;
62662fa74d9SJeff Roberson 			case CPU_SEARCH_BOTH:
62762fa74d9SJeff Roberson 				load = cpu_search_both(child, &lgroup, &hgroup);
62862fa74d9SJeff Roberson 				break;
62962fa74d9SJeff Roberson 			}
63062fa74d9SJeff Roberson 			total += load;
63162fa74d9SJeff Roberson 			if (match & CPU_SEARCH_LOWEST)
63262fa74d9SJeff Roberson 				if (load < lload || low->cs_cpu == -1) {
63362fa74d9SJeff Roberson 					*low = lgroup;
63462fa74d9SJeff Roberson 					lload = load;
63562fa74d9SJeff Roberson 				}
63662fa74d9SJeff Roberson 			if (match & CPU_SEARCH_HIGHEST)
63762fa74d9SJeff Roberson 				if (load > hload || high->cs_cpu == -1) {
63862fa74d9SJeff Roberson 					hload = load;
63962fa74d9SJeff Roberson 					*high = hgroup;
64062fa74d9SJeff Roberson 				}
64162fa74d9SJeff Roberson 		}
64262fa74d9SJeff Roberson 	} else {
64362fa74d9SJeff Roberson 		int cpu;
64462fa74d9SJeff Roberson 
64562fa74d9SJeff Roberson 		CPUMASK_FOREACH(cpu, cg->cg_mask)
64662fa74d9SJeff Roberson 			total += cpu_compare(cpu, low, high, match);
64762fa74d9SJeff Roberson 	}
64862fa74d9SJeff Roberson 	return (total);
64962fa74d9SJeff Roberson }
65062fa74d9SJeff Roberson 
65162fa74d9SJeff Roberson /*
65262fa74d9SJeff Roberson  * cpu_search instantiations must pass constants to maintain the inline
65362fa74d9SJeff Roberson  * optimization.
65462fa74d9SJeff Roberson  */
65562fa74d9SJeff Roberson int
65662fa74d9SJeff Roberson cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low)
65762fa74d9SJeff Roberson {
65862fa74d9SJeff Roberson 	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
65962fa74d9SJeff Roberson }
66062fa74d9SJeff Roberson 
66162fa74d9SJeff Roberson int
66262fa74d9SJeff Roberson cpu_search_highest(struct cpu_group *cg, struct cpu_search *high)
66362fa74d9SJeff Roberson {
66462fa74d9SJeff Roberson 	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
66562fa74d9SJeff Roberson }
66662fa74d9SJeff Roberson 
66762fa74d9SJeff Roberson int
66862fa74d9SJeff Roberson cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
66962fa74d9SJeff Roberson     struct cpu_search *high)
67062fa74d9SJeff Roberson {
67162fa74d9SJeff Roberson 	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
67262fa74d9SJeff Roberson }
67362fa74d9SJeff Roberson 
67462fa74d9SJeff Roberson /*
67562fa74d9SJeff Roberson  * Find the cpu with the least load via the least loaded path that has a
67662fa74d9SJeff Roberson  * lowpri greater than pri.  A pri of -1 indicates any priority is
67762fa74d9SJeff Roberson  * acceptable.
67862fa74d9SJeff Roberson  */
67962fa74d9SJeff Roberson static inline int
68062fa74d9SJeff Roberson sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri)
68162fa74d9SJeff Roberson {
68262fa74d9SJeff Roberson 	struct cpu_search low;
68362fa74d9SJeff Roberson 
68462fa74d9SJeff Roberson 	low.cs_cpu = -1;
68562fa74d9SJeff Roberson 	low.cs_load = -1;
68662fa74d9SJeff Roberson 	low.cs_mask = mask;
68762fa74d9SJeff Roberson 	low.cs_limit = pri;
68862fa74d9SJeff Roberson 	cpu_search_lowest(cg, &low);
68962fa74d9SJeff Roberson 	return low.cs_cpu;
69062fa74d9SJeff Roberson }
69162fa74d9SJeff Roberson 
69262fa74d9SJeff Roberson /*
69362fa74d9SJeff Roberson  * Find the cpu with the highest load via the highest loaded path.
69462fa74d9SJeff Roberson  */
69562fa74d9SJeff Roberson static inline int
69662fa74d9SJeff Roberson sched_highest(struct cpu_group *cg, cpumask_t mask, int minload)
69762fa74d9SJeff Roberson {
69862fa74d9SJeff Roberson 	struct cpu_search high;
69962fa74d9SJeff Roberson 
70062fa74d9SJeff Roberson 	high.cs_cpu = -1;
70162fa74d9SJeff Roberson 	high.cs_load = 0;
70262fa74d9SJeff Roberson 	high.cs_mask = mask;
70362fa74d9SJeff Roberson 	high.cs_limit = minload;
70462fa74d9SJeff Roberson 	cpu_search_highest(cg, &high);
70562fa74d9SJeff Roberson 	return high.cs_cpu;
70662fa74d9SJeff Roberson }
70762fa74d9SJeff Roberson 
70862fa74d9SJeff Roberson /*
70962fa74d9SJeff Roberson  * Simultaneously find the highest and lowest loaded cpu reachable via
71062fa74d9SJeff Roberson  * cg.
71162fa74d9SJeff Roberson  */
71262fa74d9SJeff Roberson static inline void
71362fa74d9SJeff Roberson sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu)
71462fa74d9SJeff Roberson {
71562fa74d9SJeff Roberson 	struct cpu_search high;
71662fa74d9SJeff Roberson 	struct cpu_search low;
71762fa74d9SJeff Roberson 
71862fa74d9SJeff Roberson 	low.cs_cpu = -1;
71962fa74d9SJeff Roberson 	low.cs_limit = -1;
72062fa74d9SJeff Roberson 	low.cs_load = -1;
72162fa74d9SJeff Roberson 	low.cs_mask = mask;
72262fa74d9SJeff Roberson 	high.cs_load = 0;
72362fa74d9SJeff Roberson 	high.cs_cpu = -1;
72462fa74d9SJeff Roberson 	high.cs_limit = -1;
72562fa74d9SJeff Roberson 	high.cs_mask = mask;
72662fa74d9SJeff Roberson 	cpu_search_both(cg, &low, &high);
72762fa74d9SJeff Roberson 	*lowcpu = low.cs_cpu;
72862fa74d9SJeff Roberson 	*highcpu = high.cs_cpu;
72962fa74d9SJeff Roberson 	return;
73062fa74d9SJeff Roberson }
73162fa74d9SJeff Roberson 
73262fa74d9SJeff Roberson static void
73362fa74d9SJeff Roberson sched_balance_group(struct cpu_group *cg)
73462fa74d9SJeff Roberson {
73562fa74d9SJeff Roberson 	cpumask_t mask;
73662fa74d9SJeff Roberson 	int high;
73762fa74d9SJeff Roberson 	int low;
73862fa74d9SJeff Roberson 	int i;
73962fa74d9SJeff Roberson 
74062fa74d9SJeff Roberson 	mask = -1;
74162fa74d9SJeff Roberson 	for (;;) {
74262fa74d9SJeff Roberson 		sched_both(cg, mask, &low, &high);
74362fa74d9SJeff Roberson 		if (low == high || low == -1 || high == -1)
74462fa74d9SJeff Roberson 			break;
74562fa74d9SJeff Roberson 		if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low)))
74662fa74d9SJeff Roberson 			break;
74762fa74d9SJeff Roberson 		/*
74862fa74d9SJeff Roberson 		 * If we failed to move any threads, determine which cpu
74962fa74d9SJeff Roberson 		 * to kick out of the set and try again.
75062fa74d9SJeff Roberson 		 */
75162fa74d9SJeff Roberson 		if (TDQ_CPU(high)->tdq_transferable == 0)
75262fa74d9SJeff Roberson 			mask &= ~(1 << high);
75362fa74d9SJeff Roberson 		else
75462fa74d9SJeff Roberson 			mask &= ~(1 << low);
75562fa74d9SJeff Roberson 	}
75662fa74d9SJeff Roberson 
75762fa74d9SJeff Roberson 	for (i = 0; i < cg->cg_children; i++)
75862fa74d9SJeff Roberson 		sched_balance_group(&cg->cg_child[i]);
75962fa74d9SJeff Roberson }
76062fa74d9SJeff Roberson 
76162fa74d9SJeff Roberson static void
7627fcf154aSJeff Roberson sched_balance(void)
763356500a3SJeff Roberson {
7647fcf154aSJeff Roberson 	struct tdq *tdq;
765356500a3SJeff Roberson 
7667fcf154aSJeff Roberson 	/*
7677fcf154aSJeff Roberson 	 * Select a random time between .5 * balance_interval and
7687fcf154aSJeff Roberson 	 * 1.5 * balance_interval.
7697fcf154aSJeff Roberson 	 */
7707fcf154aSJeff Roberson 	balance_ticks = max(balance_interval / 2, 1);
7717fcf154aSJeff Roberson 	balance_ticks += random() % balance_interval;
772ae7a6b38SJeff Roberson 	if (smp_started == 0 || rebalance == 0)
773598b368dSJeff Roberson 		return;
7747fcf154aSJeff Roberson 	tdq = TDQ_SELF();
7757fcf154aSJeff Roberson 	TDQ_UNLOCK(tdq);
77662fa74d9SJeff Roberson 	sched_balance_group(cpu_top);
7777fcf154aSJeff Roberson 	TDQ_LOCK(tdq);
778cac77d04SJeff Roberson }
77986f8ae96SJeff Roberson 
780ae7a6b38SJeff Roberson /*
781ae7a6b38SJeff Roberson  * Lock two thread queues using their address to maintain lock order.
782ae7a6b38SJeff Roberson  */
783ae7a6b38SJeff Roberson static void
784ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two)
785ae7a6b38SJeff Roberson {
786ae7a6b38SJeff Roberson 	if (one < two) {
787ae7a6b38SJeff Roberson 		TDQ_LOCK(one);
788ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
789ae7a6b38SJeff Roberson 	} else {
790ae7a6b38SJeff Roberson 		TDQ_LOCK(two);
791ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
792ae7a6b38SJeff Roberson 	}
793ae7a6b38SJeff Roberson }
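
/*
 * Example (illustrative): because the pair is always taken in address
 * order, two cpus balancing against each other cannot deadlock:
 *
 *	cpu0: tdq_lock_pair(&tdq_cpu[0], &tdq_cpu[1]);
 *	cpu1: tdq_lock_pair(&tdq_cpu[1], &tdq_cpu[0]);
 *
 * Both acquire tdq_cpu[0]'s lock before tdq_cpu[1]'s.
 */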
794ae7a6b38SJeff Roberson 
795ae7a6b38SJeff Roberson /*
7967fcf154aSJeff Roberson  * Unlock two thread queues.  Order is not important here.
7977fcf154aSJeff Roberson  */
7987fcf154aSJeff Roberson static void
7997fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two)
8007fcf154aSJeff Roberson {
8017fcf154aSJeff Roberson 	TDQ_UNLOCK(one);
8027fcf154aSJeff Roberson 	TDQ_UNLOCK(two);
8037fcf154aSJeff Roberson }
8047fcf154aSJeff Roberson 
8057fcf154aSJeff Roberson /*
806ae7a6b38SJeff Roberson  * Transfer load between two imbalanced thread queues.
807ae7a6b38SJeff Roberson  */
80862fa74d9SJeff Roberson static int
809ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low)
810cac77d04SJeff Roberson {
811cac77d04SJeff Roberson 	int transferable;
812cac77d04SJeff Roberson 	int high_load;
813cac77d04SJeff Roberson 	int low_load;
81462fa74d9SJeff Roberson 	int moved;
815cac77d04SJeff Roberson 	int move;
816cac77d04SJeff Roberson 	int diff;
817cac77d04SJeff Roberson 	int i;
818cac77d04SJeff Roberson 
819ae7a6b38SJeff Roberson 	tdq_lock_pair(high, low);
820d2ad694cSJeff Roberson 	transferable = high->tdq_transferable;
821d2ad694cSJeff Roberson 	high_load = high->tdq_load;
822d2ad694cSJeff Roberson 	low_load = low->tdq_load;
82362fa74d9SJeff Roberson 	moved = 0;
824155b9987SJeff Roberson 	/*
825155b9987SJeff Roberson 	 * Determine what the imbalance is and then adjust that to how many
826d2ad694cSJeff Roberson 	 * threads we actually have to give up (transferable).
827155b9987SJeff Roberson 	 */
828ae7a6b38SJeff Roberson 	if (transferable != 0) {
829cac77d04SJeff Roberson 		diff = high_load - low_load;
830356500a3SJeff Roberson 		move = diff / 2;
831356500a3SJeff Roberson 		if (diff & 0x1)
832356500a3SJeff Roberson 			move++;
83380f86c9fSJeff Roberson 		move = min(move, transferable);
834356500a3SJeff Roberson 		for (i = 0; i < move; i++)
83562fa74d9SJeff Roberson 			moved += tdq_move(high, low);
836a5423ea3SJeff Roberson 		/*
837a5423ea3SJeff Roberson 		 * IPI the target cpu to force it to reschedule with the new
838a5423ea3SJeff Roberson 		 * workload.
839a5423ea3SJeff Roberson 		 */
840a5423ea3SJeff Roberson 		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
841ae7a6b38SJeff Roberson 	}
8427fcf154aSJeff Roberson 	tdq_unlock_pair(high, low);
84362fa74d9SJeff Roberson 	return (moved);
844356500a3SJeff Roberson }
845356500a3SJeff Roberson 
846ae7a6b38SJeff Roberson /*
847ae7a6b38SJeff Roberson  * Move a thread from one thread queue to another.
848ae7a6b38SJeff Roberson  */
84962fa74d9SJeff Roberson static int
850ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to)
851356500a3SJeff Roberson {
852ad1e7d28SJulian Elischer 	struct td_sched *ts;
853ae7a6b38SJeff Roberson 	struct thread *td;
854ae7a6b38SJeff Roberson 	struct tdq *tdq;
855ae7a6b38SJeff Roberson 	int cpu;
856356500a3SJeff Roberson 
8577fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(from, MA_OWNED);
8587fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(to, MA_OWNED);
8597fcf154aSJeff Roberson 
860ad1e7d28SJulian Elischer 	tdq = from;
861ae7a6b38SJeff Roberson 	cpu = TDQ_ID(to);
86262fa74d9SJeff Roberson 	ts = tdq_steal(tdq, cpu);
863ad1e7d28SJulian Elischer 	if (ts == NULL)
86462fa74d9SJeff Roberson 		return (0);
865ae7a6b38SJeff Roberson 	td = ts->ts_thread;
866ae7a6b38SJeff Roberson 	/*
866ae7a6b38SJeff Roberson 	 * Although the run queue is locked, the thread may be blocked.  Lock
8687fcf154aSJeff Roberson 	 * it to clear this and acquire the run-queue lock.
869ae7a6b38SJeff Roberson 	 */
870ae7a6b38SJeff Roberson 	thread_lock(td);
8717fcf154aSJeff Roberson 	/* Drop recursive lock on from acquired via thread_lock(). */
872ae7a6b38SJeff Roberson 	TDQ_UNLOCK(from);
873ae7a6b38SJeff Roberson 	sched_rem(td);
8747b8bfa0dSJeff Roberson 	ts->ts_cpu = cpu;
875ae7a6b38SJeff Roberson 	td->td_lock = TDQ_LOCKPTR(to);
876ae7a6b38SJeff Roberson 	tdq_add(to, td, SRQ_YIELDING);
87762fa74d9SJeff Roberson 	return (1);
878356500a3SJeff Roberson }
87922bf7d9aSJeff Roberson 
880ae7a6b38SJeff Roberson /*
881ae7a6b38SJeff Roberson  * This tdq has idled.  Try to steal a thread from another cpu and switch
882ae7a6b38SJeff Roberson  * to it.
883ae7a6b38SJeff Roberson  */
88480f86c9fSJeff Roberson static int
885ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq)
88622bf7d9aSJeff Roberson {
88762fa74d9SJeff Roberson 	struct cpu_group *cg;
888ad1e7d28SJulian Elischer 	struct tdq *steal;
88962fa74d9SJeff Roberson 	cpumask_t mask;
89062fa74d9SJeff Roberson 	int thresh;
891ae7a6b38SJeff Roberson 	int cpu;
89280f86c9fSJeff Roberson 
89388f530ccSJeff Roberson 	if (smp_started == 0 || steal_idle == 0)
89488f530ccSJeff Roberson 		return (1);
89562fa74d9SJeff Roberson 	mask = -1;
89662fa74d9SJeff Roberson 	mask &= ~PCPU_GET(cpumask);
89762fa74d9SJeff Roberson 	/* We don't want to be preempted while we're iterating. */
898ae7a6b38SJeff Roberson 	spinlock_enter();
89962fa74d9SJeff Roberson 	for (cg = tdq->tdq_cg; cg != NULL; ) {
90062fa74d9SJeff Roberson 		if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0)
90162fa74d9SJeff Roberson 			thresh = steal_thresh;
90262fa74d9SJeff Roberson 		else
90362fa74d9SJeff Roberson 			thresh = 1;
90462fa74d9SJeff Roberson 		cpu = sched_highest(cg, mask, thresh);
90562fa74d9SJeff Roberson 		if (cpu == -1) {
90662fa74d9SJeff Roberson 			cg = cg->cg_parent;
90780f86c9fSJeff Roberson 			continue;
9087b8bfa0dSJeff Roberson 		}
9097b8bfa0dSJeff Roberson 		steal = TDQ_CPU(cpu);
91062fa74d9SJeff Roberson 		mask &= ~(1 << cpu);
9117fcf154aSJeff Roberson 		tdq_lock_pair(tdq, steal);
91262fa74d9SJeff Roberson 		if (steal->tdq_load < thresh || steal->tdq_transferable == 0) {
9137fcf154aSJeff Roberson 			tdq_unlock_pair(tdq, steal);
91462fa74d9SJeff Roberson 			continue;
91562fa74d9SJeff Roberson 		}
91662fa74d9SJeff Roberson 		/*
91762fa74d9SJeff Roberson 		 * If a thread was added while interrupts were disabled don't
91862fa74d9SJeff Roberson 		 * steal one here.  If we fail to acquire one due to affinity
91962fa74d9SJeff Roberson 		 * restrictions loop again with this cpu removed from the
92062fa74d9SJeff Roberson 		 * set.
92162fa74d9SJeff Roberson 		 */
92262fa74d9SJeff Roberson 		if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) {
92362fa74d9SJeff Roberson 			tdq_unlock_pair(tdq, steal);
92462fa74d9SJeff Roberson 			continue;
92580f86c9fSJeff Roberson 		}
926ae7a6b38SJeff Roberson 		spinlock_exit();
927ae7a6b38SJeff Roberson 		TDQ_UNLOCK(steal);
928ae7a6b38SJeff Roberson 		mi_switch(SW_VOL, NULL);
929ae7a6b38SJeff Roberson 		thread_unlock(curthread);
9307b8bfa0dSJeff Roberson 
9317b8bfa0dSJeff Roberson 		return (0);
93222bf7d9aSJeff Roberson 	}
93362fa74d9SJeff Roberson 	spinlock_exit();
93462fa74d9SJeff Roberson 	return (1);
93562fa74d9SJeff Roberson }
93622bf7d9aSJeff Roberson 
937ae7a6b38SJeff Roberson /*
938ae7a6b38SJeff Roberson  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
939ae7a6b38SJeff Roberson  */
94022bf7d9aSJeff Roberson static void
941ff256d9cSJeff Roberson tdq_notify(struct tdq *tdq, struct td_sched *ts)
94222bf7d9aSJeff Roberson {
943fc3a97dcSJeff Roberson 	int cpri;
944fc3a97dcSJeff Roberson 	int pri;
9457b8bfa0dSJeff Roberson 	int cpu;
94622bf7d9aSJeff Roberson 
947ff256d9cSJeff Roberson 	if (tdq->tdq_ipipending)
948ff256d9cSJeff Roberson 		return;
9497b8bfa0dSJeff Roberson 	cpu = ts->ts_cpu;
950fc3a97dcSJeff Roberson 	pri = ts->ts_thread->td_priority;
951ff256d9cSJeff Roberson 	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
952ff256d9cSJeff Roberson 	if (!sched_shouldpreempt(pri, cpri, 1))
9536b2f763fSJeff Roberson 		return;
954ff256d9cSJeff Roberson 	tdq->tdq_ipipending = 1;
95514618990SJeff Roberson 	ipi_selected(1 << cpu, IPI_PREEMPT);
95622bf7d9aSJeff Roberson }
95722bf7d9aSJeff Roberson 
958ae7a6b38SJeff Roberson /*
959ae7a6b38SJeff Roberson  * Steals load from a timeshare queue.  Honors the rotating queue head
960ae7a6b38SJeff Roberson  * index.
961ae7a6b38SJeff Roberson  */
962ae7a6b38SJeff Roberson static struct td_sched *
96362fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start)
964ae7a6b38SJeff Roberson {
965ae7a6b38SJeff Roberson 	struct td_sched *ts;
966ae7a6b38SJeff Roberson 	struct rqbits *rqb;
967ae7a6b38SJeff Roberson 	struct rqhead *rqh;
968ae7a6b38SJeff Roberson 	int first;
969ae7a6b38SJeff Roberson 	int bit;
970ae7a6b38SJeff Roberson 	int pri;
971ae7a6b38SJeff Roberson 	int i;
972ae7a6b38SJeff Roberson 
973ae7a6b38SJeff Roberson 	rqb = &rq->rq_status;
974ae7a6b38SJeff Roberson 	bit = start & (RQB_BPW -1);
975ae7a6b38SJeff Roberson 	pri = 0;
976ae7a6b38SJeff Roberson 	first = 0;
977ae7a6b38SJeff Roberson again:
978ae7a6b38SJeff Roberson 	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
979ae7a6b38SJeff Roberson 		if (rqb->rqb_bits[i] == 0)
980ae7a6b38SJeff Roberson 			continue;
981ae7a6b38SJeff Roberson 		if (bit != 0) {
982ae7a6b38SJeff Roberson 			for (pri = bit; pri < RQB_BPW; pri++)
983ae7a6b38SJeff Roberson 				if (rqb->rqb_bits[i] & (1ul << pri))
984ae7a6b38SJeff Roberson 					break;
985ae7a6b38SJeff Roberson 			if (pri >= RQB_BPW)
986ae7a6b38SJeff Roberson 				continue;
987ae7a6b38SJeff Roberson 		} else
988ae7a6b38SJeff Roberson 			pri = RQB_FFS(rqb->rqb_bits[i]);
989ae7a6b38SJeff Roberson 		pri += (i << RQB_L2BPW);
990ae7a6b38SJeff Roberson 		rqh = &rq->rq_queues[pri];
991ae7a6b38SJeff Roberson 		TAILQ_FOREACH(ts, rqh, ts_procq) {
99262fa74d9SJeff Roberson 			if (first && THREAD_CAN_MIGRATE(ts->ts_thread) &&
99362fa74d9SJeff Roberson 			    THREAD_CAN_SCHED(ts->ts_thread, cpu))
994ae7a6b38SJeff Roberson 				return (ts);
995ae7a6b38SJeff Roberson 			first = 1;
996ae7a6b38SJeff Roberson 		}
997ae7a6b38SJeff Roberson 	}
998ae7a6b38SJeff Roberson 	if (start != 0) {
999ae7a6b38SJeff Roberson 		start = 0;
1000ae7a6b38SJeff Roberson 		goto again;
1001ae7a6b38SJeff Roberson 	}
1002ae7a6b38SJeff Roberson 
1003ae7a6b38SJeff Roberson 	return (NULL);
1004ae7a6b38SJeff Roberson }
1005ae7a6b38SJeff Roberson 
1006ae7a6b38SJeff Roberson /*
1007ae7a6b38SJeff Roberson  * Steals load from a standard linear queue.
1008ae7a6b38SJeff Roberson  */
1009ad1e7d28SJulian Elischer static struct td_sched *
101062fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu)
101122bf7d9aSJeff Roberson {
101222bf7d9aSJeff Roberson 	struct rqhead *rqh;
101322bf7d9aSJeff Roberson 	struct rqbits *rqb;
1014ad1e7d28SJulian Elischer 	struct td_sched *ts;
101522bf7d9aSJeff Roberson 	int word;
101622bf7d9aSJeff Roberson 	int bit;
101722bf7d9aSJeff Roberson 
101822bf7d9aSJeff Roberson 	rqb = &rq->rq_status;
101922bf7d9aSJeff Roberson 	for (word = 0; word < RQB_LEN; word++) {
102022bf7d9aSJeff Roberson 		if (rqb->rqb_bits[word] == 0)
102122bf7d9aSJeff Roberson 			continue;
102222bf7d9aSJeff Roberson 		for (bit = 0; bit < RQB_BPW; bit++) {
1023a2640c9bSPeter Wemm 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
102422bf7d9aSJeff Roberson 				continue;
102522bf7d9aSJeff Roberson 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
102628994a58SJeff Roberson 			TAILQ_FOREACH(ts, rqh, ts_procq)
102762fa74d9SJeff Roberson 				if (THREAD_CAN_MIGRATE(ts->ts_thread) &&
102862fa74d9SJeff Roberson 				    THREAD_CAN_SCHED(ts->ts_thread, cpu))
1029ad1e7d28SJulian Elischer 					return (ts);
103022bf7d9aSJeff Roberson 		}
103122bf7d9aSJeff Roberson 	}
103222bf7d9aSJeff Roberson 	return (NULL);
103322bf7d9aSJeff Roberson }
103422bf7d9aSJeff Roberson 
1035ae7a6b38SJeff Roberson /*
1036ae7a6b38SJeff Roberson  * Attempt to steal a thread in priority order from a thread queue.
1037ae7a6b38SJeff Roberson  */
1038ad1e7d28SJulian Elischer static struct td_sched *
103962fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu)
104022bf7d9aSJeff Roberson {
1041ad1e7d28SJulian Elischer 	struct td_sched *ts;
104222bf7d9aSJeff Roberson 
1043ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
104462fa74d9SJeff Roberson 	if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
1045ad1e7d28SJulian Elischer 		return (ts);
104662fa74d9SJeff Roberson 	if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx))
104762fa74d9SJeff Roberson 	    != NULL)
1048ad1e7d28SJulian Elischer 		return (ts);
104962fa74d9SJeff Roberson 	return (runq_steal(&tdq->tdq_idle, cpu));
105022bf7d9aSJeff Roberson }
105180f86c9fSJeff Roberson 
1052ae7a6b38SJeff Roberson /*
1053ae7a6b38SJeff Roberson  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
10547fcf154aSJeff Roberson  * current lock and returns with the assigned queue locked.
1055ae7a6b38SJeff Roberson  */
1056ae7a6b38SJeff Roberson static inline struct tdq *
1057ae7a6b38SJeff Roberson sched_setcpu(struct td_sched *ts, int cpu, int flags)
105880f86c9fSJeff Roberson {
1059ae7a6b38SJeff Roberson 	struct thread *td;
1060ae7a6b38SJeff Roberson 	struct tdq *tdq;
106180f86c9fSJeff Roberson 
1062ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
1063ae7a6b38SJeff Roberson 
1064ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
1065ae7a6b38SJeff Roberson 	td = ts->ts_thread;
1066ae7a6b38SJeff Roberson 	ts->ts_cpu = cpu;
1067c47f202bSJeff Roberson 
1068c47f202bSJeff Roberson 	/* If the lock matches just return the queue. */
1069ae7a6b38SJeff Roberson 	if (td->td_lock == TDQ_LOCKPTR(tdq))
1070ae7a6b38SJeff Roberson 		return (tdq);
1071ae7a6b38SJeff Roberson #ifdef notyet
107280f86c9fSJeff Roberson 	/*
1073a5423ea3SJeff Roberson 	 * If the thread isn't running, its lock pointer is a
1074ae7a6b38SJeff Roberson 	 * turnstile or a sleepqueue.  We can just lock_set without
1075ae7a6b38SJeff Roberson 	 * blocking.
1076670c524fSJeff Roberson 	 */
1077ae7a6b38SJeff Roberson 	if (TD_CAN_RUN(td)) {
1078ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
1079ae7a6b38SJeff Roberson 		thread_lock_set(td, TDQ_LOCKPTR(tdq));
1080ae7a6b38SJeff Roberson 		return (tdq);
1081ae7a6b38SJeff Roberson 	}
1082ae7a6b38SJeff Roberson #endif
108380f86c9fSJeff Roberson 	/*
1084ae7a6b38SJeff Roberson 	 * The hard case is migration; we need to block the thread first to
1085ae7a6b38SJeff Roberson 	 * prevent lock order reversals with other cpus' locks.
10867b8bfa0dSJeff Roberson 	 */
1087ae7a6b38SJeff Roberson 	thread_lock_block(td);
1088ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1089ae7a6b38SJeff Roberson 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1090ae7a6b38SJeff Roberson 	return (tdq);
109180f86c9fSJeff Roberson }
10922454aaf5SJeff Roberson 
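/*
 * Select a cpu for a thread that is becoming runnable.  In rough order of
 * preference: keep interrupt threads on the cpu that took the interrupt,
 * keep the thread on its last cpu while its cache affinity is valid or
 * that cpu is idle, pick the least loaded cpu in the deepest topology
 * level that still has affinity, and otherwise pick the least loaded cpu
 * overall, falling back to the current cpu when it is the better choice.
 */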
1093ae7a6b38SJeff Roberson static int
1094ae7a6b38SJeff Roberson sched_pickcpu(struct td_sched *ts, int flags)
1095ae7a6b38SJeff Roberson {
109662fa74d9SJeff Roberson 	struct cpu_group *cg;
109762fa74d9SJeff Roberson 	struct thread *td;
1098ae7a6b38SJeff Roberson 	struct tdq *tdq;
109962fa74d9SJeff Roberson 	cpumask_t mask;
11007b8bfa0dSJeff Roberson 	int self;
11017b8bfa0dSJeff Roberson 	int pri;
11027b8bfa0dSJeff Roberson 	int cpu;
11037b8bfa0dSJeff Roberson 
110462fa74d9SJeff Roberson 	self = PCPU_GET(cpuid);
110562fa74d9SJeff Roberson 	td = ts->ts_thread;
11067b8bfa0dSJeff Roberson 	if (smp_started == 0)
11077b8bfa0dSJeff Roberson 		return (self);
110828994a58SJeff Roberson 	/*
110928994a58SJeff Roberson 	 * Don't migrate a running thread from sched_switch().
111028994a58SJeff Roberson 	 */
111162fa74d9SJeff Roberson 	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
111262fa74d9SJeff Roberson 		return (ts->ts_cpu);
11137b8bfa0dSJeff Roberson 	/*
111462fa74d9SJeff Roberson 	 * Prefer to run interrupt threads on the processors that generate
111562fa74d9SJeff Roberson 	 * the interrupt.
11167b8bfa0dSJeff Roberson 	 */
111762fa74d9SJeff Roberson 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
111862fa74d9SJeff Roberson 	    curthread->td_intr_nesting_level)
111962fa74d9SJeff Roberson 		ts->ts_cpu = self;
112062fa74d9SJeff Roberson 	/*
112162fa74d9SJeff Roberson 	 * If the thread can run on the last cpu and either the affinity
112262fa74d9SJeff Roberson 	 * has not expired or that cpu is idle, run it there.
112362fa74d9SJeff Roberson 	 */
112462fa74d9SJeff Roberson 	pri = td->td_priority;
112562fa74d9SJeff Roberson 	tdq = TDQ_CPU(ts->ts_cpu);
112662fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu)) {
112762fa74d9SJeff Roberson 		if (tdq->tdq_lowpri > PRI_MIN_IDLE)
112862fa74d9SJeff Roberson 			return (ts->ts_cpu);
112962fa74d9SJeff Roberson 		if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri)
11307b8bfa0dSJeff Roberson 			return (ts->ts_cpu);
11317b8bfa0dSJeff Roberson 	}
11327b8bfa0dSJeff Roberson 	/*
113362fa74d9SJeff Roberson 	 * Search for the highest level in the tree that still has affinity.
11347b8bfa0dSJeff Roberson 	 */
113562fa74d9SJeff Roberson 	cg = NULL;
113662fa74d9SJeff Roberson 	for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent)
113762fa74d9SJeff Roberson 		if (SCHED_AFFINITY(ts, cg->cg_level))
113862fa74d9SJeff Roberson 			break;
113962fa74d9SJeff Roberson 	cpu = -1;
114062fa74d9SJeff Roberson 	mask = td->td_cpuset->cs_mask.__bits[0];
114162fa74d9SJeff Roberson 	if (cg)
114262fa74d9SJeff Roberson 		cpu = sched_lowest(cg, mask, pri);
114362fa74d9SJeff Roberson 	if (cpu == -1)
114462fa74d9SJeff Roberson 		cpu = sched_lowest(cpu_top, mask, -1);
114562fa74d9SJeff Roberson 	/*
114662fa74d9SJeff Roberson 	 * Compare the lowest loaded cpu to the current cpu.
114762fa74d9SJeff Roberson 	 */
1148ff256d9cSJeff Roberson 	if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri &&
1149ff256d9cSJeff Roberson 	    TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
115062fa74d9SJeff Roberson 		cpu = self;
1151ff256d9cSJeff Roberson 	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
1152ae7a6b38SJeff Roberson 	return (cpu);
115380f86c9fSJeff Roberson }
115462fa74d9SJeff Roberson #endif
115522bf7d9aSJeff Roberson 
115622bf7d9aSJeff Roberson /*
115722bf7d9aSJeff Roberson  * Pick the highest priority task we have and return it.
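 * The realtime queue is consulted first, then the timeshare queue at its
 * current rotation index, and finally the idle queue.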
11580c0a98b2SJeff Roberson  */
1159ad1e7d28SJulian Elischer static struct td_sched *
1160ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq)
11615d7ef00cSJeff Roberson {
1162ad1e7d28SJulian Elischer 	struct td_sched *ts;
11635d7ef00cSJeff Roberson 
1164ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1165e7d50326SJeff Roberson 	ts = runq_choose(&tdq->tdq_realtime);
1166dda713dfSJeff Roberson 	if (ts != NULL)
1167e7d50326SJeff Roberson 		return (ts);
11683f872f85SJeff Roberson 	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1169e7d50326SJeff Roberson 	if (ts != NULL) {
1170dda713dfSJeff Roberson 		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1171e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on timeshare queue %d",
1172e7d50326SJeff Roberson 		    ts->ts_thread->td_priority));
1173ad1e7d28SJulian Elischer 		return (ts);
117415dc847eSJeff Roberson 	}
117515dc847eSJeff Roberson 
1176e7d50326SJeff Roberson 	ts = runq_choose(&tdq->tdq_idle);
1177e7d50326SJeff Roberson 	if (ts != NULL) {
1178e7d50326SJeff Roberson 		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1179e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on idle queue %d",
1180e7d50326SJeff Roberson 		    ts->ts_thread->td_priority));
1181e7d50326SJeff Roberson 		return (ts);
1182e7d50326SJeff Roberson 	}
1183e7d50326SJeff Roberson 
1184e7d50326SJeff Roberson 	return (NULL);
1185245f3abfSJeff Roberson }
11860a016a05SJeff Roberson 
1187ae7a6b38SJeff Roberson /*
1188ae7a6b38SJeff Roberson  * Initialize a thread queue.
1189ae7a6b38SJeff Roberson  */
11900a016a05SJeff Roberson static void
1191ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq)
11920a016a05SJeff Roberson {
1193ae7a6b38SJeff Roberson 
1194c47f202bSJeff Roberson 	if (bootverbose)
1195c47f202bSJeff Roberson 		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1196e7d50326SJeff Roberson 	runq_init(&tdq->tdq_realtime);
1197e7d50326SJeff Roberson 	runq_init(&tdq->tdq_timeshare);
1198d2ad694cSJeff Roberson 	runq_init(&tdq->tdq_idle);
119962fa74d9SJeff Roberson 	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
120062fa74d9SJeff Roberson 	    "sched lock %d", (int)TDQ_ID(tdq));
120162fa74d9SJeff Roberson 	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
120262fa74d9SJeff Roberson 	    MTX_SPIN | MTX_RECURSE);
12030a016a05SJeff Roberson }
12040a016a05SJeff Roberson 
1205c47f202bSJeff Roberson #ifdef SMP
1206c47f202bSJeff Roberson static void
1207c47f202bSJeff Roberson sched_setup_smp(void)
1208c47f202bSJeff Roberson {
1209c47f202bSJeff Roberson 	struct tdq *tdq;
1210c47f202bSJeff Roberson 	int i;
1211c47f202bSJeff Roberson 
121262fa74d9SJeff Roberson 	cpu_top = smp_topo();
121362fa74d9SJeff Roberson 	for (i = 0; i < MAXCPU; i++) {
1214c47f202bSJeff Roberson 		if (CPU_ABSENT(i))
1215c47f202bSJeff Roberson 			continue;
121662fa74d9SJeff Roberson 		tdq = TDQ_CPU(i);
1217c47f202bSJeff Roberson 		tdq_setup(tdq);
121862fa74d9SJeff Roberson 		tdq->tdq_cg = smp_topo_find(cpu_top, i);
121962fa74d9SJeff Roberson 		if (tdq->tdq_cg == NULL)
122062fa74d9SJeff Roberson 			panic("Can't find cpu group for %d\n", i);
1221c47f202bSJeff Roberson 	}
122262fa74d9SJeff Roberson 	balance_tdq = TDQ_SELF();
122362fa74d9SJeff Roberson 	sched_balance();
1224c47f202bSJeff Roberson }
1225c47f202bSJeff Roberson #endif
1226c47f202bSJeff Roberson 
1227ae7a6b38SJeff Roberson /*
1228ae7a6b38SJeff Roberson  * Set up the thread queues and initialize the topology based on MD
1229ae7a6b38SJeff Roberson  * information.
1230ae7a6b38SJeff Roberson  */
123135e6168fSJeff Roberson static void
123235e6168fSJeff Roberson sched_setup(void *dummy)
123335e6168fSJeff Roberson {
1234ae7a6b38SJeff Roberson 	struct tdq *tdq;
1235c47f202bSJeff Roberson 
1236c47f202bSJeff Roberson 	tdq = TDQ_SELF();
12370ec896fdSJeff Roberson #ifdef SMP
1238c47f202bSJeff Roberson 	sched_setup_smp();
1239749d01b0SJeff Roberson #else
1240c47f202bSJeff Roberson 	tdq_setup(tdq);
1241356500a3SJeff Roberson #endif
1242ae7a6b38SJeff Roberson 	/*
1243ae7a6b38SJeff Roberson 	 * To avoid divide-by-zero, we set realstathz to a dummy value
1244ae7a6b38SJeff Roberson 	 * in case sched_clock() is called before sched_initticks().
1245ae7a6b38SJeff Roberson 	 */
1246ae7a6b38SJeff Roberson 	realstathz = hz;
1247ae7a6b38SJeff Roberson 	sched_slice = (realstathz/10);	/* ~100ms */
1248ae7a6b38SJeff Roberson 	tickincr = 1 << SCHED_TICK_SHIFT;
1249ae7a6b38SJeff Roberson 
1250ae7a6b38SJeff Roberson 	/* Add thread0's load since it's running. */
1251ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1252c47f202bSJeff Roberson 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1253ae7a6b38SJeff Roberson 	tdq_load_add(tdq, &td_sched0);
125462fa74d9SJeff Roberson 	tdq->tdq_lowpri = thread0.td_priority;
1255ae7a6b38SJeff Roberson 	TDQ_UNLOCK(tdq);
125635e6168fSJeff Roberson }
125735e6168fSJeff Roberson 
1258ae7a6b38SJeff Roberson /*
1259ae7a6b38SJeff Roberson  * This routine determines the tickincr after stathz and hz are set up.
1260ae7a6b38SJeff Roberson  */
1261a1d4fe69SDavid Xu /* ARGSUSED */
1262a1d4fe69SDavid Xu static void
1263a1d4fe69SDavid Xu sched_initticks(void *dummy)
1264a1d4fe69SDavid Xu {
1265ae7a6b38SJeff Roberson 	int incr;
1266ae7a6b38SJeff Roberson 
1267a1d4fe69SDavid Xu 	realstathz = stathz ? stathz : hz;
126814618990SJeff Roberson 	sched_slice = (realstathz/10);	/* ~100ms */
1269a1d4fe69SDavid Xu 
1270a1d4fe69SDavid Xu 	/*
1271e7d50326SJeff Roberson 	 * tickincr is shifted out by 10 to avoid rounding errors due to
12723f872f85SJeff Roberson 	 * hz not being evenly divisible by stathz on all platforms.
1273e7d50326SJeff Roberson 	 */
1274ae7a6b38SJeff Roberson 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1275e7d50326SJeff Roberson 	/*
1276e7d50326SJeff Roberson 	 * This does not work for values of stathz that are greater than
1277e7d50326SJeff Roberson 	 * hz << SCHED_TICK_SHIFT.  In practice this does not happen.
1278a1d4fe69SDavid Xu 	 */
1279ae7a6b38SJeff Roberson 	if (incr == 0)
1280ae7a6b38SJeff Roberson 		incr = 1;
1281ae7a6b38SJeff Roberson 	tickincr = incr;
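	/*
	 * For example, with hz = 1000 and stathz = 128, tickincr is
	 * (1000 << 10) / 128 = 8000, so each stathz tick charges 8000
	 * fixed-point units (about 7.8 hz ticks) of run time.
	 */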
12827b8bfa0dSJeff Roberson #ifdef SMP
12839862717aSJeff Roberson 	/*
12847fcf154aSJeff Roberson 	 * Set the default balance interval now that we know
12857fcf154aSJeff Roberson 	 * what realstathz is.
12867fcf154aSJeff Roberson 	 */
12877fcf154aSJeff Roberson 	balance_interval = realstathz;
12887fcf154aSJeff Roberson 	/*
12899862717aSJeff Roberson 	 * Set steal_thresh to roughly log2(mp_ncpus) but no greater than 3.  This
12909862717aSJeff Roberson 	 * prevents excess thrashing on large machines and excess idle on
12919862717aSJeff Roberson 	 * smaller machines.
12929862717aSJeff Roberson 	 */
129362fa74d9SJeff Roberson 	steal_thresh = min(ffs(mp_ncpus) - 1, 3);
12947b8bfa0dSJeff Roberson 	affinity = SCHED_AFFINITY_DEFAULT;
12957b8bfa0dSJeff Roberson #endif
1296a1d4fe69SDavid Xu }
1297a1d4fe69SDavid Xu 
1298a1d4fe69SDavid Xu 
129935e6168fSJeff Roberson /*
1300ae7a6b38SJeff Roberson  * This is the core of the interactivity algorithm.  Determines a score based
1301ae7a6b38SJeff Roberson  * on past behavior.  It is the ratio of sleep time to run time scaled to
1302ae7a6b38SJeff Roberson  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1303ae7a6b38SJeff Roberson  * differs from the cpu usage because it does not account for time spent
1304ae7a6b38SJeff Roberson  * waiting on a run-queue.  Would be prettier if we had floating point.
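 *
 * A worked example, assuming the stock SCHED_INTERACT_MAX of 100 (so
 * SCHED_INTERACT_HALF is 50): a thread that sleeps four times as long as
 * it runs takes the slptime > runtime branch below, scoring roughly
 * runtime / (slptime / 50) = 50 * runtime / slptime ~= 12, well on the
 * interactive side of the scale.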
1305ae7a6b38SJeff Roberson  */
1306ae7a6b38SJeff Roberson static int
1307ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1308ae7a6b38SJeff Roberson {
1309ae7a6b38SJeff Roberson 	struct td_sched *ts;
1310ae7a6b38SJeff Roberson 	int div;
1311ae7a6b38SJeff Roberson 
1312ae7a6b38SJeff Roberson 	ts = td->td_sched;
1313ae7a6b38SJeff Roberson 	/*
1314ae7a6b38SJeff Roberson 	 * The score is only needed if this is likely to be an interactive
1315ae7a6b38SJeff Roberson 	 * task.  Don't go through the expense of computing it if there's
1316ae7a6b38SJeff Roberson 	 * no chance.
1317ae7a6b38SJeff Roberson 	 */
1318ae7a6b38SJeff Roberson 	if (sched_interact <= SCHED_INTERACT_HALF &&
1319ae7a6b38SJeff Roberson 		ts->ts_runtime >= ts->ts_slptime)
1320ae7a6b38SJeff Roberson 			return (SCHED_INTERACT_HALF);
1321ae7a6b38SJeff Roberson 
1322ae7a6b38SJeff Roberson 	if (ts->ts_runtime > ts->ts_slptime) {
1323ae7a6b38SJeff Roberson 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1324ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF +
1325ae7a6b38SJeff Roberson 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1326ae7a6b38SJeff Roberson 	}
1327ae7a6b38SJeff Roberson 	if (ts->ts_slptime > ts->ts_runtime) {
1328ae7a6b38SJeff Roberson 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1329ae7a6b38SJeff Roberson 		return (ts->ts_runtime / div);
1330ae7a6b38SJeff Roberson 	}
1331ae7a6b38SJeff Roberson 	/* runtime == slptime */
1332ae7a6b38SJeff Roberson 	if (ts->ts_runtime)
1333ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF);
1334ae7a6b38SJeff Roberson 
1335ae7a6b38SJeff Roberson 	/*
1336ae7a6b38SJeff Roberson 	 * This can happen if slptime and runtime are 0.
1337ae7a6b38SJeff Roberson 	 */
1338ae7a6b38SJeff Roberson 	return (0);
1339ae7a6b38SJeff Roberson 
1340ae7a6b38SJeff Roberson }
1341ae7a6b38SJeff Roberson 
1342ae7a6b38SJeff Roberson /*
134335e6168fSJeff Roberson  * Scale the scheduling priority according to the "interactivity" of this
134435e6168fSJeff Roberson  * process.
134535e6168fSJeff Roberson  */
134615dc847eSJeff Roberson static void
13478460a577SJohn Birrell sched_priority(struct thread *td)
134835e6168fSJeff Roberson {
1349e7d50326SJeff Roberson 	int score;
135035e6168fSJeff Roberson 	int pri;
135135e6168fSJeff Roberson 
13528460a577SJohn Birrell 	if (td->td_pri_class != PRI_TIMESHARE)
135315dc847eSJeff Roberson 		return;
1354e7d50326SJeff Roberson 	/*
1355e7d50326SJeff Roberson 	 * If the score is interactive we place the thread in the realtime
1356e7d50326SJeff Roberson 	 * queue with a priority that is less than kernel and interrupt
1357e7d50326SJeff Roberson 	 * priorities.  These threads are not subject to nice restrictions.
1358e7d50326SJeff Roberson 	 *
1359ae7a6b38SJeff Roberson 	 * Scores greater than this are placed on the normal timeshare queue
1360e7d50326SJeff Roberson 	 * where the priority is partially decided by the most recent cpu
1361e7d50326SJeff Roberson 	 * utilization and the rest is decided by nice value.
1362a5423ea3SJeff Roberson 	 *
1363a5423ea3SJeff Roberson 	 * The nice value of the process has a linear effect on the calculated
1364a5423ea3SJeff Roberson 	 * score.  Negative nice values make it easier for a thread to be
1365a5423ea3SJeff Roberson 	 * considered interactive.
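	 *
	 * Concretely, a score of 0 maps to PRI_MIN_REALTIME and scores just
	 * below sched_interact map near PRI_MAX_REALTIME, while batch
	 * threads start at SCHED_PRI_MIN and are penalized linearly for
	 * recent cpu use (SCHED_PRI_TICKS) and for their nice value
	 * (SCHED_PRI_NICE).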
1366e7d50326SJeff Roberson 	 */
1367e270652bSJeff Roberson 	score = imax(0, sched_interact_score(td) - td->td_proc->p_nice);
1368e7d50326SJeff Roberson 	if (score < sched_interact) {
1369e7d50326SJeff Roberson 		pri = PRI_MIN_REALTIME;
1370e7d50326SJeff Roberson 		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1371e7d50326SJeff Roberson 		    * score;
1372e7d50326SJeff Roberson 		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
13739a93305aSJeff Roberson 		    ("sched_priority: invalid interactive priority %d score %d",
13749a93305aSJeff Roberson 		    pri, score));
1375e7d50326SJeff Roberson 	} else {
1376e7d50326SJeff Roberson 		pri = SCHED_PRI_MIN;
1377e7d50326SJeff Roberson 		if (td->td_sched->ts_ticks)
1378e7d50326SJeff Roberson 			pri += SCHED_PRI_TICKS(td->td_sched);
1379e7d50326SJeff Roberson 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1380ae7a6b38SJeff Roberson 		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1381ae7a6b38SJeff Roberson 		    ("sched_priority: invalid priority %d: nice %d, "
1382ae7a6b38SJeff Roberson 		    "ticks %d ftick %d ltick %d tick pri %d",
1383ae7a6b38SJeff Roberson 		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1384ae7a6b38SJeff Roberson 		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1385ae7a6b38SJeff Roberson 		    SCHED_PRI_TICKS(td->td_sched)));
1386e7d50326SJeff Roberson 	}
13878460a577SJohn Birrell 	sched_user_prio(td, pri);
138835e6168fSJeff Roberson 
138915dc847eSJeff Roberson 	return;
139035e6168fSJeff Roberson }
139135e6168fSJeff Roberson 
139235e6168fSJeff Roberson /*
1393d322132cSJeff Roberson  * This routine enforces a maximum limit on the amount of scheduling history
1394ae7a6b38SJeff Roberson  * kept.  It is called after either the slptime or runtime is adjusted.  This
1395ae7a6b38SJeff Roberson  * function is ugly due to integer math.
1396d322132cSJeff Roberson  */
13974b60e324SJeff Roberson static void
13988460a577SJohn Birrell sched_interact_update(struct thread *td)
13994b60e324SJeff Roberson {
1400155b6ca1SJeff Roberson 	struct td_sched *ts;
14019a93305aSJeff Roberson 	u_int sum;
14023f741ca1SJeff Roberson 
1403155b6ca1SJeff Roberson 	ts = td->td_sched;
1404ae7a6b38SJeff Roberson 	sum = ts->ts_runtime + ts->ts_slptime;
1405d322132cSJeff Roberson 	if (sum < SCHED_SLP_RUN_MAX)
1406d322132cSJeff Roberson 		return;
1407d322132cSJeff Roberson 	/*
1408155b6ca1SJeff Roberson 	 * This only happens from two places:
1409155b6ca1SJeff Roberson 	 * 1) We have added an unusual amount of run time from fork_exit.
1410155b6ca1SJeff Roberson 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1411155b6ca1SJeff Roberson 	 */
1412155b6ca1SJeff Roberson 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1413ae7a6b38SJeff Roberson 		if (ts->ts_runtime > ts->ts_slptime) {
1414ae7a6b38SJeff Roberson 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1415ae7a6b38SJeff Roberson 			ts->ts_slptime = 1;
1416155b6ca1SJeff Roberson 		} else {
1417ae7a6b38SJeff Roberson 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1418ae7a6b38SJeff Roberson 			ts->ts_runtime = 1;
1419155b6ca1SJeff Roberson 		}
1420155b6ca1SJeff Roberson 		return;
1421155b6ca1SJeff Roberson 	}
1422155b6ca1SJeff Roberson 	/*
1423d322132cSJeff Roberson 	 * If we have exceeded by more than 1/5th then the algorithm below
1424d322132cSJeff Roberson 	 * will not bring us back into range.  Dividing by two here forces
14252454aaf5SJeff Roberson 	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
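	 * (e.g. a sum of 1.9 * SCHED_SLP_RUN_MAX halves to 0.95 of the
	 * maximum, which is back within range).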
1426d322132cSJeff Roberson 	 */
142737a35e4aSJeff Roberson 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1428ae7a6b38SJeff Roberson 		ts->ts_runtime /= 2;
1429ae7a6b38SJeff Roberson 		ts->ts_slptime /= 2;
1430d322132cSJeff Roberson 		return;
1431d322132cSJeff Roberson 	}
1432ae7a6b38SJeff Roberson 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1433ae7a6b38SJeff Roberson 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1434d322132cSJeff Roberson }
1435d322132cSJeff Roberson 
1436ae7a6b38SJeff Roberson /*
1437ae7a6b38SJeff Roberson  * Scale back the interactivity history when a child thread is created.  The
1438ae7a6b38SJeff Roberson  * history is inherited from the parent but the thread may behave totally
1439ae7a6b38SJeff Roberson  * differently.  For example, a shell spawning a compiler process.  We want
1440ae7a6b38SJeff Roberson  * to learn that the compiler is behaving badly very quickly.
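 *
 * Dividing runtime and slptime by the same ratio preserves the child's
 * score while shrinking its history; e.g. an inherited sum of
 * 3 * SCHED_SLP_RUN_FORK gives ratio 3, leaving the child one third of
 * each value.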
1441ae7a6b38SJeff Roberson  */
1442d322132cSJeff Roberson static void
14438460a577SJohn Birrell sched_interact_fork(struct thread *td)
1444d322132cSJeff Roberson {
1445d322132cSJeff Roberson 	int ratio;
1446d322132cSJeff Roberson 	int sum;
1447d322132cSJeff Roberson 
1448ae7a6b38SJeff Roberson 	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1449d322132cSJeff Roberson 	if (sum > SCHED_SLP_RUN_FORK) {
1450d322132cSJeff Roberson 		ratio = sum / SCHED_SLP_RUN_FORK;
1451ae7a6b38SJeff Roberson 		td->td_sched->ts_runtime /= ratio;
1452ae7a6b38SJeff Roberson 		td->td_sched->ts_slptime /= ratio;
14534b60e324SJeff Roberson 	}
14544b60e324SJeff Roberson }
14554b60e324SJeff Roberson 
145615dc847eSJeff Roberson /*
1457ae7a6b38SJeff Roberson  * Called from proc0_init() to setup the scheduler fields.
1458ed062c8dSJulian Elischer  */
1459ed062c8dSJulian Elischer void
1460ed062c8dSJulian Elischer schedinit(void)
1461ed062c8dSJulian Elischer {
1462e7d50326SJeff Roberson 
1463ed062c8dSJulian Elischer 	/*
1464ed062c8dSJulian Elischer 	 * Set up the scheduler specific parts of proc0.
1465ed062c8dSJulian Elischer 	 */
1466ed062c8dSJulian Elischer 	proc0.p_sched = NULL; /* XXX */
1467ad1e7d28SJulian Elischer 	thread0.td_sched = &td_sched0;
1468e7d50326SJeff Roberson 	td_sched0.ts_ltick = ticks;
14698ab80cf0SJeff Roberson 	td_sched0.ts_ftick = ticks;
1470ad1e7d28SJulian Elischer 	td_sched0.ts_thread = &thread0;
147173daf66fSJeff Roberson 	td_sched0.ts_slice = sched_slice;
1472ed062c8dSJulian Elischer }
1473ed062c8dSJulian Elischer 
1474ed062c8dSJulian Elischer /*
147515dc847eSJeff Roberson  * This is only somewhat accurate since, given many processes of the same
147615dc847eSJeff Roberson  * priority, they will switch when their slices run out, which will be
1477e7d50326SJeff Roberson  * at most sched_slice stathz ticks.
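 *
 * For example, with hz = 1000 and stathz = 128: sched_slice = 128 / 10
 * = 12, so this returns 1000 / (128 / 12) = 100 hz ticks, roughly the
 * intended ~100ms.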
147815dc847eSJeff Roberson  */
147935e6168fSJeff Roberson int
148035e6168fSJeff Roberson sched_rr_interval(void)
148135e6168fSJeff Roberson {
1482e7d50326SJeff Roberson 
1483e7d50326SJeff Roberson 	/* Convert sched_slice to hz */
1484e7d50326SJeff Roberson 	return (hz/(realstathz/sched_slice));
148535e6168fSJeff Roberson }
148635e6168fSJeff Roberson 
1487ae7a6b38SJeff Roberson /*
1488ae7a6b38SJeff Roberson  * Update the percent cpu tracking information when it is requested or
1489ae7a6b38SJeff Roberson  * the total history exceeds the maximum.  We keep a sliding history of
1490ae7a6b38SJeff Roberson  * tick counts that slowly decays.  This is less precise than the 4BSD
1491ae7a6b38SJeff Roberson  * mechanism since it happens with less regular and frequent events.
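 *
 * The rescale below keeps at most SCHED_TICK_TARG ticks of history:
 * ts_ticks is scaled by SCHED_TICK_TARG / (ticks - ts_ftick) and the
 * first/last tick marks are pulled forward to the new window.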
1492ae7a6b38SJeff Roberson  */
149322bf7d9aSJeff Roberson static void
1494ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts)
149535e6168fSJeff Roberson {
1496e7d50326SJeff Roberson 
1497e7d50326SJeff Roberson 	if (ts->ts_ticks == 0)
1498e7d50326SJeff Roberson 		return;
14998ab80cf0SJeff Roberson 	if (ticks - (hz / 10) < ts->ts_ltick &&
15008ab80cf0SJeff Roberson 	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
15018ab80cf0SJeff Roberson 		return;
150235e6168fSJeff Roberson 	/*
150335e6168fSJeff Roberson 	 * Adjust counters and watermark for pctcpu calc.
1504210491d3SJeff Roberson 	 */
1505e7d50326SJeff Roberson 	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1506ad1e7d28SJulian Elischer 		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1507e7d50326SJeff Roberson 			    SCHED_TICK_TARG;
1508e7d50326SJeff Roberson 	else
1509ad1e7d28SJulian Elischer 		ts->ts_ticks = 0;
1510ad1e7d28SJulian Elischer 	ts->ts_ltick = ticks;
1511e7d50326SJeff Roberson 	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
151235e6168fSJeff Roberson }
151335e6168fSJeff Roberson 
1514ae7a6b38SJeff Roberson /*
1515ae7a6b38SJeff Roberson  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1516ae7a6b38SJeff Roberson  * if necessary.  This is the back-end for several priority related
1517ae7a6b38SJeff Roberson  * functions.
1518ae7a6b38SJeff Roberson  */
1519e7d50326SJeff Roberson static void
1520f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio)
152135e6168fSJeff Roberson {
1522ad1e7d28SJulian Elischer 	struct td_sched *ts;
152373daf66fSJeff Roberson 	struct tdq *tdq;
152473daf66fSJeff Roberson 	int oldpri;
152535e6168fSJeff Roberson 
152681d47d3fSJeff Roberson 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1527431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, prio, curthread,
1528431f8906SJulian Elischer 	    curthread->td_name);
1529ad1e7d28SJulian Elischer 	ts = td->td_sched;
15307b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1531f5c157d9SJohn Baldwin 	if (td->td_priority == prio)
1532f5c157d9SJohn Baldwin 		return;
1533e7d50326SJeff Roberson 
15343f872f85SJeff Roberson 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
15353f741ca1SJeff Roberson 		/*
15363f741ca1SJeff Roberson 		 * If the priority has been elevated due to priority
15373f741ca1SJeff Roberson 		 * propagation, we may have to move ourselves to a new
1538e7d50326SJeff Roberson 		 * queue.  This could be optimized to not re-add in some
1539e7d50326SJeff Roberson 		 * cases.
1540f2b74cbfSJeff Roberson 		 */
1541e7d50326SJeff Roberson 		sched_rem(td);
1542e7d50326SJeff Roberson 		td->td_priority = prio;
1543ae7a6b38SJeff Roberson 		sched_add(td, SRQ_BORROWING);
154473daf66fSJeff Roberson 		return;
154573daf66fSJeff Roberson 	}
1546ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(ts->ts_cpu);
154762fa74d9SJeff Roberson 	oldpri = td->td_priority;
15483f741ca1SJeff Roberson 	td->td_priority = prio;
154973daf66fSJeff Roberson 	tdq_runq_pick(tdq, ts);
155073daf66fSJeff Roberson 	if (TD_IS_RUNNING(td)) {
155162fa74d9SJeff Roberson 		if (prio < tdq->tdq_lowpri)
155262fa74d9SJeff Roberson 			tdq->tdq_lowpri = prio;
155362fa74d9SJeff Roberson 		else if (tdq->tdq_lowpri == oldpri)
155462fa74d9SJeff Roberson 			tdq_setlowpri(tdq, td);
155573daf66fSJeff Roberson 	}
1556ae7a6b38SJeff Roberson }
155735e6168fSJeff Roberson 
1558f5c157d9SJohn Baldwin /*
1559f5c157d9SJohn Baldwin  * Update a thread's priority when it is lent another thread's
1560f5c157d9SJohn Baldwin  * priority.
1561f5c157d9SJohn Baldwin  */
1562f5c157d9SJohn Baldwin void
1563f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1564f5c157d9SJohn Baldwin {
1565f5c157d9SJohn Baldwin 
1566f5c157d9SJohn Baldwin 	td->td_flags |= TDF_BORROWING;
1567f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1568f5c157d9SJohn Baldwin }
1569f5c157d9SJohn Baldwin 
1570f5c157d9SJohn Baldwin /*
1571f5c157d9SJohn Baldwin  * Restore a thread's priority when priority propagation is
1572f5c157d9SJohn Baldwin  * over.  The prio argument is the minimum priority the thread
1573f5c157d9SJohn Baldwin  * needs to have to satisfy other possible priority lending
1574f5c157d9SJohn Baldwin  * requests.  If the thread's regular priority is less
1575f5c157d9SJohn Baldwin  * important than prio, the thread will keep a priority boost
1576f5c157d9SJohn Baldwin  * of prio.
1577f5c157d9SJohn Baldwin  */
1578f5c157d9SJohn Baldwin void
1579f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1580f5c157d9SJohn Baldwin {
1581f5c157d9SJohn Baldwin 	u_char base_pri;
1582f5c157d9SJohn Baldwin 
1583f5c157d9SJohn Baldwin 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1584f5c157d9SJohn Baldwin 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
15858460a577SJohn Birrell 		base_pri = td->td_user_pri;
1586f5c157d9SJohn Baldwin 	else
1587f5c157d9SJohn Baldwin 		base_pri = td->td_base_pri;
1588f5c157d9SJohn Baldwin 	if (prio >= base_pri) {
1589f5c157d9SJohn Baldwin 		td->td_flags &= ~TDF_BORROWING;
1590f5c157d9SJohn Baldwin 		sched_thread_priority(td, base_pri);
1591f5c157d9SJohn Baldwin 	} else
1592f5c157d9SJohn Baldwin 		sched_lend_prio(td, prio);
1593f5c157d9SJohn Baldwin }
1594f5c157d9SJohn Baldwin 
1595ae7a6b38SJeff Roberson /*
1596ae7a6b38SJeff Roberson  * Standard entry for setting the priority to an absolute value.
1597ae7a6b38SJeff Roberson  */
1598f5c157d9SJohn Baldwin void
1599f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1600f5c157d9SJohn Baldwin {
1601f5c157d9SJohn Baldwin 	u_char oldprio;
1602f5c157d9SJohn Baldwin 
1603f5c157d9SJohn Baldwin 	/* First, update the base priority. */
1604f5c157d9SJohn Baldwin 	td->td_base_pri = prio;
1605f5c157d9SJohn Baldwin 
1606f5c157d9SJohn Baldwin 	/*
160750aaa791SJohn Baldwin 	 * If the thread is borrowing another thread's priority, don't
1608f5c157d9SJohn Baldwin 	 * ever lower the priority.
1609f5c157d9SJohn Baldwin 	 */
1610f5c157d9SJohn Baldwin 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1611f5c157d9SJohn Baldwin 		return;
1612f5c157d9SJohn Baldwin 
1613f5c157d9SJohn Baldwin 	/* Change the real priority. */
1614f5c157d9SJohn Baldwin 	oldprio = td->td_priority;
1615f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1616f5c157d9SJohn Baldwin 
1617f5c157d9SJohn Baldwin 	/*
1618f5c157d9SJohn Baldwin 	 * If the thread is on a turnstile, then let the turnstile update
1619f5c157d9SJohn Baldwin 	 * its state.
1620f5c157d9SJohn Baldwin 	 */
1621f5c157d9SJohn Baldwin 	if (TD_ON_LOCK(td) && oldprio != prio)
1622f5c157d9SJohn Baldwin 		turnstile_adjust(td, oldprio);
1623f5c157d9SJohn Baldwin }
1624f5c157d9SJohn Baldwin 
1625ae7a6b38SJeff Roberson /*
1626ae7a6b38SJeff Roberson  * Set the base user priority; this does not affect the current running priority.
1627ae7a6b38SJeff Roberson  */
162835e6168fSJeff Roberson void
16298460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
16303db720fdSDavid Xu {
16313db720fdSDavid Xu 	u_char oldprio;
16323db720fdSDavid Xu 
16338460a577SJohn Birrell 	td->td_base_user_pri = prio;
1634fc6c30f6SJulian Elischer 	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1635fc6c30f6SJulian Elischer                 return;
16368460a577SJohn Birrell 	oldprio = td->td_user_pri;
16378460a577SJohn Birrell 	td->td_user_pri = prio;
16383db720fdSDavid Xu }
16393db720fdSDavid Xu 
16403db720fdSDavid Xu void
16413db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
16423db720fdSDavid Xu {
16433db720fdSDavid Xu 	u_char oldprio;
16443db720fdSDavid Xu 
1645435806d3SDavid Xu 	THREAD_LOCK_ASSERT(td, MA_OWNED);
16463db720fdSDavid Xu 	td->td_flags |= TDF_UBORROWING;
1647f645b5daSMaxim Konovalov 	oldprio = td->td_user_pri;
16488460a577SJohn Birrell 	td->td_user_pri = prio;
16493db720fdSDavid Xu }
16503db720fdSDavid Xu 
16513db720fdSDavid Xu void
16523db720fdSDavid Xu sched_unlend_user_prio(struct thread *td, u_char prio)
16533db720fdSDavid Xu {
16543db720fdSDavid Xu 	u_char base_pri;
16553db720fdSDavid Xu 
1656435806d3SDavid Xu 	THREAD_LOCK_ASSERT(td, MA_OWNED);
16578460a577SJohn Birrell 	base_pri = td->td_base_user_pri;
16583db720fdSDavid Xu 	if (prio >= base_pri) {
16593db720fdSDavid Xu 		td->td_flags &= ~TDF_UBORROWING;
16608460a577SJohn Birrell 		sched_user_prio(td, base_pri);
1661435806d3SDavid Xu 	} else {
16623db720fdSDavid Xu 		sched_lend_user_prio(td, prio);
16633db720fdSDavid Xu 	}
1664435806d3SDavid Xu }
16653db720fdSDavid Xu 
1666ae7a6b38SJeff Roberson /*
166708c9a16cSJeff Roberson  * Add the thread passed as 'newtd' to the run queue before selecting
166808c9a16cSJeff Roberson  * the next thread to run.  This is only used for KSE.
166908c9a16cSJeff Roberson  */
167008c9a16cSJeff Roberson static void
167108c9a16cSJeff Roberson sched_switchin(struct tdq *tdq, struct thread *td)
167208c9a16cSJeff Roberson {
167308c9a16cSJeff Roberson #ifdef SMP
167408c9a16cSJeff Roberson 	spinlock_enter();
167508c9a16cSJeff Roberson 	TDQ_UNLOCK(tdq);
167608c9a16cSJeff Roberson 	thread_lock(td);
167708c9a16cSJeff Roberson 	spinlock_exit();
167808c9a16cSJeff Roberson 	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
167908c9a16cSJeff Roberson #else
168008c9a16cSJeff Roberson 	td->td_lock = TDQ_LOCKPTR(tdq);
168108c9a16cSJeff Roberson #endif
168208c9a16cSJeff Roberson 	tdq_add(tdq, td, SRQ_YIELDING);
168308c9a16cSJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
168408c9a16cSJeff Roberson }
168508c9a16cSJeff Roberson 
168608c9a16cSJeff Roberson /*
1687731016feSWojciech A. Koszek  * Block a thread for switching.  Similar to thread_block() but does not
1688731016feSWojciech A. Koszek  * bump the spin count.
1689731016feSWojciech A. Koszek  */
1690731016feSWojciech A. Koszek static inline struct mtx *
1691731016feSWojciech A. Koszek thread_block_switch(struct thread *td)
1692731016feSWojciech A. Koszek {
1693731016feSWojciech A. Koszek 	struct mtx *lock;
1694731016feSWojciech A. Koszek 
1695731016feSWojciech A. Koszek 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1696731016feSWojciech A. Koszek 	lock = td->td_lock;
1697731016feSWojciech A. Koszek 	td->td_lock = &blocked_lock;
1698731016feSWojciech A. Koszek 	mtx_unlock_spin(lock);
1699731016feSWojciech A. Koszek 
1700731016feSWojciech A. Koszek 	return (lock);
1701731016feSWojciech A. Koszek }
1702731016feSWojciech A. Koszek 
1703731016feSWojciech A. Koszek /*
1704c47f202bSJeff Roberson  * Handle migration from sched_switch().  This happens only for
1705c47f202bSJeff Roberson  * cpu binding.
1706c47f202bSJeff Roberson  */
1707c47f202bSJeff Roberson static struct mtx *
1708c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1709c47f202bSJeff Roberson {
1710c47f202bSJeff Roberson 	struct tdq *tdn;
1711c47f202bSJeff Roberson 
1712c47f202bSJeff Roberson 	tdn = TDQ_CPU(td->td_sched->ts_cpu);
1713c47f202bSJeff Roberson #ifdef SMP
171473daf66fSJeff Roberson 	tdq_load_rem(tdq, td->td_sched);
1715c47f202bSJeff Roberson 	/*
1716c47f202bSJeff Roberson 	 * Do the lock dance required to avoid LOR.  We grab an extra
1717c47f202bSJeff Roberson 	 * spinlock nesting to prevent preemption while we're
1718c47f202bSJeff Roberson 	 * not holding either run-queue lock.
1719c47f202bSJeff Roberson 	 */
1720c47f202bSJeff Roberson 	spinlock_enter();
1721c47f202bSJeff Roberson 	thread_block_switch(td);	/* This releases the lock on tdq. */
1722c47f202bSJeff Roberson 	TDQ_LOCK(tdn);
1723c47f202bSJeff Roberson 	tdq_add(tdn, td, flags);
1724ff256d9cSJeff Roberson 	tdq_notify(tdn, td->td_sched);
1725c47f202bSJeff Roberson 	/*
1726c47f202bSJeff Roberson 	 * After we unlock tdn the new cpu still can't switch into this
1727c47f202bSJeff Roberson 	 * thread until we've unblocked it in cpu_switch().  The lock
1728c47f202bSJeff Roberson 	 * pointers may match in the case of HTT cores.  Don't unlock here
1729c47f202bSJeff Roberson 	 * or we can deadlock when the other CPU runs the IPI handler.
1730c47f202bSJeff Roberson 	 */
1731c47f202bSJeff Roberson 	if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1732c47f202bSJeff Roberson 		TDQ_UNLOCK(tdn);
1733c47f202bSJeff Roberson 		TDQ_LOCK(tdq);
1734c47f202bSJeff Roberson 	}
1735c47f202bSJeff Roberson 	spinlock_exit();
1736c47f202bSJeff Roberson #endif
1737c47f202bSJeff Roberson 	return (TDQ_LOCKPTR(tdn));
1738c47f202bSJeff Roberson }
1739c47f202bSJeff Roberson 
1740c47f202bSJeff Roberson /*
1741ae7a6b38SJeff Roberson  * Release a thread that was blocked with thread_block_switch().
1742ae7a6b38SJeff Roberson  */
1743ae7a6b38SJeff Roberson static inline void
1744ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx)
1745ae7a6b38SJeff Roberson {
1746ae7a6b38SJeff Roberson 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1747ae7a6b38SJeff Roberson 	    (uintptr_t)mtx);
1748ae7a6b38SJeff Roberson }
1749ae7a6b38SJeff Roberson 
1750ae7a6b38SJeff Roberson /*
1751ae7a6b38SJeff Roberson  * Switch threads.  This function has to handle threads coming in while
1752ae7a6b38SJeff Roberson  * blocked for some reason, running, or idle.  It also must deal with
1753ae7a6b38SJeff Roberson  * migrating a thread from one queue to another as running threads may
1754ae7a6b38SJeff Roberson  * be assigned elsewhere via binding.
1755ae7a6b38SJeff Roberson  */
17563db720fdSDavid Xu void
17573389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags)
175835e6168fSJeff Roberson {
1759c02bbb43SJeff Roberson 	struct tdq *tdq;
1760ad1e7d28SJulian Elischer 	struct td_sched *ts;
1761ae7a6b38SJeff Roberson 	struct mtx *mtx;
1762c47f202bSJeff Roberson 	int srqflag;
1763ae7a6b38SJeff Roberson 	int cpuid;
176435e6168fSJeff Roberson 
17657b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
176635e6168fSJeff Roberson 
1767ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
1768ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpuid);
1769e7d50326SJeff Roberson 	ts = td->td_sched;
1770c47f202bSJeff Roberson 	mtx = td->td_lock;
1771ae7a6b38SJeff Roberson 	ts->ts_rltick = ticks;
1772060563ecSJulian Elischer 	td->td_lastcpu = td->td_oncpu;
1773060563ecSJulian Elischer 	td->td_oncpu = NOCPU;
177452eb8464SJohn Baldwin 	td->td_flags &= ~TDF_NEEDRESCHED;
177577918643SStephan Uphoff 	td->td_owepreempt = 0;
1776b11fdad0SJeff Roberson 	/*
1777ae7a6b38SJeff Roberson 	 * The lock pointer in an idle thread should never change.  Set the
1778ae7a6b38SJeff Roberson 	 * thread state back to CAN_RUN as well.
1779b11fdad0SJeff Roberson 	 */
1780486a9414SJulian Elischer 	if (TD_IS_IDLETHREAD(td)) {
1781ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1782bf0acc27SJohn Baldwin 		TD_SET_CAN_RUN(td);
17837b20fb19SJeff Roberson 	} else if (TD_IS_RUNNING(td)) {
1784ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1785c47f202bSJeff Roberson 		srqflag = (flags & SW_PREEMPT) ?
1786598b368dSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1787c47f202bSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING;
1788c47f202bSJeff Roberson 		if (ts->ts_cpu == cpuid)
178973daf66fSJeff Roberson 			tdq_runq_add(tdq, ts, srqflag);
1790c47f202bSJeff Roberson 		else
1791c47f202bSJeff Roberson 			mtx = sched_switch_migrate(tdq, td, srqflag);
1792ae7a6b38SJeff Roberson 	} else {
1793ae7a6b38SJeff Roberson 		/* This thread must be going to sleep. */
1794ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
1795ae7a6b38SJeff Roberson 		mtx = thread_block_switch(td);
1796ae7a6b38SJeff Roberson 		tdq_load_rem(tdq, ts);
1797ae7a6b38SJeff Roberson 	}
1798ae7a6b38SJeff Roberson 	/*
1799ae7a6b38SJeff Roberson 	 * We enter here with the thread blocked and assigned to the
1800ae7a6b38SJeff Roberson 	 * appropriate cpu run-queue or sleep-queue and with the current
1801ae7a6b38SJeff Roberson 	 * thread-queue locked.
1802ae7a6b38SJeff Roberson 	 */
1803ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1804ae7a6b38SJeff Roberson 	/*
180508c9a16cSJeff Roberson 	 * If KSE assigned a new thread, just add it here and let choosethread
180608c9a16cSJeff Roberson 	 * select the best one.
1807ae7a6b38SJeff Roberson 	 */
180808c9a16cSJeff Roberson 	if (newtd != NULL)
180908c9a16cSJeff Roberson 		sched_switchin(tdq, newtd);
18102454aaf5SJeff Roberson 	newtd = choosethread();
1811ae7a6b38SJeff Roberson 	/*
1812ae7a6b38SJeff Roberson 	 * Call the MD code to switch contexts if necessary.
1813ae7a6b38SJeff Roberson 	 */
1814ebccf1e3SJoseph Koshy 	if (td != newtd) {
1815ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
1816ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1817ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1818ebccf1e3SJoseph Koshy #endif
1819eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
182059c68134SJeff Roberson 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
1821ae7a6b38SJeff Roberson 		cpu_switch(td, newtd, mtx);
1822ae7a6b38SJeff Roberson 		/*
1823ae7a6b38SJeff Roberson 		 * We may return from cpu_switch on a different cpu.  However,
1824ae7a6b38SJeff Roberson 		 * we always return with td_lock pointing to the current cpu's
1825ae7a6b38SJeff Roberson 		 * run queue lock.
1826ae7a6b38SJeff Roberson 		 */
1827ae7a6b38SJeff Roberson 		cpuid = PCPU_GET(cpuid);
1828ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(cpuid);
1829eea4f254SJeff Roberson 		lock_profile_obtain_lock_success(
1830eea4f254SJeff Roberson 		    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
1831ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
1832ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1833ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1834ebccf1e3SJoseph Koshy #endif
1835ae7a6b38SJeff Roberson 	} else
1836ae7a6b38SJeff Roberson 		thread_unblock_switch(td, mtx);
1837ae7a6b38SJeff Roberson 	/*
183862fa74d9SJeff Roberson 	 * We should always get here with the lowest priority td possible.
183962fa74d9SJeff Roberson 	 */
184062fa74d9SJeff Roberson 	tdq->tdq_lowpri = td->td_priority;
184162fa74d9SJeff Roberson 	/*
1842ae7a6b38SJeff Roberson 	 * Assert that all went well and return.
1843ae7a6b38SJeff Roberson 	 */
1844ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1845ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1846ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
184735e6168fSJeff Roberson }
184835e6168fSJeff Roberson 
1849ae7a6b38SJeff Roberson /*
1850ae7a6b38SJeff Roberson  * Adjust thread priorities as a result of a nice request.
1851ae7a6b38SJeff Roberson  */
185235e6168fSJeff Roberson void
1853fa885116SJulian Elischer sched_nice(struct proc *p, int nice)
185435e6168fSJeff Roberson {
185535e6168fSJeff Roberson 	struct thread *td;
185635e6168fSJeff Roberson 
1857fa885116SJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
18587b20fb19SJeff Roberson 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1859e7d50326SJeff Roberson 
1860fa885116SJulian Elischer 	p->p_nice = nice;
18618460a577SJohn Birrell 	FOREACH_THREAD_IN_PROC(p, td) {
18627b20fb19SJeff Roberson 		thread_lock(td);
18638460a577SJohn Birrell 		sched_priority(td);
1864e7d50326SJeff Roberson 		sched_prio(td, td->td_base_user_pri);
18657b20fb19SJeff Roberson 		thread_unlock(td);
186635e6168fSJeff Roberson 	}
1867fa885116SJulian Elischer }
186835e6168fSJeff Roberson 
1869ae7a6b38SJeff Roberson /*
1870ae7a6b38SJeff Roberson  * Record the sleep time for the interactivity scorer.
1871ae7a6b38SJeff Roberson  */
187235e6168fSJeff Roberson void
187344f3b092SJohn Baldwin sched_sleep(struct thread *td)
187435e6168fSJeff Roberson {
1875e7d50326SJeff Roberson 
18767b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
187735e6168fSJeff Roberson 
187854b0e65fSJeff Roberson 	td->td_slptick = ticks;
187935e6168fSJeff Roberson }
188035e6168fSJeff Roberson 
1881ae7a6b38SJeff Roberson /*
1882ae7a6b38SJeff Roberson  * Schedule a thread to resume execution and record how long it voluntarily
1883ae7a6b38SJeff Roberson  * slept.  We also update the pctcpu, interactivity, and priority.
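 * The sleep interval is converted to the same fixed-point scale as run
 * time, (ticks - slptick) << SCHED_TICK_SHIFT, before being added to
 * ts_slptime.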
1884ae7a6b38SJeff Roberson  */
188535e6168fSJeff Roberson void
188635e6168fSJeff Roberson sched_wakeup(struct thread *td)
188735e6168fSJeff Roberson {
188814618990SJeff Roberson 	struct td_sched *ts;
1889ae7a6b38SJeff Roberson 	int slptick;
1890e7d50326SJeff Roberson 
18917b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
189214618990SJeff Roberson 	ts = td->td_sched;
189335e6168fSJeff Roberson 	/*
1894e7d50326SJeff Roberson 	 * If we slept for more than a tick, update our interactivity and
1895e7d50326SJeff Roberson 	 * priority.
189635e6168fSJeff Roberson 	 */
189754b0e65fSJeff Roberson 	slptick = td->td_slptick;
189854b0e65fSJeff Roberson 	td->td_slptick = 0;
1899ae7a6b38SJeff Roberson 	if (slptick && slptick != ticks) {
19009a93305aSJeff Roberson 		u_int hzticks;
1901f1e8dc4aSJeff Roberson 
1902ae7a6b38SJeff Roberson 		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1903ae7a6b38SJeff Roberson 		ts->ts_slptime += hzticks;
19048460a577SJohn Birrell 		sched_interact_update(td);
190514618990SJeff Roberson 		sched_pctcpu_update(ts);
1906f1e8dc4aSJeff Roberson 	}
190714618990SJeff Roberson 	/* Reset the slice value after we sleep. */
190814618990SJeff Roberson 	ts->ts_slice = sched_slice;
19097a5e5e2aSJeff Roberson 	sched_add(td, SRQ_BORING);
191035e6168fSJeff Roberson }
191135e6168fSJeff Roberson 
191235e6168fSJeff Roberson /*
191335e6168fSJeff Roberson  * Penalize the parent for creating a new child and initialize the child's
191435e6168fSJeff Roberson  * priority.
191535e6168fSJeff Roberson  */
191635e6168fSJeff Roberson void
19178460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
191815dc847eSJeff Roberson {
19197b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1920ad1e7d28SJulian Elischer 	sched_fork_thread(td, child);
1921e7d50326SJeff Roberson 	/*
1922e7d50326SJeff Roberson 	 * Penalize the parent and child for forking.
1923e7d50326SJeff Roberson 	 */
1924e7d50326SJeff Roberson 	sched_interact_fork(child);
1925e7d50326SJeff Roberson 	sched_priority(child);
1926ae7a6b38SJeff Roberson 	td->td_sched->ts_runtime += tickincr;
1927e7d50326SJeff Roberson 	sched_interact_update(td);
1928e7d50326SJeff Roberson 	sched_priority(td);
1929ad1e7d28SJulian Elischer }
1930ad1e7d28SJulian Elischer 
1931ae7a6b38SJeff Roberson /*
1932ae7a6b38SJeff Roberson  * Fork a new thread, may be within the same process.
1933ae7a6b38SJeff Roberson  */
1934ad1e7d28SJulian Elischer void
1935ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
1936ad1e7d28SJulian Elischer {
1937ad1e7d28SJulian Elischer 	struct td_sched *ts;
1938ad1e7d28SJulian Elischer 	struct td_sched *ts2;
19398460a577SJohn Birrell 
1940e7d50326SJeff Roberson 	/*
1941e7d50326SJeff Roberson 	 * Initialize child.
1942e7d50326SJeff Roberson 	 */
19437b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1944ed062c8dSJulian Elischer 	sched_newthread(child);
1945ae7a6b38SJeff Roberson 	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
194662fa74d9SJeff Roberson 	child->td_cpuset = cpuset_ref(td->td_cpuset);
1947ad1e7d28SJulian Elischer 	ts = td->td_sched;
1948ad1e7d28SJulian Elischer 	ts2 = child->td_sched;
1949ad1e7d28SJulian Elischer 	ts2->ts_cpu = ts->ts_cpu;
1950ad1e7d28SJulian Elischer 	ts2->ts_runq = NULL;
1951e7d50326SJeff Roberson 	/*
1952e7d50326SJeff Roberson 	 * Grab our parents cpu estimation information and priority.
1953e7d50326SJeff Roberson 	 */
1954ad1e7d28SJulian Elischer 	ts2->ts_ticks = ts->ts_ticks;
1955ad1e7d28SJulian Elischer 	ts2->ts_ltick = ts->ts_ltick;
1956ad1e7d28SJulian Elischer 	ts2->ts_ftick = ts->ts_ftick;
1957e7d50326SJeff Roberson 	child->td_user_pri = td->td_user_pri;
1958e7d50326SJeff Roberson 	child->td_base_user_pri = td->td_base_user_pri;
1959e7d50326SJeff Roberson 	/*
1960e7d50326SJeff Roberson 	 * And update interactivity score.
1961e7d50326SJeff Roberson 	 */
1962ae7a6b38SJeff Roberson 	ts2->ts_slptime = ts->ts_slptime;
1963ae7a6b38SJeff Roberson 	ts2->ts_runtime = ts->ts_runtime;
1964e7d50326SJeff Roberson 	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
196515dc847eSJeff Roberson }
196615dc847eSJeff Roberson 
1967ae7a6b38SJeff Roberson /*
1968ae7a6b38SJeff Roberson  * Adjust the priority class of a thread.
1969ae7a6b38SJeff Roberson  */
197015dc847eSJeff Roberson void
19718460a577SJohn Birrell sched_class(struct thread *td, int class)
197215dc847eSJeff Roberson {
197315dc847eSJeff Roberson 
19747b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
19758460a577SJohn Birrell 	if (td->td_pri_class == class)
197615dc847eSJeff Roberson 		return;
1977155b9987SJeff Roberson 	/*
1978155b9987SJeff Roberson 	 * On SMP, if we're on the RUNQ we must adjust the transferable
1979155b9987SJeff Roberson 	 * count because we could be changing to or from an interrupt
1980155b9987SJeff Roberson 	 * class.
1981155b9987SJeff Roberson 	 */
19827a5e5e2aSJeff Roberson 	if (TD_ON_RUNQ(td)) {
19831e516cf5SJeff Roberson 		struct tdq *tdq;
19841e516cf5SJeff Roberson 
19851e516cf5SJeff Roberson 		tdq = TDQ_CPU(td->td_sched->ts_cpu);
198662fa74d9SJeff Roberson 		if (THREAD_CAN_MIGRATE(td))
1987d2ad694cSJeff Roberson 			tdq->tdq_transferable--;
19881e516cf5SJeff Roberson 		td->td_pri_class = class;
198962fa74d9SJeff Roberson 		if (THREAD_CAN_MIGRATE(td))
1990d2ad694cSJeff Roberson 			tdq->tdq_transferable++;
199180f86c9fSJeff Roberson 	}
19928460a577SJohn Birrell 	td->td_pri_class = class;
199335e6168fSJeff Roberson }
199435e6168fSJeff Roberson 
199535e6168fSJeff Roberson /*
199635e6168fSJeff Roberson  * Return some of the child's priority and interactivity to the parent.
199735e6168fSJeff Roberson  */
199835e6168fSJeff Roberson void
1999fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
200035e6168fSJeff Roberson {
2001e7d50326SJeff Roberson 	struct thread *td;
2002141ad61cSJeff Roberson 
20038460a577SJohn Birrell 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2004431f8906SJulian Elischer 	    child, child->td_name, child->td_priority);
20058460a577SJohn Birrell 
20067b20fb19SJeff Roberson 	PROC_SLOCK_ASSERT(p, MA_OWNED);
2007e7d50326SJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
2008e7d50326SJeff Roberson 	sched_exit_thread(td, child);
2009ad1e7d28SJulian Elischer }
2010ad1e7d28SJulian Elischer 
2011ae7a6b38SJeff Roberson /*
2012ae7a6b38SJeff Roberson  * Penalize another thread for the time spent on this one.  This helps to
2013ae7a6b38SJeff Roberson  * worsen the priority and interactivity of processes which schedule batch
2014ae7a6b38SJeff Roberson  * jobs such as make.  This has little effect on the make process itself but
2015ae7a6b38SJeff Roberson  * causes new processes spawned by it to receive worse scores immediately.
2016ae7a6b38SJeff Roberson  */
2017ad1e7d28SJulian Elischer void
2018fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child)
2019ad1e7d28SJulian Elischer {
2020fc6c30f6SJulian Elischer 
2021e7d50326SJeff Roberson 	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2022431f8906SJulian Elischer 	    child, child->td_name, child->td_priority);
2023e7d50326SJeff Roberson 
2024e7d50326SJeff Roberson #ifdef KSE
2025e7d50326SJeff Roberson 	/*
2026e7d50326SJeff Roberson 	 * KSE forks and exits so often that this penalty causes short-lived
2027e7d50326SJeff Roberson 	 * threads to always be non-interactive.  This causes mozilla to
2028e7d50326SJeff Roberson 	 * crawl under load.
2029e7d50326SJeff Roberson 	 */
2030e7d50326SJeff Roberson 	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2031e7d50326SJeff Roberson 		return;
2032e7d50326SJeff Roberson #endif
2033e7d50326SJeff Roberson 	/*
2034e7d50326SJeff Roberson 	 * Give the child's runtime to the parent without returning the
2035e7d50326SJeff Roberson 	 * sleep time as a penalty to the parent.  This causes shells that
2036e7d50326SJeff Roberson 	 * launch expensive things to mark their children as expensive.
2037e7d50326SJeff Roberson 	 */
20387b20fb19SJeff Roberson 	thread_lock(td);
2039ae7a6b38SJeff Roberson 	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2040fc6c30f6SJulian Elischer 	sched_interact_update(td);
2041e7d50326SJeff Roberson 	sched_priority(td);
20427b20fb19SJeff Roberson 	thread_unlock(td);
2043ad1e7d28SJulian Elischer }
2044ad1e7d28SJulian Elischer 
2045ff256d9cSJeff Roberson void
2046ff256d9cSJeff Roberson sched_preempt(struct thread *td)
2047ff256d9cSJeff Roberson {
2048ff256d9cSJeff Roberson 	struct tdq *tdq;
2049ff256d9cSJeff Roberson 
2050ff256d9cSJeff Roberson 	thread_lock(td);
2051ff256d9cSJeff Roberson 	tdq = TDQ_SELF();
2052ff256d9cSJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2053ff256d9cSJeff Roberson 	tdq->tdq_ipipending = 0;
2054ff256d9cSJeff Roberson 	if (td->td_priority > tdq->tdq_lowpri) {
2055ff256d9cSJeff Roberson 		if (td->td_critnest > 1)
2056ff256d9cSJeff Roberson 			td->td_owepreempt = 1;
2057ff256d9cSJeff Roberson 		else
2058ff256d9cSJeff Roberson 			mi_switch(SW_INVOL | SW_PREEMPT, NULL);
2059ff256d9cSJeff Roberson 	}
2060ff256d9cSJeff Roberson 	thread_unlock(td);
2061ff256d9cSJeff Roberson }
2062ff256d9cSJeff Roberson 
2063ae7a6b38SJeff Roberson /*
2064ae7a6b38SJeff Roberson  * Fix priorities on return to user-space.  Priorities may be elevated due
2065ae7a6b38SJeff Roberson  * to static priorities in msleep() or similar.
2066ae7a6b38SJeff Roberson  */
2067ad1e7d28SJulian Elischer void
2068ad1e7d28SJulian Elischer sched_userret(struct thread *td)
2069ad1e7d28SJulian Elischer {
2070ad1e7d28SJulian Elischer 	/*
2071ad1e7d28SJulian Elischer 	 * XXX we cheat slightly on the locking here to avoid locking in
2072ad1e7d28SJulian Elischer 	 * the usual case.  Setting td_priority here is essentially an
2073ad1e7d28SJulian Elischer 	 * incomplete workaround for not setting it properly elsewhere.
2074ad1e7d28SJulian Elischer 	 * Now that some interrupt handlers are threads, not setting it
2075ad1e7d28SJulian Elischer 	 * properly elsewhere can clobber it in the window between setting
2076ad1e7d28SJulian Elischer 	 * it here and returning to user mode, so don't waste time setting
2077ad1e7d28SJulian Elischer 	 * it perfectly here.
2078ad1e7d28SJulian Elischer 	 */
2079ad1e7d28SJulian Elischer 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
2080ad1e7d28SJulian Elischer 	    ("thread with borrowed priority returning to userland"));
2081ad1e7d28SJulian Elischer 	if (td->td_priority != td->td_user_pri) {
20827b20fb19SJeff Roberson 		thread_lock(td);
2083ad1e7d28SJulian Elischer 		td->td_priority = td->td_user_pri;
2084ad1e7d28SJulian Elischer 		td->td_base_pri = td->td_user_pri;
208562fa74d9SJeff Roberson 		tdq_setlowpri(TDQ_SELF(), td);
20867b20fb19SJeff Roberson 		thread_unlock(td);
2087ad1e7d28SJulian Elischer         }
208835e6168fSJeff Roberson }
208935e6168fSJeff Roberson 
2090ae7a6b38SJeff Roberson /*
2091ae7a6b38SJeff Roberson  * Handle a stathz tick.  This is really only relevant for timeshare
2092ae7a6b38SJeff Roberson  * threads.
2093ae7a6b38SJeff Roberson  */
209435e6168fSJeff Roberson void
20957cf90fb3SJeff Roberson sched_clock(struct thread *td)
209635e6168fSJeff Roberson {
2097ad1e7d28SJulian Elischer 	struct tdq *tdq;
2098ad1e7d28SJulian Elischer 	struct td_sched *ts;
209935e6168fSJeff Roberson 
2100ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
21013f872f85SJeff Roberson 	tdq = TDQ_SELF();
21027fcf154aSJeff Roberson #ifdef SMP
21037fcf154aSJeff Roberson 	/*
21047fcf154aSJeff Roberson 	 * We run the long term load balancer infrequently on the first cpu.
21057fcf154aSJeff Roberson 	 */
21067fcf154aSJeff Roberson 	if (balance_tdq == tdq) {
21077fcf154aSJeff Roberson 		if (balance_ticks && --balance_ticks == 0)
21087fcf154aSJeff Roberson 			sched_balance();
21097fcf154aSJeff Roberson 	}
21107fcf154aSJeff Roberson #endif
21113f872f85SJeff Roberson 	/*
21123f872f85SJeff Roberson 	 * Advance the insert index once for each tick to ensure that all
21133f872f85SJeff Roberson 	 * threads get a chance to run.
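	 * The timeshare runq acts as a circular calendar queue: threads are
	 * inserted relative to tdq_idx while selection drains from
	 * tdq_ridx, and ridx advances only as its queue empties, so waiting
	 * threads are not bypassed indefinitely.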
21143f872f85SJeff Roberson 	 */
21153f872f85SJeff Roberson 	if (tdq->tdq_idx == tdq->tdq_ridx) {
21163f872f85SJeff Roberson 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
21173f872f85SJeff Roberson 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
21183f872f85SJeff Roberson 			tdq->tdq_ridx = tdq->tdq_idx;
21193f872f85SJeff Roberson 	}
21203f872f85SJeff Roberson 	ts = td->td_sched;
2121fd0b8c78SJeff Roberson 	if (td->td_pri_class & PRI_FIFO_BIT)
2122a8949de2SJeff Roberson 		return;
2123fd0b8c78SJeff Roberson 	if (td->td_pri_class == PRI_TIMESHARE) {
2124a8949de2SJeff Roberson 		/*
2125fd0b8c78SJeff Roberson 		 * We used a tick; charge it to the thread so
2126fd0b8c78SJeff Roberson 		 * that we can compute our interactivity.
212715dc847eSJeff Roberson 		 */
2128ae7a6b38SJeff Roberson 		td->td_sched->ts_runtime += tickincr;
21298460a577SJohn Birrell 		sched_interact_update(td);
213073daf66fSJeff Roberson 		sched_priority(td);
2131fd0b8c78SJeff Roberson 	}
213235e6168fSJeff Roberson 	/*
213335e6168fSJeff Roberson 	 * We used up one time slice.
213435e6168fSJeff Roberson 	 */
2135ad1e7d28SJulian Elischer 	if (--ts->ts_slice > 0)
213615dc847eSJeff Roberson 		return;
213735e6168fSJeff Roberson 	/*
213873daf66fSJeff Roberson 	 * We're out of time; force a requeue at userret().
213935e6168fSJeff Roberson 	 */
214073daf66fSJeff Roberson 	ts->ts_slice = sched_slice;
21414a338afdSJulian Elischer 	td->td_flags |= TDF_NEEDRESCHED;
214235e6168fSJeff Roberson }
214335e6168fSJeff Roberson 
2144ae7a6b38SJeff Roberson /*
2145ae7a6b38SJeff Roberson  * Called once per hz tick.  Used for cpu utilization information.  This
2146ae7a6b38SJeff Roberson  * is easier than trying to scale based on stathz.
2147ae7a6b38SJeff Roberson  */
2148ae7a6b38SJeff Roberson void
2149ae7a6b38SJeff Roberson sched_tick(void)
2150ae7a6b38SJeff Roberson {
2151ae7a6b38SJeff Roberson 	struct td_sched *ts;
2152ae7a6b38SJeff Roberson 
2153ae7a6b38SJeff Roberson 	ts = curthread->td_sched;
2154ae7a6b38SJeff Roberson 	/* Adjust ticks for pctcpu */
2155ae7a6b38SJeff Roberson 	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2156ae7a6b38SJeff Roberson 	ts->ts_ltick = ticks;
2157ae7a6b38SJeff Roberson 	/*
2158ae7a6b38SJeff Roberson 	 * Update if we've exceeded our desired tick threshold by over one
2159ae7a6b38SJeff Roberson 	 * second.
2160ae7a6b38SJeff Roberson 	 */
2161ae7a6b38SJeff Roberson 	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2162ae7a6b38SJeff Roberson 		sched_pctcpu_update(ts);
2163ae7a6b38SJeff Roberson }
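
/*
 * A worked example of the accounting above, assuming hz = 1000 and the
 * SCHED_TICK_* constants defined earlier in this file: a thread that
 * stays on cpu for a full second accumulates 1000 << SCHED_TICK_SHIFT
 * in ts_ticks, and ts_ltick records the most recent charge.  Once the
 * window between ts_ftick and ts_ltick outgrows SCHED_TICK_MAX,
 * sched_pctcpu_update() rescales ts_ticks so that the ratio later
 * computed by sched_pctcpu() reflects only recent history.
 */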
2164ae7a6b38SJeff Roberson 
2165ae7a6b38SJeff Roberson /*
2166ae7a6b38SJeff Roberson  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2167ae7a6b38SJeff Roberson  * cooperative idle threads.
2168ae7a6b38SJeff Roberson  */
216935e6168fSJeff Roberson int
217035e6168fSJeff Roberson sched_runnable(void)
217135e6168fSJeff Roberson {
2172ad1e7d28SJulian Elischer 	struct tdq *tdq;
2173b90816f1SJeff Roberson 	int load;
217435e6168fSJeff Roberson 
2175b90816f1SJeff Roberson 	load = 1;
2176b90816f1SJeff Roberson 
2177ad1e7d28SJulian Elischer 	tdq = TDQ_SELF();
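	/*
	 * The running thread is counted in tdq_load, so any caller
	 * other than the idle thread must discount itself below.
	 */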
21783f741ca1SJeff Roberson 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2179d2ad694cSJeff Roberson 		if (tdq->tdq_load > 0)
21803f741ca1SJeff Roberson 			goto out;
21813f741ca1SJeff Roberson 	} else
2182d2ad694cSJeff Roberson 		if (tdq->tdq_load - 1 > 0)
2183b90816f1SJeff Roberson 			goto out;
2184b90816f1SJeff Roberson 	load = 0;
2185b90816f1SJeff Roberson out:
2186b90816f1SJeff Roberson 	return (load);
218735e6168fSJeff Roberson }
218835e6168fSJeff Roberson 
2189ae7a6b38SJeff Roberson /*
2190ae7a6b38SJeff Roberson  * Choose the highest priority thread to run.  The thread is removed from
2191ae7a6b38SJeff Roberson  * the run-queue while running; however, the load remains.  For SMP we set
2192ae7a6b38SJeff Roberson  * the tdq in the global idle bitmask if it idles here.
2193ae7a6b38SJeff Roberson  */
21947a5e5e2aSJeff Roberson struct thread *
2195c9f25d8fSJeff Roberson sched_choose(void)
2196c9f25d8fSJeff Roberson {
2197ae7a6b38SJeff Roberson 	struct td_sched *ts;
2198ae7a6b38SJeff Roberson 	struct tdq *tdq;
2199ae7a6b38SJeff Roberson 
2200ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2201ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2202ad1e7d28SJulian Elischer 	ts = tdq_choose(tdq);
2203ad1e7d28SJulian Elischer 	if (ts != NULL) {
2204ad1e7d28SJulian Elischer 		tdq_runq_rem(tdq, ts);
22057a5e5e2aSJeff Roberson 		return (ts->ts_thread);
220635e6168fSJeff Roberson 	}
220762fa74d9SJeff Roberson 	return (PCPU_GET(idlethread));
22087a5e5e2aSJeff Roberson }
22097a5e5e2aSJeff Roberson 
2210ae7a6b38SJeff Roberson /*
2211ae7a6b38SJeff Roberson  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2212ae7a6b38SJeff Roberson  * we request it here, and it takes effect once we exit a critical section.
2213ae7a6b38SJeff Roberson  */
2214ae7a6b38SJeff Roberson static inline void
2215ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
22167a5e5e2aSJeff Roberson {
22177a5e5e2aSJeff Roberson 	struct thread *ctd;
22187a5e5e2aSJeff Roberson 	int cpri;
22197a5e5e2aSJeff Roberson 	int pri;
22207a5e5e2aSJeff Roberson 
2221ff256d9cSJeff Roberson 	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2222ff256d9cSJeff Roberson 
22237a5e5e2aSJeff Roberson 	ctd = curthread;
22247a5e5e2aSJeff Roberson 	pri = td->td_priority;
22257a5e5e2aSJeff Roberson 	cpri = ctd->td_priority;
2226ff256d9cSJeff Roberson 	if (pri < cpri)
2227ff256d9cSJeff Roberson 		ctd->td_flags |= TDF_NEEDRESCHED;
22287a5e5e2aSJeff Roberson 	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2229ae7a6b38SJeff Roberson 		return;
2230ff256d9cSJeff Roberson 	if (!sched_shouldpreempt(pri, cpri, 0))
2231ae7a6b38SJeff Roberson 		return;
22327a5e5e2aSJeff Roberson 	ctd->td_owepreempt = 1;
223335e6168fSJeff Roberson }
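
/*
 * A condensed, compiled-out sketch of the core test sched_setpreempt()
 * defers to (the real sched_shouldpreempt(), defined earlier in this
 * file, also considers idle and remote cpus).  Lower numeric values are
 * better priorities, so only a thread whose priority is numerically at
 * or below the preempt_thresh cutoff forces immediate preemption;
 * anything else merely leaves TDF_NEEDRESCHED set for the next
 * rescheduling point:
 */
#if 0
static int
shouldpreempt_sketch(int pri, int cpri)
{
	if (pri >= cpri)		/* Not a better priority. */
		return (0);
	if (preempt_thresh == 0)	/* Immediate preemption disabled. */
		return (0);
	return (pri <= preempt_thresh);
}
#endif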
223435e6168fSJeff Roberson 
2235ae7a6b38SJeff Roberson /*
223673daf66fSJeff Roberson  * Add a thread to a thread queue.  Select the appropriate runq and add the
223773daf66fSJeff Roberson  * thread to it.  This is the internal function called when the tdq is
223873daf66fSJeff Roberson  * predetermined.
2239ae7a6b38SJeff Roberson  */
224035e6168fSJeff Roberson void
2241ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
224235e6168fSJeff Roberson {
2243ad1e7d28SJulian Elischer 	struct td_sched *ts;
2244c9f25d8fSJeff Roberson 
2245ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
22467a5e5e2aSJeff Roberson 	KASSERT((td->td_inhibitors == 0),
22477a5e5e2aSJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
22487a5e5e2aSJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
22497a5e5e2aSJeff Roberson 	    ("sched_add: bad thread state"));
2250b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
2251b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
2252ae7a6b38SJeff Roberson 
2253ae7a6b38SJeff Roberson 	ts = td->td_sched;
2254ae7a6b38SJeff Roberson 	if (td->td_priority < tdq->tdq_lowpri)
2255ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
225673daf66fSJeff Roberson 	tdq_runq_pick(tdq, ts);
2257ad1e7d28SJulian Elischer 	tdq_runq_add(tdq, ts, flags);
2258ad1e7d28SJulian Elischer 	tdq_load_add(tdq, ts);
2259ae7a6b38SJeff Roberson }
2260ae7a6b38SJeff Roberson 
2261ae7a6b38SJeff Roberson /*
2262ae7a6b38SJeff Roberson  * Select the target thread queue and add a thread to it.  Request
2263ae7a6b38SJeff Roberson  * preemption or IPI a remote processor if required.
2264ae7a6b38SJeff Roberson  */
2265ae7a6b38SJeff Roberson void
2266ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags)
2267ae7a6b38SJeff Roberson {
2268ae7a6b38SJeff Roberson 	struct tdq *tdq;
22697b8bfa0dSJeff Roberson #ifdef SMP
227073daf66fSJeff Roberson 	struct td_sched *ts;
2271ae7a6b38SJeff Roberson 	int cpu;
2272ae7a6b38SJeff Roberson #endif
2273ae7a6b38SJeff Roberson 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2274431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
2275431f8906SJulian Elischer 	    curthread->td_name);
2276ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2277ae7a6b38SJeff Roberson 	/*
2278ae7a6b38SJeff Roberson 	 * Recalculate the priority before we select the target cpu or
2279ae7a6b38SJeff Roberson 	 * run-queue.
2280ae7a6b38SJeff Roberson 	 */
2281ae7a6b38SJeff Roberson 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2282ae7a6b38SJeff Roberson 		sched_priority(td);
2283ae7a6b38SJeff Roberson #ifdef SMP
2284ae7a6b38SJeff Roberson 	/*
2285ae7a6b38SJeff Roberson 	 * Pick the destination cpu and if it isn't ours transfer to the
2286ae7a6b38SJeff Roberson 	 * target cpu.
2287ae7a6b38SJeff Roberson 	 */
228873daf66fSJeff Roberson 	ts = td->td_sched;
2289ae7a6b38SJeff Roberson 	cpu = sched_pickcpu(ts, flags);
2290ae7a6b38SJeff Roberson 	tdq = sched_setcpu(ts, cpu, flags);
2291ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
229273daf66fSJeff Roberson 	if (cpu != PCPU_GET(cpuid)) {
2293ff256d9cSJeff Roberson 		tdq_notify(tdq, ts);
22947b8bfa0dSJeff Roberson 		return;
22957b8bfa0dSJeff Roberson 	}
2296ae7a6b38SJeff Roberson #else
2297ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2298ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
2299ae7a6b38SJeff Roberson 	/*
2300ae7a6b38SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
2301ae7a6b38SJeff Roberson 	 * to the scheduler's lock.
2302ae7a6b38SJeff Roberson 	 */
2303ae7a6b38SJeff Roberson 	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2304ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
23057b8bfa0dSJeff Roberson #endif
2306ae7a6b38SJeff Roberson 	if (!(flags & SRQ_YIELDING))
2307ae7a6b38SJeff Roberson 		sched_setpreempt(td);
230835e6168fSJeff Roberson }
230935e6168fSJeff Roberson 
2310ae7a6b38SJeff Roberson /*
2311ae7a6b38SJeff Roberson  * Remove a thread from a run-queue without running it.  This is used
2312ae7a6b38SJeff Roberson  * when we're stealing a thread from a remote queue.  Otherwise all threads
2313ae7a6b38SJeff Roberson  * exit by calling sched_exit_thread() and sched_throw() themselves.
2314ae7a6b38SJeff Roberson  */
231535e6168fSJeff Roberson void
23167cf90fb3SJeff Roberson sched_rem(struct thread *td)
231735e6168fSJeff Roberson {
2318ad1e7d28SJulian Elischer 	struct tdq *tdq;
2319ad1e7d28SJulian Elischer 	struct td_sched *ts;
23207cf90fb3SJeff Roberson 
232181d47d3fSJeff Roberson 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2322431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
2323431f8906SJulian Elischer 	    curthread->td_name);
2324ad1e7d28SJulian Elischer 	ts = td->td_sched;
2325ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(ts->ts_cpu);
2326ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2327ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
23287a5e5e2aSJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
2329ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
2330ad1e7d28SJulian Elischer 	tdq_runq_rem(tdq, ts);
2331ad1e7d28SJulian Elischer 	tdq_load_rem(tdq, ts);
23327a5e5e2aSJeff Roberson 	TD_SET_CAN_RUN(td);
233362fa74d9SJeff Roberson 	if (td->td_priority == tdq->tdq_lowpri)
233462fa74d9SJeff Roberson 		tdq_setlowpri(tdq, NULL);
233535e6168fSJeff Roberson }
233635e6168fSJeff Roberson 
2337ae7a6b38SJeff Roberson /*
2338ae7a6b38SJeff Roberson  * Fetch cpu utilization information.  Updates on demand.
2339ae7a6b38SJeff Roberson  */
234035e6168fSJeff Roberson fixpt_t
23417cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
234235e6168fSJeff Roberson {
234335e6168fSJeff Roberson 	fixpt_t pctcpu;
2344ad1e7d28SJulian Elischer 	struct td_sched *ts;
234535e6168fSJeff Roberson 
234635e6168fSJeff Roberson 	pctcpu = 0;
2347ad1e7d28SJulian Elischer 	ts = td->td_sched;
2348ad1e7d28SJulian Elischer 	if (ts == NULL)
2349484288deSJeff Roberson 		return (0);
235035e6168fSJeff Roberson 
23517b20fb19SJeff Roberson 	thread_lock(td);
2352ad1e7d28SJulian Elischer 	if (ts->ts_ticks) {
235335e6168fSJeff Roberson 		int rtick;
235435e6168fSJeff Roberson 
2355ad1e7d28SJulian Elischer 		sched_pctcpu_update(ts);
235635e6168fSJeff Roberson 		/* How many rticks per second? */
2357e7d50326SJeff Roberson 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2358e7d50326SJeff Roberson 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
235935e6168fSJeff Roberson 	}
23607b20fb19SJeff Roberson 	thread_unlock(td);
236135e6168fSJeff Roberson 
236235e6168fSJeff Roberson 	return (pctcpu);
236335e6168fSJeff Roberson }
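
/*
 * A worked example of the fixed-point math above, assuming hz = 1000
 * and the stock FSHIFT of 11 (FSCALE == 2048) from <sys/param.h>: a
 * thread on cpu for half of its sampling window has rtick == 500, so
 *
 *	pctcpu = (2048 * ((2048 * 500) / 1000)) >> 11 == 1024,
 *
 * and consumers such as ps(1) divide by FSCALE to display 50%.
 */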
236435e6168fSJeff Roberson 
236562fa74d9SJeff Roberson /*
236662fa74d9SJeff Roberson  * Enforce affinity settings for a thread.  Called after adjustments to
236762fa74d9SJeff Roberson  * cpumask.
236862fa74d9SJeff Roberson  */
2369885d51a3SJeff Roberson void
2370885d51a3SJeff Roberson sched_affinity(struct thread *td)
2371885d51a3SJeff Roberson {
237262fa74d9SJeff Roberson #ifdef SMP
237362fa74d9SJeff Roberson 	struct td_sched *ts;
237462fa74d9SJeff Roberson 	int cpu;
237562fa74d9SJeff Roberson 
237662fa74d9SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
237762fa74d9SJeff Roberson 	ts = td->td_sched;
237862fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
237962fa74d9SJeff Roberson 		return;
238062fa74d9SJeff Roberson 	if (!TD_IS_RUNNING(td))
238162fa74d9SJeff Roberson 		return;
238262fa74d9SJeff Roberson 	td->td_flags |= TDF_NEEDRESCHED;
238362fa74d9SJeff Roberson 	if (!THREAD_CAN_MIGRATE(td))
238462fa74d9SJeff Roberson 		return;
238562fa74d9SJeff Roberson 	/*
238662fa74d9SJeff Roberson 	 * Assign the new cpu and force a switch before returning to
238762fa74d9SJeff Roberson 	 * userspace.  If the target thread is not running locally, send
238862fa74d9SJeff Roberson 	 * an ipi to force the issue.
238962fa74d9SJeff Roberson 	 */
239062fa74d9SJeff Roberson 	cpu = ts->ts_cpu;
239162fa74d9SJeff Roberson 	ts->ts_cpu = sched_pickcpu(ts, 0);
239262fa74d9SJeff Roberson 	if (cpu != PCPU_GET(cpuid))
239362fa74d9SJeff Roberson 		ipi_selected(1 << cpu, IPI_PREEMPT);
239462fa74d9SJeff Roberson #endif
2395885d51a3SJeff Roberson }
2396885d51a3SJeff Roberson 
2397ae7a6b38SJeff Roberson /*
2398ae7a6b38SJeff Roberson  * Bind a thread to a target cpu.
2399ae7a6b38SJeff Roberson  */
24009bacd788SJeff Roberson void
24019bacd788SJeff Roberson sched_bind(struct thread *td, int cpu)
24029bacd788SJeff Roberson {
2403ad1e7d28SJulian Elischer 	struct td_sched *ts;
24049bacd788SJeff Roberson 
2405c47f202bSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2406ad1e7d28SJulian Elischer 	ts = td->td_sched;
24076b2f763fSJeff Roberson 	if (ts->ts_flags & TSF_BOUND)
2408c95d2db2SJeff Roberson 		sched_unbind(td);
2409ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
24106b2f763fSJeff Roberson 	sched_pin();
241180f86c9fSJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
24129bacd788SJeff Roberson 		return;
24136b2f763fSJeff Roberson 	ts->ts_cpu = cpu;
24149bacd788SJeff Roberson 	/* When we return from mi_switch we'll be on the correct cpu. */
2415279f949eSPoul-Henning Kamp 	mi_switch(SW_VOL, NULL);
24169bacd788SJeff Roberson }
24179bacd788SJeff Roberson 
2418ae7a6b38SJeff Roberson /*
2419ae7a6b38SJeff Roberson  * Release a bound thread.
2420ae7a6b38SJeff Roberson  */
24219bacd788SJeff Roberson void
24229bacd788SJeff Roberson sched_unbind(struct thread *td)
24239bacd788SJeff Roberson {
2424e7d50326SJeff Roberson 	struct td_sched *ts;
2425e7d50326SJeff Roberson 
24267b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2427e7d50326SJeff Roberson 	ts = td->td_sched;
24286b2f763fSJeff Roberson 	if ((ts->ts_flags & TSF_BOUND) == 0)
24296b2f763fSJeff Roberson 		return;
2430e7d50326SJeff Roberson 	ts->ts_flags &= ~TSF_BOUND;
2431e7d50326SJeff Roberson 	sched_unpin();
24329bacd788SJeff Roberson }
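
/*
 * Typical in-kernel usage of the binding interface above (a sketch; the
 * function name and elided work are illustrative).  The thread lock
 * must be held across each call, sched_bind() may context switch to
 * migrate the caller, and the pair also pins/unpins via sched_pin()/
 * sched_unpin():
 */
#if 0
static void
run_on_cpu_sketch(int cpu)
{
	thread_lock(curthread);
	sched_bind(curthread, cpu);	/* Switches cpus if necessary. */
	thread_unlock(curthread);
	/* ... per-cpu work runs here, bound to "cpu" ... */
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}
#endif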
24339bacd788SJeff Roberson 
243435e6168fSJeff Roberson int
2435ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
2436ebccf1e3SJoseph Koshy {
24377b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2438ad1e7d28SJulian Elischer 	return (td->td_sched->ts_flags & TSF_BOUND);
2439ebccf1e3SJoseph Koshy }
2440ebccf1e3SJoseph Koshy 
2441ae7a6b38SJeff Roberson /*
2442ae7a6b38SJeff Roberson  * Basic yield call.
2443ae7a6b38SJeff Roberson  */
244436ec198bSDavid Xu void
244536ec198bSDavid Xu sched_relinquish(struct thread *td)
244636ec198bSDavid Xu {
24477b20fb19SJeff Roberson 	thread_lock(td);
24487b20fb19SJeff Roberson 	SCHED_STAT_INC(switch_relinquish);
244936ec198bSDavid Xu 	mi_switch(SW_VOL, NULL);
24507b20fb19SJeff Roberson 	thread_unlock(td);
245136ec198bSDavid Xu }
245236ec198bSDavid Xu 
2453ae7a6b38SJeff Roberson /*
2454ae7a6b38SJeff Roberson  * Return the total system load.
2455ae7a6b38SJeff Roberson  */
2456ebccf1e3SJoseph Koshy int
245733916c36SJeff Roberson sched_load(void)
245833916c36SJeff Roberson {
245933916c36SJeff Roberson #ifdef SMP
246033916c36SJeff Roberson 	int total;
246133916c36SJeff Roberson 	int i;
246233916c36SJeff Roberson 
246333916c36SJeff Roberson 	total = 0;
246462fa74d9SJeff Roberson 	for (i = 0; i <= mp_maxid; i++)
246562fa74d9SJeff Roberson 		total += TDQ_CPU(i)->tdq_sysload;
246633916c36SJeff Roberson 	return (total);
246733916c36SJeff Roberson #else
2468d2ad694cSJeff Roberson 	return (TDQ_SELF()->tdq_sysload);
246933916c36SJeff Roberson #endif
247033916c36SJeff Roberson }
247133916c36SJeff Roberson 
247233916c36SJeff Roberson int
247335e6168fSJeff Roberson sched_sizeof_proc(void)
247435e6168fSJeff Roberson {
247535e6168fSJeff Roberson 	return (sizeof(struct proc));
247635e6168fSJeff Roberson }
247735e6168fSJeff Roberson 
247835e6168fSJeff Roberson int
247935e6168fSJeff Roberson sched_sizeof_thread(void)
248035e6168fSJeff Roberson {
248135e6168fSJeff Roberson 	return (sizeof(struct thread) + sizeof(struct td_sched));
248235e6168fSJeff Roberson }
2483b41f1452SDavid Xu 
24847a5e5e2aSJeff Roberson /*
24857a5e5e2aSJeff Roberson  * The actual idle process.
24867a5e5e2aSJeff Roberson  */
24877a5e5e2aSJeff Roberson void
24887a5e5e2aSJeff Roberson sched_idletd(void *dummy)
24897a5e5e2aSJeff Roberson {
24907a5e5e2aSJeff Roberson 	struct thread *td;
2491ae7a6b38SJeff Roberson 	struct tdq *tdq;
24927a5e5e2aSJeff Roberson 
24937a5e5e2aSJeff Roberson 	td = curthread;
2494ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
24957a5e5e2aSJeff Roberson 	mtx_assert(&Giant, MA_NOTOWNED);
2496ae7a6b38SJeff Roberson 	/* ULE relies on preemption for idle interruption. */
2497ae7a6b38SJeff Roberson 	for (;;) {
2498ae7a6b38SJeff Roberson #ifdef SMP
2499ae7a6b38SJeff Roberson 		if (tdq_idled(tdq))
25007a5e5e2aSJeff Roberson 			cpu_idle();
2501ae7a6b38SJeff Roberson #else
2502ae7a6b38SJeff Roberson 		cpu_idle();
2503ae7a6b38SJeff Roberson #endif
2504ae7a6b38SJeff Roberson 	}
2505b41f1452SDavid Xu }
2506e7d50326SJeff Roberson 
25077b20fb19SJeff Roberson /*
25087b20fb19SJeff Roberson  * A CPU is entering for the first time or a thread is exiting.
25097b20fb19SJeff Roberson  */
25107b20fb19SJeff Roberson void
25117b20fb19SJeff Roberson sched_throw(struct thread *td)
25127b20fb19SJeff Roberson {
251359c68134SJeff Roberson 	struct thread *newtd;
2514ae7a6b38SJeff Roberson 	struct tdq *tdq;
2515ae7a6b38SJeff Roberson 
2516ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
25177b20fb19SJeff Roberson 	if (td == NULL) {
2518ae7a6b38SJeff Roberson 		/* Correct spinlock nesting and acquire the correct lock. */
2519ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
25207b20fb19SJeff Roberson 		spinlock_exit();
25217b20fb19SJeff Roberson 	} else {
2522ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2523ae7a6b38SJeff Roberson 		tdq_load_rem(tdq, td->td_sched);
2524eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
25257b20fb19SJeff Roberson 	}
25267b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
252759c68134SJeff Roberson 	newtd = choosethread();
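	/* Hand ownership of the run-queue lock directly to the new thread. */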
252859c68134SJeff Roberson 	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
25297b20fb19SJeff Roberson 	PCPU_SET(switchtime, cpu_ticks());
25307b20fb19SJeff Roberson 	PCPU_SET(switchticks, ticks);
253159c68134SJeff Roberson 	cpu_throw(td, newtd);		/* doesn't return */
25327b20fb19SJeff Roberson }
25337b20fb19SJeff Roberson 
2534ae7a6b38SJeff Roberson /*
2535ae7a6b38SJeff Roberson  * This is called from fork_exit().  Just acquire the correct locks and
2536ae7a6b38SJeff Roberson  * let fork do the rest of the work.
2537ae7a6b38SJeff Roberson  */
25387b20fb19SJeff Roberson void
2539fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
25407b20fb19SJeff Roberson {
2541ae7a6b38SJeff Roberson 	struct td_sched *ts;
2542ae7a6b38SJeff Roberson 	struct tdq *tdq;
2543ae7a6b38SJeff Roberson 	int cpuid;
25447b20fb19SJeff Roberson 
25457b20fb19SJeff Roberson 	/*
25467b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
2547ae7a6b38SJeff Roberson 	 * non-nested critical section with the scheduler lock held.
25487b20fb19SJeff Roberson 	 */
2549ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2550ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpuid);
2551ae7a6b38SJeff Roberson 	ts = td->td_sched;
2552ae7a6b38SJeff Roberson 	if (TD_IS_IDLETHREAD(td))
2553ae7a6b38SJeff Roberson 		td->td_lock = TDQ_LOCKPTR(tdq);
2554ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2555ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
255659c68134SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2557eea4f254SJeff Roberson 	lock_profile_obtain_lock_success(
2558eea4f254SJeff Roberson 	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
255962fa74d9SJeff Roberson 	tdq->tdq_lowpri = td->td_priority;
25607b20fb19SJeff Roberson }
25617b20fb19SJeff Roberson 
2562ae7a6b38SJeff Roberson static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2563ae7a6b38SJeff Roberson     "Scheduler");
2564ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2565e7d50326SJeff Roberson     "Scheduler name");
2566ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2567ae7a6b38SJeff Roberson     "Slice size for timeshare threads");
2568ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2569ae7a6b38SJeff Roberson      "Interactivity score threshold");
2570ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2571ae7a6b38SJeff Roberson     0, "Min priority for preemption; lower priorities have greater precedence");
25727b8bfa0dSJeff Roberson #ifdef SMP
2573ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2574ae7a6b38SJeff Roberson     "Number of hz ticks to keep thread affinity for");
2575ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2576ae7a6b38SJeff Roberson     "Enables the long-term load balancer");
25777fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
25787fcf154aSJeff Roberson     &balance_interval, 0,
25797fcf154aSJeff Roberson     "Average interval in stathz ticks between long-term balancer runs");
2580ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2581ae7a6b38SJeff Roberson     "Steals work from another hyper-threaded core on idle");
2582ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2583ae7a6b38SJeff Roberson     "Attempts to steal work from other cores before idling");
258428994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
258528994a58SJeff Roberson     "Minimum load on remote cpu before we'll steal");
25867b8bfa0dSJeff Roberson #endif
2587e7d50326SJeff Roberson 
258854b0e65fSJeff Roberson /* ps compat.  All cpu percentages from ULE are weighted. */
2589a5423ea3SJeff Roberson static int ccpu = 0;
2590e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
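
/*
 * All of the knobs above live under the kern.sched sysctl tree and can
 * be inspected or tuned at runtime with sysctl(8), e.g. (values
 * illustrative only):
 *
 *	# sysctl kern.sched.name
 *	kern.sched.name: ULE
 *	# sysctl kern.sched.preempt_thresh=80
 */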
2591e7d50326SJeff Roberson 
2592e7d50326SJeff Roberson 
2593ed062c8dSJulian Elischer #define KERN_SWITCH_INCLUDE 1
2594ed062c8dSJulian Elischer #include "kern/kern_switch.c"