xref: /freebsd/sys/kern/sched_ule.c (revision bb3dfc6ae90f67c1900f616ac7948a1112cfae32)
135e6168fSJeff Roberson /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4e7d50326SJeff Roberson  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
535e6168fSJeff Roberson  * All rights reserved.
635e6168fSJeff Roberson  *
735e6168fSJeff Roberson  * Redistribution and use in source and binary forms, with or without
835e6168fSJeff Roberson  * modification, are permitted provided that the following conditions
935e6168fSJeff Roberson  * are met:
1035e6168fSJeff Roberson  * 1. Redistributions of source code must retain the above copyright
1135e6168fSJeff Roberson  *    notice unmodified, this list of conditions, and the following
1235e6168fSJeff Roberson  *    disclaimer.
1335e6168fSJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
1435e6168fSJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
1535e6168fSJeff Roberson  *    documentation and/or other materials provided with the distribution.
1635e6168fSJeff Roberson  *
1735e6168fSJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1835e6168fSJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1935e6168fSJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2035e6168fSJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2135e6168fSJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2235e6168fSJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2335e6168fSJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2435e6168fSJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2535e6168fSJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2635e6168fSJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2735e6168fSJeff Roberson  */
2835e6168fSJeff Roberson 
29ae7a6b38SJeff Roberson /*
30ae7a6b38SJeff Roberson  * This file implements the ULE scheduler.  ULE supports independent CPU
31ae7a6b38SJeff Roberson  * run queues and fine grain locking.  It has superior interactive
32ae7a6b38SJeff Roberson  * performance under load even on uni-processor systems.
33ae7a6b38SJeff Roberson  *
34ae7a6b38SJeff Roberson  * etymology:
35a5423ea3SJeff Roberson  *   ULE is the last three letters in schedule.  It owes its name to a
36ae7a6b38SJeff Roberson  * generic user created for a scheduling system by Paul Mikesell at
37ae7a6b38SJeff Roberson  * Isilon Systems and a general lack of creativity on the part of the author.
38ae7a6b38SJeff Roberson  */
39ae7a6b38SJeff Roberson 
40677b542eSDavid E. O'Brien #include <sys/cdefs.h>
41113dda8aSJeff Roberson __FBSDID("$FreeBSD$");
42677b542eSDavid E. O'Brien 
434da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
444da0d332SPeter Wemm #include "opt_sched.h"
459923b511SScott Long 
4635e6168fSJeff Roberson #include <sys/param.h>
4735e6168fSJeff Roberson #include <sys/systm.h>
482c3490b1SMarcel Moolenaar #include <sys/kdb.h>
4935e6168fSJeff Roberson #include <sys/kernel.h>
5035e6168fSJeff Roberson #include <sys/ktr.h>
51c149e542SAttilio Rao #include <sys/limits.h>
5235e6168fSJeff Roberson #include <sys/lock.h>
5335e6168fSJeff Roberson #include <sys/mutex.h>
5435e6168fSJeff Roberson #include <sys/proc.h>
55245f3abfSJeff Roberson #include <sys/resource.h>
569bacd788SJeff Roberson #include <sys/resourcevar.h>
5735e6168fSJeff Roberson #include <sys/sched.h>
58b3e9e682SRyan Stone #include <sys/sdt.h>
5935e6168fSJeff Roberson #include <sys/smp.h>
6035e6168fSJeff Roberson #include <sys/sx.h>
6135e6168fSJeff Roberson #include <sys/sysctl.h>
6235e6168fSJeff Roberson #include <sys/sysproto.h>
63f5c157d9SJohn Baldwin #include <sys/turnstile.h>
643db720fdSDavid Xu #include <sys/umtx.h>
6535e6168fSJeff Roberson #include <sys/vmmeter.h>
6662fa74d9SJeff Roberson #include <sys/cpuset.h>
6707095abfSIvan Voras #include <sys/sbuf.h>
6835e6168fSJeff Roberson 
69ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS
70ebccf1e3SJoseph Koshy #include <sys/pmckern.h>
71ebccf1e3SJoseph Koshy #endif
72ebccf1e3SJoseph Koshy 
736f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
746f5f25e5SJohn Birrell #include <sys/dtrace_bsd.h>
756f5f25e5SJohn Birrell int				dtrace_vtime_active;
766f5f25e5SJohn Birrell dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
776f5f25e5SJohn Birrell #endif
786f5f25e5SJohn Birrell 
7935e6168fSJeff Roberson #include <machine/cpu.h>
8022bf7d9aSJeff Roberson #include <machine/smp.h>
8135e6168fSJeff Roberson 
82ae7a6b38SJeff Roberson #define	KTR_ULE	0
8314618990SJeff Roberson 
840d2cf837SJeff Roberson #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
850d2cf837SJeff Roberson #define	TDQ_NAME_LEN	(sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU)))
866338c579SAttilio Rao #define	TDQ_LOADNAME_LEN	(sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load"))
878f51ad55SJeff Roberson 
886b2f763fSJeff Roberson /*
89ae7a6b38SJeff Roberson  * Thread scheduler specific section.  All fields are protected
90ae7a6b38SJeff Roberson  * by the thread lock.
91ed062c8dSJulian Elischer  */
92ad1e7d28SJulian Elischer struct td_sched {
93ae7a6b38SJeff Roberson 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
94ae7a6b38SJeff Roberson 	short		ts_flags;	/* TSF_* flags. */
95e77f9fedSAdrian Chadd 	int		ts_cpu;		/* CPU that we have affinity for. */
9673daf66fSJeff Roberson 	int		ts_rltick;	/* Real last tick, for affinity. */
97ae7a6b38SJeff Roberson 	int		ts_slice;	/* Ticks of slice remaining. */
98ae7a6b38SJeff Roberson 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
99ae7a6b38SJeff Roberson 	u_int		ts_runtime;	/* Number of ticks we were running */
100ad1e7d28SJulian Elischer 	int		ts_ltick;	/* Last tick that we were running on */
101ad1e7d28SJulian Elischer 	int		ts_ftick;	/* First tick that we were running on */
102ad1e7d28SJulian Elischer 	int		ts_ticks;	/* Tick count */
1038f51ad55SJeff Roberson #ifdef KTR
1048f51ad55SJeff Roberson 	char		ts_name[TS_NAME_LEN];
1058f51ad55SJeff Roberson #endif
106ed062c8dSJulian Elischer };
107ad1e7d28SJulian Elischer /* flags kept in ts_flags */
1087b8bfa0dSJeff Roberson #define	TSF_BOUND	0x0001		/* Thread can not migrate. */
1097b8bfa0dSJeff Roberson #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
11035e6168fSJeff Roberson 
11162fa74d9SJeff Roberson #define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
11262fa74d9SJeff Roberson #define	THREAD_CAN_SCHED(td, cpu)	\
11362fa74d9SJeff Roberson     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
11462fa74d9SJeff Roberson 
11593ccd6bfSKonstantin Belousov _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
11693ccd6bfSKonstantin Belousov     sizeof(struct thread0_storage),
11793ccd6bfSKonstantin Belousov     "increase struct thread0_storage.t0st_sched size");
11893ccd6bfSKonstantin Belousov 
11935e6168fSJeff Roberson /*
12012d56c0fSJohn Baldwin  * Priority ranges used for interactive and non-interactive timeshare
1212dc29adbSJohn Baldwin  * threads.  The timeshare priorities are split up into four ranges.
1222dc29adbSJohn Baldwin  * The first range handles interactive threads.  The last three ranges
1232dc29adbSJohn Baldwin  * (NHALF, x, and NHALF) handle non-interactive threads with the outer
1242dc29adbSJohn Baldwin  * ranges supporting nice values.
12512d56c0fSJohn Baldwin  */
1262dc29adbSJohn Baldwin #define	PRI_TIMESHARE_RANGE	(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
1272dc29adbSJohn Baldwin #define	PRI_INTERACT_RANGE	((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2)
12816705791SAndriy Gapon #define	PRI_BATCH_RANGE		(PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE)
1292dc29adbSJohn Baldwin 
1302dc29adbSJohn Baldwin #define	PRI_MIN_INTERACT	PRI_MIN_TIMESHARE
1312dc29adbSJohn Baldwin #define	PRI_MAX_INTERACT	(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1)
1322dc29adbSJohn Baldwin #define	PRI_MIN_BATCH		(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE)
13312d56c0fSJohn Baldwin #define	PRI_MAX_BATCH		PRI_MAX_TIMESHARE
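/*
 * For illustration, assuming the stock <sys/priority.h> values of
 * PRI_MIN_TIMESHARE = 120 and PRI_MAX_TIMESHARE = 223 (an assumption;
 * the real values come from <sys/priority.h>): PRI_TIMESHARE_RANGE = 104
 * and SCHED_PRI_NRESV = 40, giving PRI_INTERACT_RANGE = 32 and
 * PRI_BATCH_RANGE = 72.  Interactive threads would then occupy
 * priorities 120-151 and batch threads 152-223.
 */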
13412d56c0fSJohn Baldwin 
13512d56c0fSJohn Baldwin /*
136e7d50326SJeff Roberson  * Cpu percentage computation macros and defines.
137e1f89c22SJeff Roberson  *
138e7d50326SJeff Roberson  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
139e7d50326SJeff Roberson  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
1408ab80cf0SJeff Roberson  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
141e7d50326SJeff Roberson  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
142e7d50326SJeff Roberson  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
143e7d50326SJeff Roberson  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
14435e6168fSJeff Roberson  */
145e7d50326SJeff Roberson #define	SCHED_TICK_SECS		10
146e7d50326SJeff Roberson #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
1478ab80cf0SJeff Roberson #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
148e7d50326SJeff Roberson #define	SCHED_TICK_SHIFT	10
149e7d50326SJeff Roberson #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
150eddb4efaSJeff Roberson #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
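/*
 * For example, assuming hz = 1000 (hz is configurable): SCHED_TICK_TARG
 * is 10000 ticks (10 seconds of history) and SCHED_TICK_MAX is 11000.
 * SCHED_TICK_HZ() simply strips the SCHED_TICK_SHIFT scaling from
 * ts_ticks, so SCHED_TICK_HZ(ts) / SCHED_TICK_TOTAL(ts) approximates the
 * fraction of the recent window the thread spent running.
 */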
15135e6168fSJeff Roberson 
15235e6168fSJeff Roberson /*
153e7d50326SJeff Roberson  * These macros determine priorities for non-interactive threads.  They are
154e7d50326SJeff Roberson  * assigned a priority based on their recent cpu utilization as expressed
155e7d50326SJeff Roberson  * by the ratio of ticks to the tick total.  NHALF priorities at the start
156e7d50326SJeff Roberson  * and end of the MIN to MAX timeshare range are only reachable with negative
157e7d50326SJeff Roberson  * or positive nice respectively.
158e7d50326SJeff Roberson  *
159e7d50326SJeff Roberson  * PRI_RANGE:	Priority range for utilization dependent priorities.
160e7d50326SJeff Roberson  * PRI_NRESV:	Number of nice values.
161e7d50326SJeff Roberson  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
162e7d50326SJeff Roberson  * PRI_NICE:	Determines the part of the priority inherited from nice.
163e7d50326SJeff Roberson  */
164e7d50326SJeff Roberson #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
165e7d50326SJeff Roberson #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
16612d56c0fSJohn Baldwin #define	SCHED_PRI_MIN		(PRI_MIN_BATCH + SCHED_PRI_NHALF)
16712d56c0fSJohn Baldwin #define	SCHED_PRI_MAX		(PRI_MAX_BATCH - SCHED_PRI_NHALF)
16878920008SJohn Baldwin #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
169e7d50326SJeff Roberson #define	SCHED_PRI_TICKS(ts)						\
170e7d50326SJeff Roberson     (SCHED_TICK_HZ((ts)) /						\
1711e516cf5SJeff Roberson     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
172e7d50326SJeff Roberson #define	SCHED_PRI_NICE(nice)	(nice)
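/*
 * Roughly, SCHED_PRI_TICKS() scales the hz-domain tick count into
 * [0, SCHED_PRI_RANGE), so a thread that ran for about half of its
 * measured SCHED_TICK_TOTAL() window lands near the middle of the
 * SCHED_PRI_MIN..SCHED_PRI_MAX band, and SCHED_PRI_NICE() then shifts
 * the result by the nice value.
 */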
173e7d50326SJeff Roberson 
174e7d50326SJeff Roberson /*
175e7d50326SJeff Roberson  * These determine the interactivity of a process.  Interactivity differs from
176e7d50326SJeff Roberson  * cpu utilization in that it expresses the voluntary time slept vs time ran
177e7d50326SJeff Roberson  * while cpu utilization includes all time not running.  This more accurately
178e7d50326SJeff Roberson  * models the intent of the thread.
17935e6168fSJeff Roberson  *
180407b0157SJeff Roberson  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
181407b0157SJeff Roberson  *		before throttling back.
182d322132cSJeff Roberson  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
183210491d3SJeff Roberson  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
1849f518f20SAttilio Rao  * INTERACT_THRESH:	Threshold below which a thread is considered interactive.
18535e6168fSJeff Roberson  */
186e7d50326SJeff Roberson #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
187e7d50326SJeff Roberson #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
188210491d3SJeff Roberson #define	SCHED_INTERACT_MAX	(100)
189210491d3SJeff Roberson #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
1904c9612c6SJeff Roberson #define	SCHED_INTERACT_THRESH	(30)
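/*
 * Rough example (the exact scoring is done in sched_interact_score()):
 * scores run from 0 (most interactive) to SCHED_INTERACT_MAX, and with
 * the default threshold of 30 a thread must spend well over half of its
 * tracked sleep+run window voluntarily sleeping before it is treated as
 * interactive and scheduled from the realtime queue.
 */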
191e1f89c22SJeff Roberson 
1925e5c3873SJeff Roberson /*
1935e5c3873SJeff Roberson  * These parameters determine the slice behavior for batch work.
1945e5c3873SJeff Roberson  */
1955e5c3873SJeff Roberson #define	SCHED_SLICE_DEFAULT_DIVISOR	10	/* ~94 ms, 12 stathz ticks. */
1965e5c3873SJeff Roberson #define	SCHED_SLICE_MIN_DIVISOR		6	/* DEFAULT/MIN = ~16 ms. */
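/*
 * For example, assuming stathz of roughly 127: sched_slice is set at boot
 * to 127 / SCHED_SLICE_DEFAULT_DIVISOR = 12 stathz ticks (~94 ms) and
 * sched_slice_min to 12 / SCHED_SLICE_MIN_DIVISOR = 2 ticks (~16 ms).
 */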
1975e5c3873SJeff Roberson 
1983d7f4117SAlexander Motin /* Flags kept in td_flags. */
1993d7f4117SAlexander Motin #define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
2003d7f4117SAlexander Motin 
20135e6168fSJeff Roberson /*
202e7d50326SJeff Roberson  * tickincr:		Converts a stathz tick into a hz domain scaled by
203e7d50326SJeff Roberson  *			the shift factor.  Without the shift the error rate
204e7d50326SJeff Roberson  *			due to rounding would be unacceptably high.
205e7d50326SJeff Roberson  * realstathz:		stathz is sometimes 0; we run off of hz in that case.
206e7d50326SJeff Roberson  * sched_slice:		Runtime of each thread before rescheduling.
207ae7a6b38SJeff Roberson  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
20835e6168fSJeff Roberson  */
209e7d50326SJeff Roberson static int sched_interact = SCHED_INTERACT_THRESH;
210db702c59SEitan Adler static int tickincr = 8 << SCHED_TICK_SHIFT;
2115e5c3873SJeff Roberson static int realstathz = 127;	/* reset during boot. */
2125e5c3873SJeff Roberson static int sched_slice = 10;	/* reset during boot. */
2135e5c3873SJeff Roberson static int sched_slice_min = 1;	/* reset during boot. */
21402e2d6b4SJeff Roberson #ifdef PREEMPTION
21502e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION
21602e2d6b4SJeff Roberson static int preempt_thresh = PRI_MAX_IDLE;
21702e2d6b4SJeff Roberson #else
218ae7a6b38SJeff Roberson static int preempt_thresh = PRI_MIN_KERN;
21902e2d6b4SJeff Roberson #endif
22002e2d6b4SJeff Roberson #else
22102e2d6b4SJeff Roberson static int preempt_thresh = 0;
22202e2d6b4SJeff Roberson #endif
22312d56c0fSJohn Baldwin static int static_boost = PRI_MIN_BATCH;
2241690c6c1SJeff Roberson static int sched_idlespins = 10000;
225b3f40a41SAlexander Motin static int sched_idlespinthresh = -1;
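/*
 * The defaults above are placeholders; sched_initticks() recomputes them
 * once stathz and hz are known.  For example, assuming hz = 1000 and
 * stathz = 127, tickincr becomes (hz << SCHED_TICK_SHIFT) / realstathz,
 * about 8063, and sched_slice becomes 12 stathz ticks.
 */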
226ae7a6b38SJeff Roberson 
22735e6168fSJeff Roberson /*
228ae7a6b38SJeff Roberson  * tdq - per processor runqs and statistics.  All fields are protected by the
229ae7a6b38SJeff Roberson  * tdq_lock.  The load and lowpri may be accessed without the lock to
230ae7a6b38SJeff Roberson  * avoid excess locking in sched_pickcpu().
23135e6168fSJeff Roberson  */
232ad1e7d28SJulian Elischer struct tdq {
23339f819e2SJim Harris 	/*
23439f819e2SJim Harris 	 * Ordered to improve efficiency of cpu_search() and switch().
23539f819e2SJim Harris 	 * tdq_lock is padded to avoid false sharing with tdq_load and
23639f819e2SJim Harris 	 * tdq_cpu_idle.
23739f819e2SJim Harris 	 */
2384ceaf45dSAttilio Rao 	struct mtx_padalign tdq_lock;		/* run queue lock. */
23973daf66fSJeff Roberson 	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
2401690c6c1SJeff Roberson 	volatile int	tdq_load;		/* Aggregate load. */
2419f9ad565SAlexander Motin 	volatile int	tdq_cpu_idle;		/* cpu_idle() is active. */
24273daf66fSJeff Roberson 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
24397e9382dSDon Lewis 	volatile int	tdq_transferable;	/* Transferable thread count. */
24497e9382dSDon Lewis 	volatile short	tdq_switchcnt;		/* Switches this tick. */
24597e9382dSDon Lewis 	volatile short	tdq_oldswitchcnt;	/* Switches last tick. */
24673daf66fSJeff Roberson 	u_char		tdq_lowpri;		/* Lowest priority thread. */
24773daf66fSJeff Roberson 	u_char		tdq_ipipending;		/* IPI pending. */
24873daf66fSJeff Roberson 	u_char		tdq_idx;		/* Current insert index. */
24973daf66fSJeff Roberson 	u_char		tdq_ridx;		/* Current removal index. */
250018ff686SJeff Roberson 	int		tdq_id;			/* cpuid. */
251e7d50326SJeff Roberson 	struct runq	tdq_realtime;		/* real-time run queue. */
252ae7a6b38SJeff Roberson 	struct runq	tdq_timeshare;		/* timeshare run queue. */
253ae7a6b38SJeff Roberson 	struct runq	tdq_idle;		/* Queue of IDLE threads. */
2548f51ad55SJeff Roberson 	char		tdq_name[TDQ_NAME_LEN];
2558f51ad55SJeff Roberson #ifdef KTR
2568f51ad55SJeff Roberson 	char		tdq_loadname[TDQ_LOADNAME_LEN];
2578f51ad55SJeff Roberson #endif
258ae7a6b38SJeff Roberson } __aligned(64);
25935e6168fSJeff Roberson 
2601690c6c1SJeff Roberson /* Idle thread states and config. */
2611690c6c1SJeff Roberson #define	TDQ_RUNNING	1
2621690c6c1SJeff Roberson #define	TDQ_IDLE	2
2637b8bfa0dSJeff Roberson 
26480f86c9fSJeff Roberson #ifdef SMP
26507095abfSIvan Voras struct cpu_group *cpu_top;		/* CPU topology */
2667b8bfa0dSJeff Roberson 
26762fa74d9SJeff Roberson #define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
26862fa74d9SJeff Roberson #define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))
2697b8bfa0dSJeff Roberson 
2707b8bfa0dSJeff Roberson /*
2717b8bfa0dSJeff Roberson  * Run-time tunables.
2727b8bfa0dSJeff Roberson  */
27328994a58SJeff Roberson static int rebalance = 1;
2747fcf154aSJeff Roberson static int balance_interval = 128;	/* Default set in sched_initticks(). */
2757b8bfa0dSJeff Roberson static int affinity;
27628994a58SJeff Roberson static int steal_idle = 1;
27728994a58SJeff Roberson static int steal_thresh = 2;
27897e9382dSDon Lewis static int always_steal = 0;
27997e9382dSDon Lewis static int trysteal_limit = 2;
28080f86c9fSJeff Roberson 
28135e6168fSJeff Roberson /*
282d2ad694cSJeff Roberson  * One thread queue per processor.
28335e6168fSJeff Roberson  */
2847fcf154aSJeff Roberson static struct tdq	*balance_tdq;
2857fcf154aSJeff Roberson static int balance_ticks;
286018ff686SJeff Roberson DPCPU_DEFINE_STATIC(struct tdq, tdq);
2872bf95012SAndrew Turner DPCPU_DEFINE_STATIC(uint32_t, randomval);
288dc03363dSJeff Roberson 
289018ff686SJeff Roberson #define	TDQ_SELF()	((struct tdq *)PCPU_GET(sched))
290018ff686SJeff Roberson #define	TDQ_CPU(x)	(DPCPU_ID_PTR((x), tdq))
291018ff686SJeff Roberson #define	TDQ_ID(x)	((x)->tdq_id)
29280f86c9fSJeff Roberson #else	/* !SMP */
293ad1e7d28SJulian Elischer static struct tdq	tdq_cpu;
294dc03363dSJeff Roberson 
29536b36916SJeff Roberson #define	TDQ_ID(x)	(0)
296ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu)
297ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu)
2980a016a05SJeff Roberson #endif
29935e6168fSJeff Roberson 
300ae7a6b38SJeff Roberson #define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
301ae7a6b38SJeff Roberson #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
302ae7a6b38SJeff Roberson #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
303ae7a6b38SJeff Roberson #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
3044ceaf45dSAttilio Rao #define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
305ae7a6b38SJeff Roberson 
3068460a577SJohn Birrell static void sched_priority(struct thread *);
30721381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char);
3088460a577SJohn Birrell static int sched_interact_score(struct thread *);
3098460a577SJohn Birrell static void sched_interact_update(struct thread *);
3108460a577SJohn Birrell static void sched_interact_fork(struct thread *);
3117295465eSAlexander Motin static void sched_pctcpu_update(struct td_sched *, int);
31235e6168fSJeff Roberson 
3135d7ef00cSJeff Roberson /* Operations on per processor queues */
3149727e637SJeff Roberson static struct thread *tdq_choose(struct tdq *);
315018ff686SJeff Roberson static void tdq_setup(struct tdq *, int i);
3169727e637SJeff Roberson static void tdq_load_add(struct tdq *, struct thread *);
3179727e637SJeff Roberson static void tdq_load_rem(struct tdq *, struct thread *);
3189727e637SJeff Roberson static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
3199727e637SJeff Roberson static __inline void tdq_runq_rem(struct tdq *, struct thread *);
320ff256d9cSJeff Roberson static inline int sched_shouldpreempt(int, int, int);
321ad1e7d28SJulian Elischer void tdq_print(int cpu);
322e7d50326SJeff Roberson static void runq_print(struct runq *rq);
323ae7a6b38SJeff Roberson static void tdq_add(struct tdq *, struct thread *, int);
3245d7ef00cSJeff Roberson #ifdef SMP
32597e9382dSDon Lewis static struct thread *tdq_move(struct tdq *, struct tdq *);
326ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *);
32727ee18adSRyan Stone static void tdq_notify(struct tdq *, struct thread *);
3289727e637SJeff Roberson static struct thread *tdq_steal(struct tdq *, int);
3299727e637SJeff Roberson static struct thread *runq_steal(struct runq *, int);
3309727e637SJeff Roberson static int sched_pickcpu(struct thread *, int);
3317fcf154aSJeff Roberson static void sched_balance(void);
33262fa74d9SJeff Roberson static int sched_balance_pair(struct tdq *, struct tdq *);
3339727e637SJeff Roberson static inline struct tdq *sched_setcpu(struct thread *, int, int);
334ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *);
335c47f202bSJeff Roberson static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
33607095abfSIvan Voras static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
33707095abfSIvan Voras static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
33807095abfSIvan Voras     struct cpu_group *cg, int indent);
3395d7ef00cSJeff Roberson #endif
3405d7ef00cSJeff Roberson 
341e7d50326SJeff Roberson static void sched_setup(void *dummy);
342237fdd78SRobert Watson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
343e7d50326SJeff Roberson 
344e7d50326SJeff Roberson static void sched_initticks(void *dummy);
345237fdd78SRobert Watson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
346237fdd78SRobert Watson     NULL);
347e7d50326SJeff Roberson 
348b3e9e682SRyan Stone SDT_PROVIDER_DEFINE(sched);
349b3e9e682SRyan Stone 
350d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
351b3e9e682SRyan Stone     "struct proc *", "uint8_t");
352d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
353b3e9e682SRyan Stone     "struct proc *", "void *");
354d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
355b3e9e682SRyan Stone     "struct proc *", "void *", "int");
356d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
357b3e9e682SRyan Stone     "struct proc *", "uint8_t", "struct thread *");
358d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
359d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
360b3e9e682SRyan Stone     "struct proc *");
361d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , on__cpu);
362d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , remain__cpu);
363d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
364b3e9e682SRyan Stone     "struct proc *");
365b3e9e682SRyan Stone 
3660567b6ccSWarner Losh /*
367ae7a6b38SJeff Roberson  * Print the threads waiting on a run-queue.
368ae7a6b38SJeff Roberson  */
369e7d50326SJeff Roberson static void
370e7d50326SJeff Roberson runq_print(struct runq *rq)
371e7d50326SJeff Roberson {
372e7d50326SJeff Roberson 	struct rqhead *rqh;
3739727e637SJeff Roberson 	struct thread *td;
374e7d50326SJeff Roberson 	int pri;
375e7d50326SJeff Roberson 	int j;
376e7d50326SJeff Roberson 	int i;
377e7d50326SJeff Roberson 
378e7d50326SJeff Roberson 	for (i = 0; i < RQB_LEN; i++) {
379e7d50326SJeff Roberson 		printf("\t\trunq bits %d 0x%zx\n",
380e7d50326SJeff Roberson 		    i, rq->rq_status.rqb_bits[i]);
381e7d50326SJeff Roberson 		for (j = 0; j < RQB_BPW; j++)
382e7d50326SJeff Roberson 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
383e7d50326SJeff Roberson 				pri = j + (i << RQB_L2BPW);
384e7d50326SJeff Roberson 				rqh = &rq->rq_queues[pri];
3859727e637SJeff Roberson 				TAILQ_FOREACH(td, rqh, td_runq) {
386e7d50326SJeff Roberson 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
3879727e637SJeff Roberson 					    td, td->td_name, td->td_priority,
3889727e637SJeff Roberson 					    td->td_rqindex, pri);
389e7d50326SJeff Roberson 				}
390e7d50326SJeff Roberson 			}
391e7d50326SJeff Roberson 	}
392e7d50326SJeff Roberson }
393e7d50326SJeff Roberson 
394ae7a6b38SJeff Roberson /*
395ae7a6b38SJeff Roberson  * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
396ae7a6b38SJeff Roberson  */
39715dc847eSJeff Roberson void
398ad1e7d28SJulian Elischer tdq_print(int cpu)
39915dc847eSJeff Roberson {
400ad1e7d28SJulian Elischer 	struct tdq *tdq;
40115dc847eSJeff Roberson 
402ad1e7d28SJulian Elischer 	tdq = TDQ_CPU(cpu);
40315dc847eSJeff Roberson 
404c47f202bSJeff Roberson 	printf("tdq %d:\n", TDQ_ID(tdq));
40562fa74d9SJeff Roberson 	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
40662fa74d9SJeff Roberson 	printf("\tLock name:      %s\n", tdq->tdq_name);
407d2ad694cSJeff Roberson 	printf("\tload:           %d\n", tdq->tdq_load);
4081690c6c1SJeff Roberson 	printf("\tswitch cnt:     %d\n", tdq->tdq_switchcnt);
4091690c6c1SJeff Roberson 	printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
410e7d50326SJeff Roberson 	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
4113f872f85SJeff Roberson 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
4121690c6c1SJeff Roberson 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
4131690c6c1SJeff Roberson 	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
414e7d50326SJeff Roberson 	printf("\trealtime runq:\n");
415e7d50326SJeff Roberson 	runq_print(&tdq->tdq_realtime);
416e7d50326SJeff Roberson 	printf("\ttimeshare runq:\n");
417e7d50326SJeff Roberson 	runq_print(&tdq->tdq_timeshare);
418e7d50326SJeff Roberson 	printf("\tidle runq:\n");
419e7d50326SJeff Roberson 	runq_print(&tdq->tdq_idle);
42015dc847eSJeff Roberson }
42115dc847eSJeff Roberson 
422ff256d9cSJeff Roberson static inline int
423ff256d9cSJeff Roberson sched_shouldpreempt(int pri, int cpri, int remote)
424ff256d9cSJeff Roberson {
425ff256d9cSJeff Roberson 	/*
426ff256d9cSJeff Roberson 	 * If the new priority is not better than the current priority there is
427ff256d9cSJeff Roberson 	 * nothing to do.
428ff256d9cSJeff Roberson 	 */
429ff256d9cSJeff Roberson 	if (pri >= cpri)
430ff256d9cSJeff Roberson 		return (0);
431ff256d9cSJeff Roberson 	/*
432ff256d9cSJeff Roberson 	 * Always preempt idle.
433ff256d9cSJeff Roberson 	 */
434ff256d9cSJeff Roberson 	if (cpri >= PRI_MIN_IDLE)
435ff256d9cSJeff Roberson 		return (1);
436ff256d9cSJeff Roberson 	/*
437ff256d9cSJeff Roberson 	 * If preemption is disabled don't preempt others.
438ff256d9cSJeff Roberson 	 */
439ff256d9cSJeff Roberson 	if (preempt_thresh == 0)
440ff256d9cSJeff Roberson 		return (0);
441ff256d9cSJeff Roberson 	/*
442ff256d9cSJeff Roberson 	 * Preempt if we exceed the threshold.
443ff256d9cSJeff Roberson 	 */
444ff256d9cSJeff Roberson 	if (pri <= preempt_thresh)
445ff256d9cSJeff Roberson 		return (1);
446ff256d9cSJeff Roberson 	/*
44712d56c0fSJohn Baldwin 	 * If we're interactive or better and a non-interactive or worse
44812d56c0fSJohn Baldwin 	 * thread is running, preempt only on remote processors.
449ff256d9cSJeff Roberson 	 */
45012d56c0fSJohn Baldwin 	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
451ff256d9cSJeff Roberson 		return (1);
452ff256d9cSJeff Roberson 	return (0);
453ff256d9cSJeff Roberson }
454ff256d9cSJeff Roberson 
455ae7a6b38SJeff Roberson /*
456ae7a6b38SJeff Roberson  * Add a thread to the actual run-queue.  Keeps transferable counts up to
457ae7a6b38SJeff Roberson  * date with what is actually on the run-queue.  Selects the correct
458ae7a6b38SJeff Roberson  * queue position for timeshare threads.
459ae7a6b38SJeff Roberson  */
460155b9987SJeff Roberson static __inline void
4619727e637SJeff Roberson tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
462155b9987SJeff Roberson {
4639727e637SJeff Roberson 	struct td_sched *ts;
464c143ac21SJeff Roberson 	u_char pri;
465c143ac21SJeff Roberson 
466ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
4679727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
46873daf66fSJeff Roberson 
4699727e637SJeff Roberson 	pri = td->td_priority;
47093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
4719727e637SJeff Roberson 	TD_SET_RUNQ(td);
4729727e637SJeff Roberson 	if (THREAD_CAN_MIGRATE(td)) {
473d2ad694cSJeff Roberson 		tdq->tdq_transferable++;
474ad1e7d28SJulian Elischer 		ts->ts_flags |= TSF_XFERABLE;
47580f86c9fSJeff Roberson 	}
47612d56c0fSJohn Baldwin 	if (pri < PRI_MIN_BATCH) {
477c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_realtime;
47812d56c0fSJohn Baldwin 	} else if (pri <= PRI_MAX_BATCH) {
479c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_timeshare;
48012d56c0fSJohn Baldwin 		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
481e7d50326SJeff Roberson 			("Invalid priority %d on timeshare runq", pri));
482e7d50326SJeff Roberson 		/*
483e7d50326SJeff Roberson 		 * This queue contains only priorities between PRI_MIN_BATCH
484e7d50326SJeff Roberson 		 * and PRI_MAX_BATCH.  Use the whole queue to represent these values.
485e7d50326SJeff Roberson 		 */
486c47f202bSJeff Roberson 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
48716705791SAndriy Gapon 			pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE;
488e7d50326SJeff Roberson 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
4893f872f85SJeff Roberson 			/*
4903f872f85SJeff Roberson 			 * This effectively shortens the queue by one so we
4913f872f85SJeff Roberson 			 * can have a one slot difference between idx and
4923f872f85SJeff Roberson 			 * ridx while we wait for threads to drain.
4933f872f85SJeff Roberson 			 */
4943f872f85SJeff Roberson 			if (tdq->tdq_ridx != tdq->tdq_idx &&
4953f872f85SJeff Roberson 			    pri == tdq->tdq_ridx)
4964499aff6SJeff Roberson 				pri = (unsigned char)(pri - 1) % RQ_NQS;
497e7d50326SJeff Roberson 		} else
4983f872f85SJeff Roberson 			pri = tdq->tdq_ridx;
4999727e637SJeff Roberson 		runq_add_pri(ts->ts_runq, td, pri, flags);
500c143ac21SJeff Roberson 		return;
501e7d50326SJeff Roberson 	} else
50273daf66fSJeff Roberson 		ts->ts_runq = &tdq->tdq_idle;
5039727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
50473daf66fSJeff Roberson }
50573daf66fSJeff Roberson 
50673daf66fSJeff Roberson /*
507ae7a6b38SJeff Roberson  * Remove a thread from a run-queue.  This typically happens when a thread
508ae7a6b38SJeff Roberson  * is selected to run.  Running threads are not on the queue and the
509ae7a6b38SJeff Roberson  * transferable count does not reflect them.
510ae7a6b38SJeff Roberson  */
511155b9987SJeff Roberson static __inline void
5129727e637SJeff Roberson tdq_runq_rem(struct tdq *tdq, struct thread *td)
513155b9987SJeff Roberson {
5149727e637SJeff Roberson 	struct td_sched *ts;
5159727e637SJeff Roberson 
51693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
517ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
518ae7a6b38SJeff Roberson 	KASSERT(ts->ts_runq != NULL,
5199727e637SJeff Roberson 	    ("tdq_runq_remove: thread %p null ts_runq", td));
520ad1e7d28SJulian Elischer 	if (ts->ts_flags & TSF_XFERABLE) {
521d2ad694cSJeff Roberson 		tdq->tdq_transferable--;
522ad1e7d28SJulian Elischer 		ts->ts_flags &= ~TSF_XFERABLE;
52380f86c9fSJeff Roberson 	}
5243f872f85SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
5253f872f85SJeff Roberson 		if (tdq->tdq_idx != tdq->tdq_ridx)
5269727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
527e7d50326SJeff Roberson 		else
5289727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, NULL);
5293f872f85SJeff Roberson 	} else
5309727e637SJeff Roberson 		runq_remove(ts->ts_runq, td);
531155b9987SJeff Roberson }
532155b9987SJeff Roberson 
533ae7a6b38SJeff Roberson /*
534ae7a6b38SJeff Roberson  * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
535ae7a6b38SJeff Roberson  * for this thread to the referenced thread queue.
536ae7a6b38SJeff Roberson  */
537a8949de2SJeff Roberson static void
5389727e637SJeff Roberson tdq_load_add(struct tdq *tdq, struct thread *td)
5395d7ef00cSJeff Roberson {
540ae7a6b38SJeff Roberson 
541ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
5429727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
54303d17db7SJeff Roberson 
544d2ad694cSJeff Roberson 	tdq->tdq_load++;
5451b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
546d2ad694cSJeff Roberson 		tdq->tdq_sysload++;
5478f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
548d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
5495d7ef00cSJeff Roberson }
55015dc847eSJeff Roberson 
551ae7a6b38SJeff Roberson /*
552ae7a6b38SJeff Roberson  * Remove the load from a thread that is transitioning to a sleep state or
553ae7a6b38SJeff Roberson  * exiting.
554ae7a6b38SJeff Roberson  */
555a8949de2SJeff Roberson static void
5569727e637SJeff Roberson tdq_load_rem(struct tdq *tdq, struct thread *td)
5575d7ef00cSJeff Roberson {
558ae7a6b38SJeff Roberson 
5599727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
560ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
561ae7a6b38SJeff Roberson 	KASSERT(tdq->tdq_load != 0,
562c47f202bSJeff Roberson 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
56303d17db7SJeff Roberson 
564d2ad694cSJeff Roberson 	tdq->tdq_load--;
5651b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
56603d17db7SJeff Roberson 		tdq->tdq_sysload--;
5678f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
568d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
56915dc847eSJeff Roberson }
57015dc847eSJeff Roberson 
571356500a3SJeff Roberson /*
5725e5c3873SJeff Roberson  * Bound timeshare latency by decreasing slice size as load increases.  We
5735e5c3873SJeff Roberson  * consider the maximum latency to scale with the number of threads waiting
5745e5c3873SJeff Roberson  * to run aside from curthread, and target no more than sched_slice of
5755e5c3873SJeff Roberson  * latency but no less than sched_slice_min of runtime.
5765e5c3873SJeff Roberson  */
5775e5c3873SJeff Roberson static inline int
5785e5c3873SJeff Roberson tdq_slice(struct tdq *tdq)
5795e5c3873SJeff Roberson {
5805e5c3873SJeff Roberson 	int load;
5815e5c3873SJeff Roberson 
5825e5c3873SJeff Roberson 	/*
5835e5c3873SJeff Roberson 	 * It is safe to use tdq_sysload here because this is called from
5845e5c3873SJeff Roberson 	 * contexts where timeshare threads are running and so there
5855e5c3873SJeff Roberson 	 * cannot be higher priority load in the system.
5865e5c3873SJeff Roberson 	 */
5875e5c3873SJeff Roberson 	load = tdq->tdq_sysload - 1;
5885e5c3873SJeff Roberson 	if (load >= SCHED_SLICE_MIN_DIVISOR)
5895e5c3873SJeff Roberson 		return (sched_slice_min);
5905e5c3873SJeff Roberson 	if (load <= 1)
5915e5c3873SJeff Roberson 		return (sched_slice);
5925e5c3873SJeff Roberson 	return (sched_slice / load);
5935e5c3873SJeff Roberson }
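/*
 * For example, with sched_slice = 12 and sched_slice_min = 2 (the
 * boot-time values assuming stathz of ~127): with load (tdq_sysload - 1)
 * of 1 or less a thread gets the full 12-tick slice, at a load of 3 it
 * gets 12 / 3 = 4 ticks, and at a load of 6 or more the slice bottoms
 * out at sched_slice_min = 2 ticks.
 */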
5945e5c3873SJeff Roberson 
5955e5c3873SJeff Roberson /*
59662fa74d9SJeff Roberson  * Set lowpri to its exact value by searching the run-queue and
59762fa74d9SJeff Roberson  * evaluating curthread.  curthread may be passed as an optimization.
598356500a3SJeff Roberson  */
59922bf7d9aSJeff Roberson static void
60062fa74d9SJeff Roberson tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
60162fa74d9SJeff Roberson {
60262fa74d9SJeff Roberson 	struct thread *td;
60362fa74d9SJeff Roberson 
60462fa74d9SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
60562fa74d9SJeff Roberson 	if (ctd == NULL)
60662fa74d9SJeff Roberson 		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
6079727e637SJeff Roberson 	td = tdq_choose(tdq);
6089727e637SJeff Roberson 	if (td == NULL || td->td_priority > ctd->td_priority)
60962fa74d9SJeff Roberson 		tdq->tdq_lowpri = ctd->td_priority;
61062fa74d9SJeff Roberson 	else
61162fa74d9SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
61262fa74d9SJeff Roberson }
61362fa74d9SJeff Roberson 
61462fa74d9SJeff Roberson #ifdef SMP
6159129dd59SPedro F. Giffuni /*
6169129dd59SPedro F. Giffuni  * We need some randomness. Implement a classic Linear Congruential
6179129dd59SPedro F. Giffuni  * Generator X_{n+1}=(aX_n+c) mod m. These values are optimized for
6189129dd59SPedro F. Giffuni  * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits
6199129dd59SPedro F. Giffuni  * of the random state (in the low bits of our answer) to keep
6209129dd59SPedro F. Giffuni  * the maximum randomness.
6219129dd59SPedro F. Giffuni  */
6229129dd59SPedro F. Giffuni static uint32_t
6239129dd59SPedro F. Giffuni sched_random(void)
6249129dd59SPedro F. Giffuni {
6259129dd59SPedro F. Giffuni 	uint32_t *rndptr;
6269129dd59SPedro F. Giffuni 
6279129dd59SPedro F. Giffuni 	rndptr = DPCPU_PTR(randomval);
6289129dd59SPedro F. Giffuni 	*rndptr = *rndptr * 69069 + 5;
6299129dd59SPedro F. Giffuni 
6309129dd59SPedro F. Giffuni 	return (*rndptr >> 16);
6319129dd59SPedro F. Giffuni }
6329129dd59SPedro F. Giffuni 
63362fa74d9SJeff Roberson struct cpu_search {
634c76ee827SJeff Roberson 	cpuset_t cs_mask;
63536acfc65SAlexander Motin 	u_int	cs_prefer;
63636acfc65SAlexander Motin 	int	cs_pri;		/* Min priority for low. */
63736acfc65SAlexander Motin 	int	cs_limit;	/* Max load for low, min load for high. */
63836acfc65SAlexander Motin 	int	cs_cpu;
63936acfc65SAlexander Motin 	int	cs_load;
64062fa74d9SJeff Roberson };
64162fa74d9SJeff Roberson 
64262fa74d9SJeff Roberson #define	CPU_SEARCH_LOWEST	0x1
64362fa74d9SJeff Roberson #define	CPU_SEARCH_HIGHEST	0x2
64462fa74d9SJeff Roberson #define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)
64562fa74d9SJeff Roberson 
646c76ee827SJeff Roberson #define	CPUSET_FOREACH(cpu, mask)				\
647c76ee827SJeff Roberson 	for ((cpu) = 0; (cpu) <= mp_maxid; (cpu)++)		\
64871a19bdcSAttilio Rao 		if (CPU_ISSET(cpu, &mask))
64962fa74d9SJeff Roberson 
6502499a5ccSKonstantin Belousov static __always_inline int cpu_search(const struct cpu_group *cg,
6512499a5ccSKonstantin Belousov     struct cpu_search *low, struct cpu_search *high, const int match);
6522499a5ccSKonstantin Belousov int __noinline cpu_search_lowest(const struct cpu_group *cg,
6532499a5ccSKonstantin Belousov     struct cpu_search *low);
6542499a5ccSKonstantin Belousov int __noinline cpu_search_highest(const struct cpu_group *cg,
65562fa74d9SJeff Roberson     struct cpu_search *high);
6562499a5ccSKonstantin Belousov int __noinline cpu_search_both(const struct cpu_group *cg,
6572499a5ccSKonstantin Belousov     struct cpu_search *low, struct cpu_search *high);
65862fa74d9SJeff Roberson 
65962fa74d9SJeff Roberson /*
66062fa74d9SJeff Roberson  * Search the tree of cpu_groups for the lowest or highest loaded cpu
66162fa74d9SJeff Roberson  * according to the match argument.  This routine actually compares the
66262fa74d9SJeff Roberson  * load on all paths through the tree and finds the least loaded cpu on
66362fa74d9SJeff Roberson  * the least loaded path, which may differ from the least loaded cpu in
664db4fcadfSConrad Meyer  * the system.  This balances work among caches and buses.
66562fa74d9SJeff Roberson  *
66662fa74d9SJeff Roberson  * This inline is instantiated in three forms below using constants for the
66762fa74d9SJeff Roberson  * match argument.  It is reduced to the minimum set for each case.  It is
66862fa74d9SJeff Roberson  * also recursive to the depth of the tree.
66962fa74d9SJeff Roberson  */
6702499a5ccSKonstantin Belousov static __always_inline int
67136acfc65SAlexander Motin cpu_search(const struct cpu_group *cg, struct cpu_search *low,
67262fa74d9SJeff Roberson     struct cpu_search *high, const int match)
67362fa74d9SJeff Roberson {
67462fa74d9SJeff Roberson 	struct cpu_search lgroup;
67562fa74d9SJeff Roberson 	struct cpu_search hgroup;
67636acfc65SAlexander Motin 	cpuset_t cpumask;
67762fa74d9SJeff Roberson 	struct cpu_group *child;
67836acfc65SAlexander Motin 	struct tdq *tdq;
6790567b6ccSWarner Losh 	int cpu, i, hload, lload, load, total, rnd;
68062fa74d9SJeff Roberson 
68136acfc65SAlexander Motin 	total = 0;
68236acfc65SAlexander Motin 	cpumask = cg->cg_mask;
68362fa74d9SJeff Roberson 	if (match & CPU_SEARCH_LOWEST) {
68436acfc65SAlexander Motin 		lload = INT_MAX;
68562fa74d9SJeff Roberson 		lgroup = *low;
68662fa74d9SJeff Roberson 	}
68762fa74d9SJeff Roberson 	if (match & CPU_SEARCH_HIGHEST) {
68870801abeSAlexander Motin 		hload = INT_MIN;
68962fa74d9SJeff Roberson 		hgroup = *high;
69062fa74d9SJeff Roberson 	}
69136acfc65SAlexander Motin 
69236acfc65SAlexander Motin 	/* Iterate through the child CPU groups and then remaining CPUs. */
69358909b74SAlexander Motin 	for (i = cg->cg_children, cpu = mp_maxid; ; ) {
69470801abeSAlexander Motin 		if (i == 0) {
69558909b74SAlexander Motin #ifdef HAVE_INLINE_FFSL
69658909b74SAlexander Motin 			cpu = CPU_FFS(&cpumask) - 1;
69758909b74SAlexander Motin #else
69870801abeSAlexander Motin 			while (cpu >= 0 && !CPU_ISSET(cpu, &cpumask))
69970801abeSAlexander Motin 				cpu--;
70058909b74SAlexander Motin #endif
70170801abeSAlexander Motin 			if (cpu < 0)
70236acfc65SAlexander Motin 				break;
70336acfc65SAlexander Motin 			child = NULL;
70436acfc65SAlexander Motin 		} else
70570801abeSAlexander Motin 			child = &cg->cg_child[i - 1];
70636acfc65SAlexander Motin 
70770801abeSAlexander Motin 		if (match & CPU_SEARCH_LOWEST)
70870801abeSAlexander Motin 			lgroup.cs_cpu = -1;
70970801abeSAlexander Motin 		if (match & CPU_SEARCH_HIGHEST)
71070801abeSAlexander Motin 			hgroup.cs_cpu = -1;
71136acfc65SAlexander Motin 		if (child) {			/* Handle child CPU group. */
71236acfc65SAlexander Motin 			CPU_NAND(&cpumask, &child->cg_mask);
71362fa74d9SJeff Roberson 			switch (match) {
71462fa74d9SJeff Roberson 			case CPU_SEARCH_LOWEST:
71562fa74d9SJeff Roberson 				load = cpu_search_lowest(child, &lgroup);
71662fa74d9SJeff Roberson 				break;
71762fa74d9SJeff Roberson 			case CPU_SEARCH_HIGHEST:
71862fa74d9SJeff Roberson 				load = cpu_search_highest(child, &hgroup);
71962fa74d9SJeff Roberson 				break;
72062fa74d9SJeff Roberson 			case CPU_SEARCH_BOTH:
72162fa74d9SJeff Roberson 				load = cpu_search_both(child, &lgroup, &hgroup);
72262fa74d9SJeff Roberson 				break;
72362fa74d9SJeff Roberson 			}
72436acfc65SAlexander Motin 		} else {			/* Handle child CPU. */
72558909b74SAlexander Motin 			CPU_CLR(cpu, &cpumask);
72636acfc65SAlexander Motin 			tdq = TDQ_CPU(cpu);
72736acfc65SAlexander Motin 			load = tdq->tdq_load * 256;
728b250ad34SWarner Losh 			rnd = sched_random() % 32;
72936acfc65SAlexander Motin 			if (match & CPU_SEARCH_LOWEST) {
73036acfc65SAlexander Motin 				if (cpu == low->cs_prefer)
73136acfc65SAlexander Motin 					load -= 64;
73236acfc65SAlexander Motin 				/* If that CPU is allowed and get data. */
73370801abeSAlexander Motin 				if (tdq->tdq_lowpri > lgroup.cs_pri &&
73470801abeSAlexander Motin 				    tdq->tdq_load <= lgroup.cs_limit &&
73570801abeSAlexander Motin 				    CPU_ISSET(cpu, &lgroup.cs_mask)) {
73636acfc65SAlexander Motin 					lgroup.cs_cpu = cpu;
73736acfc65SAlexander Motin 					lgroup.cs_load = load - rnd;
73836acfc65SAlexander Motin 				}
73962fa74d9SJeff Roberson 			}
74062fa74d9SJeff Roberson 			if (match & CPU_SEARCH_HIGHEST)
74170801abeSAlexander Motin 				if (tdq->tdq_load >= hgroup.cs_limit &&
74270801abeSAlexander Motin 				    tdq->tdq_transferable &&
74370801abeSAlexander Motin 				    CPU_ISSET(cpu, &hgroup.cs_mask)) {
74436acfc65SAlexander Motin 					hgroup.cs_cpu = cpu;
74536acfc65SAlexander Motin 					hgroup.cs_load = load - rnd;
74662fa74d9SJeff Roberson 				}
74762fa74d9SJeff Roberson 		}
74836acfc65SAlexander Motin 		total += load;
74962fa74d9SJeff Roberson 
75036acfc65SAlexander Motin 		/* We have info about child item. Compare it. */
75136acfc65SAlexander Motin 		if (match & CPU_SEARCH_LOWEST) {
75270801abeSAlexander Motin 			if (lgroup.cs_cpu >= 0 &&
7536022f0bcSAlexander Motin 			    (load < lload ||
7546022f0bcSAlexander Motin 			     (load == lload && lgroup.cs_load < low->cs_load))) {
75536acfc65SAlexander Motin 				lload = load;
75636acfc65SAlexander Motin 				low->cs_cpu = lgroup.cs_cpu;
75736acfc65SAlexander Motin 				low->cs_load = lgroup.cs_load;
75836acfc65SAlexander Motin 			}
75936acfc65SAlexander Motin 		}
76036acfc65SAlexander Motin 		if (match & CPU_SEARCH_HIGHEST)
76170801abeSAlexander Motin 			if (hgroup.cs_cpu >= 0 &&
7626022f0bcSAlexander Motin 			    (load > hload ||
7636022f0bcSAlexander Motin 			     (load == hload && hgroup.cs_load > high->cs_load))) {
76436acfc65SAlexander Motin 				hload = load;
76536acfc65SAlexander Motin 				high->cs_cpu = hgroup.cs_cpu;
76636acfc65SAlexander Motin 				high->cs_load = hgroup.cs_load;
76736acfc65SAlexander Motin 			}
76870801abeSAlexander Motin 		if (child) {
76970801abeSAlexander Motin 			i--;
77070801abeSAlexander Motin 			if (i == 0 && CPU_EMPTY(&cpumask))
77170801abeSAlexander Motin 				break;
77258909b74SAlexander Motin 		}
77358909b74SAlexander Motin #ifndef HAVE_INLINE_FFSL
77458909b74SAlexander Motin 		else
77570801abeSAlexander Motin 			cpu--;
77658909b74SAlexander Motin #endif
77762fa74d9SJeff Roberson 	}
77862fa74d9SJeff Roberson 	return (total);
77962fa74d9SJeff Roberson }
78062fa74d9SJeff Roberson 
78162fa74d9SJeff Roberson /*
78262fa74d9SJeff Roberson  * cpu_search instantiations must pass constants to maintain the inline
78362fa74d9SJeff Roberson  * optimization.
78462fa74d9SJeff Roberson  */
78562fa74d9SJeff Roberson int
78636acfc65SAlexander Motin cpu_search_lowest(const struct cpu_group *cg, struct cpu_search *low)
78762fa74d9SJeff Roberson {
78862fa74d9SJeff Roberson 	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
78962fa74d9SJeff Roberson }
79062fa74d9SJeff Roberson 
79162fa74d9SJeff Roberson int
79236acfc65SAlexander Motin cpu_search_highest(const struct cpu_group *cg, struct cpu_search *high)
79362fa74d9SJeff Roberson {
79462fa74d9SJeff Roberson 	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
79562fa74d9SJeff Roberson }
79662fa74d9SJeff Roberson 
79762fa74d9SJeff Roberson int
79836acfc65SAlexander Motin cpu_search_both(const struct cpu_group *cg, struct cpu_search *low,
79962fa74d9SJeff Roberson     struct cpu_search *high)
80062fa74d9SJeff Roberson {
80162fa74d9SJeff Roberson 	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
80262fa74d9SJeff Roberson }
80362fa74d9SJeff Roberson 
80462fa74d9SJeff Roberson /*
80562fa74d9SJeff Roberson  * Find the cpu with the least load via the least loaded path that has a
80662fa74d9SJeff Roberson  * lowpri greater than pri.  A pri of -1 indicates any priority is
80762fa74d9SJeff Roberson  * acceptable.
80862fa74d9SJeff Roberson  */
80962fa74d9SJeff Roberson static inline int
81036acfc65SAlexander Motin sched_lowest(const struct cpu_group *cg, cpuset_t mask, int pri, int maxload,
81136acfc65SAlexander Motin     int prefer)
81262fa74d9SJeff Roberson {
81362fa74d9SJeff Roberson 	struct cpu_search low;
81462fa74d9SJeff Roberson 
81562fa74d9SJeff Roberson 	low.cs_cpu = -1;
81636acfc65SAlexander Motin 	low.cs_prefer = prefer;
81762fa74d9SJeff Roberson 	low.cs_mask = mask;
81836acfc65SAlexander Motin 	low.cs_pri = pri;
81936acfc65SAlexander Motin 	low.cs_limit = maxload;
82062fa74d9SJeff Roberson 	cpu_search_lowest(cg, &low);
82162fa74d9SJeff Roberson 	return low.cs_cpu;
82262fa74d9SJeff Roberson }
82362fa74d9SJeff Roberson 
82462fa74d9SJeff Roberson /*
82562fa74d9SJeff Roberson  * Find the cpu with the highest load via the highest loaded path.
82662fa74d9SJeff Roberson  */
82762fa74d9SJeff Roberson static inline int
82836acfc65SAlexander Motin sched_highest(const struct cpu_group *cg, cpuset_t mask, int minload)
82962fa74d9SJeff Roberson {
83062fa74d9SJeff Roberson 	struct cpu_search high;
83162fa74d9SJeff Roberson 
83262fa74d9SJeff Roberson 	high.cs_cpu = -1;
83362fa74d9SJeff Roberson 	high.cs_mask = mask;
83462fa74d9SJeff Roberson 	high.cs_limit = minload;
83562fa74d9SJeff Roberson 	cpu_search_highest(cg, &high);
83662fa74d9SJeff Roberson 	return high.cs_cpu;
83762fa74d9SJeff Roberson }
83862fa74d9SJeff Roberson 
83962fa74d9SJeff Roberson static void
84062fa74d9SJeff Roberson sched_balance_group(struct cpu_group *cg)
84162fa74d9SJeff Roberson {
842018ff686SJeff Roberson 	struct tdq *tdq;
84336acfc65SAlexander Motin 	cpuset_t hmask, lmask;
84436acfc65SAlexander Motin 	int high, low, anylow;
84562fa74d9SJeff Roberson 
84636acfc65SAlexander Motin 	CPU_FILL(&hmask);
84762fa74d9SJeff Roberson 	for (;;) {
84897e9382dSDon Lewis 		high = sched_highest(cg, hmask, 2);
84936acfc65SAlexander Motin 		/* Stop if there is no more CPU with transferrable threads. */
85036acfc65SAlexander Motin 		if (high == -1)
85162fa74d9SJeff Roberson 			break;
85236acfc65SAlexander Motin 		CPU_CLR(high, &hmask);
85336acfc65SAlexander Motin 		CPU_COPY(&hmask, &lmask);
85436acfc65SAlexander Motin 		/* Stop if there is no more CPU left for low. */
85536acfc65SAlexander Motin 		if (CPU_EMPTY(&lmask))
85662fa74d9SJeff Roberson 			break;
85736acfc65SAlexander Motin 		anylow = 1;
858018ff686SJeff Roberson 		tdq = TDQ_CPU(high);
85936acfc65SAlexander Motin nextlow:
860018ff686SJeff Roberson 		low = sched_lowest(cg, lmask, -1, tdq->tdq_load - 1, high);
86136acfc65SAlexander Motin 		/* Stop if we looked well and found no less loaded CPU. */
86236acfc65SAlexander Motin 		if (anylow && low == -1)
86336acfc65SAlexander Motin 			break;
86436acfc65SAlexander Motin 		/* Go to next high if we found no less loaded CPU. */
86536acfc65SAlexander Motin 		if (low == -1)
86636acfc65SAlexander Motin 			continue;
86736acfc65SAlexander Motin 		/* Transfer thread from high to low. */
868018ff686SJeff Roberson 		if (sched_balance_pair(tdq, TDQ_CPU(low))) {
86936acfc65SAlexander Motin 			/* CPU that got thread can no longer be a donor. */
87036acfc65SAlexander Motin 			CPU_CLR(low, &hmask);
87136acfc65SAlexander Motin 		} else {
87262fa74d9SJeff Roberson 			/*
87336acfc65SAlexander Motin 			 * If failed, then there is no threads on high
87436acfc65SAlexander Motin 			 * that can run on this low. Drop low from low
87536acfc65SAlexander Motin 			 * mask and look for different one.
87662fa74d9SJeff Roberson 			 */
87736acfc65SAlexander Motin 			CPU_CLR(low, &lmask);
87836acfc65SAlexander Motin 			anylow = 0;
87936acfc65SAlexander Motin 			goto nextlow;
88062fa74d9SJeff Roberson 		}
88136acfc65SAlexander Motin 	}
88262fa74d9SJeff Roberson }
88362fa74d9SJeff Roberson 
88462fa74d9SJeff Roberson static void
88562375ca8SEd Schouten sched_balance(void)
886356500a3SJeff Roberson {
8877fcf154aSJeff Roberson 	struct tdq *tdq;
888356500a3SJeff Roberson 
8890567b6ccSWarner Losh 	balance_ticks = max(balance_interval / 2, 1) +
890b250ad34SWarner Losh 	    (sched_random() % balance_interval);
8917fcf154aSJeff Roberson 	tdq = TDQ_SELF();
8927fcf154aSJeff Roberson 	TDQ_UNLOCK(tdq);
89362fa74d9SJeff Roberson 	sched_balance_group(cpu_top);
8947fcf154aSJeff Roberson 	TDQ_LOCK(tdq);
895cac77d04SJeff Roberson }
89686f8ae96SJeff Roberson 
897ae7a6b38SJeff Roberson /*
898ae7a6b38SJeff Roberson  * Lock two thread queues using their address to maintain lock order.
899ae7a6b38SJeff Roberson  */
900ae7a6b38SJeff Roberson static void
901ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two)
902ae7a6b38SJeff Roberson {
903ae7a6b38SJeff Roberson 	if (one < two) {
904ae7a6b38SJeff Roberson 		TDQ_LOCK(one);
905ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
906ae7a6b38SJeff Roberson 	} else {
907ae7a6b38SJeff Roberson 		TDQ_LOCK(two);
908ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
909ae7a6b38SJeff Roberson 	}
910ae7a6b38SJeff Roberson }
911ae7a6b38SJeff Roberson 
912ae7a6b38SJeff Roberson /*
9137fcf154aSJeff Roberson  * Unlock two thread queues.  Order is not important here.
9147fcf154aSJeff Roberson  */
9157fcf154aSJeff Roberson static void
9167fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two)
9177fcf154aSJeff Roberson {
9187fcf154aSJeff Roberson 	TDQ_UNLOCK(one);
9197fcf154aSJeff Roberson 	TDQ_UNLOCK(two);
9207fcf154aSJeff Roberson }
9217fcf154aSJeff Roberson 
9227fcf154aSJeff Roberson /*
923ae7a6b38SJeff Roberson  * Transfer load between two imbalanced thread queues.
924ae7a6b38SJeff Roberson  */
92562fa74d9SJeff Roberson static int
926ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low)
927cac77d04SJeff Roberson {
92897e9382dSDon Lewis 	struct thread *td;
929880bf8b9SMarius Strobl 	int cpu;
930cac77d04SJeff Roberson 
931ae7a6b38SJeff Roberson 	tdq_lock_pair(high, low);
93297e9382dSDon Lewis 	td = NULL;
933155b9987SJeff Roberson 	/*
93497e9382dSDon Lewis 	 * Transfer a thread from high to low.
935155b9987SJeff Roberson 	 */
93636acfc65SAlexander Motin 	if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load &&
93797e9382dSDon Lewis 	    (td = tdq_move(high, low)) != NULL) {
938a5423ea3SJeff Roberson 		/*
93997e9382dSDon Lewis 		 * In case the target isn't the current cpu notify it of the
94097e9382dSDon Lewis 		 * new load, possibly sending an IPI to force it to reschedule.
941a5423ea3SJeff Roberson 		 */
942880bf8b9SMarius Strobl 		cpu = TDQ_ID(low);
943880bf8b9SMarius Strobl 		if (cpu != PCPU_GET(cpuid))
94497e9382dSDon Lewis 			tdq_notify(low, td);
945ae7a6b38SJeff Roberson 	}
9467fcf154aSJeff Roberson 	tdq_unlock_pair(high, low);
94797e9382dSDon Lewis 	return (td != NULL);
948356500a3SJeff Roberson }
949356500a3SJeff Roberson 
950ae7a6b38SJeff Roberson /*
951ae7a6b38SJeff Roberson  * Move a thread from one thread queue to another.
952ae7a6b38SJeff Roberson  */
95397e9382dSDon Lewis static struct thread *
954ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to)
955356500a3SJeff Roberson {
956ad1e7d28SJulian Elischer 	struct td_sched *ts;
957ae7a6b38SJeff Roberson 	struct thread *td;
958ae7a6b38SJeff Roberson 	struct tdq *tdq;
959ae7a6b38SJeff Roberson 	int cpu;
960356500a3SJeff Roberson 
9617fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(from, MA_OWNED);
9627fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(to, MA_OWNED);
9637fcf154aSJeff Roberson 
964ad1e7d28SJulian Elischer 	tdq = from;
965ae7a6b38SJeff Roberson 	cpu = TDQ_ID(to);
9669727e637SJeff Roberson 	td = tdq_steal(tdq, cpu);
9679727e637SJeff Roberson 	if (td == NULL)
96897e9382dSDon Lewis 		return (NULL);
96993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
970ae7a6b38SJeff Roberson 	/*
971ae7a6b38SJeff Roberson 	 * Although the run queue is locked the thread may be blocked.  Lock
9727fcf154aSJeff Roberson 	 * it to clear this and acquire the run-queue lock.
973ae7a6b38SJeff Roberson 	 */
974ae7a6b38SJeff Roberson 	thread_lock(td);
9757fcf154aSJeff Roberson 	/* Drop recursive lock on from acquired via thread_lock(). */
976ae7a6b38SJeff Roberson 	TDQ_UNLOCK(from);
977ae7a6b38SJeff Roberson 	sched_rem(td);
9787b8bfa0dSJeff Roberson 	ts->ts_cpu = cpu;
979ae7a6b38SJeff Roberson 	td->td_lock = TDQ_LOCKPTR(to);
980ae7a6b38SJeff Roberson 	tdq_add(to, td, SRQ_YIELDING);
98197e9382dSDon Lewis 	return (td);
982356500a3SJeff Roberson }
98322bf7d9aSJeff Roberson 
984ae7a6b38SJeff Roberson /*
985ae7a6b38SJeff Roberson  * This tdq has idled.  Try to steal a thread from another cpu and switch
986ae7a6b38SJeff Roberson  * to it.
987ae7a6b38SJeff Roberson  */
98880f86c9fSJeff Roberson static int
989ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq)
99022bf7d9aSJeff Roberson {
99162fa74d9SJeff Roberson 	struct cpu_group *cg;
992ad1e7d28SJulian Elischer 	struct tdq *steal;
993c76ee827SJeff Roberson 	cpuset_t mask;
99497e9382dSDon Lewis 	int cpu, switchcnt;
99580f86c9fSJeff Roberson 
99697e9382dSDon Lewis 	if (smp_started == 0 || steal_idle == 0 || tdq->tdq_cg == NULL)
99788f530ccSJeff Roberson 		return (1);
998c76ee827SJeff Roberson 	CPU_FILL(&mask);
999c76ee827SJeff Roberson 	CPU_CLR(PCPU_GET(cpuid), &mask);
100097e9382dSDon Lewis     restart:
100197e9382dSDon Lewis 	switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
100297e9382dSDon Lewis 	for (cg = tdq->tdq_cg; ; ) {
100397e9382dSDon Lewis 		cpu = sched_highest(cg, mask, steal_thresh);
100497e9382dSDon Lewis 		/*
100597e9382dSDon Lewis 		 * We were assigned a thread but not preempted.  Returning
100697e9382dSDon Lewis 		 * 0 here will cause our caller to switch to it.
100797e9382dSDon Lewis 		 */
100897e9382dSDon Lewis 		if (tdq->tdq_load)
100997e9382dSDon Lewis 			return (0);
101062fa74d9SJeff Roberson 		if (cpu == -1) {
101162fa74d9SJeff Roberson 			cg = cg->cg_parent;
101297e9382dSDon Lewis 			if (cg == NULL)
101397e9382dSDon Lewis 				return (1);
101480f86c9fSJeff Roberson 			continue;
10157b8bfa0dSJeff Roberson 		}
10167b8bfa0dSJeff Roberson 		steal = TDQ_CPU(cpu);
101797e9382dSDon Lewis 		/*
101897e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
101997e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
102097e9382dSDon Lewis 		 *
102197e9382dSDon Lewis 		 * Testing this ahead of tdq_lock_pair() only catches
102297e9382dSDon Lewis 		 * this situation about 20% of the time on an 8-core,
102397e9382dSDon Lewis 		 * 16-thread Ryzen 7, but it still helps performance.
102497e9382dSDon Lewis 		 */
102597e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
102697e9382dSDon Lewis 		    steal->tdq_transferable == 0)
102797e9382dSDon Lewis 			goto restart;
10287fcf154aSJeff Roberson 		tdq_lock_pair(tdq, steal);
102997e9382dSDon Lewis 		/*
103097e9382dSDon Lewis 		 * We were assigned a thread while waiting for the locks.
103197e9382dSDon Lewis 		 * Switch to it now instead of stealing a thread.
103297e9382dSDon Lewis 		 */
103397e9382dSDon Lewis 		if (tdq->tdq_load)
103497e9382dSDon Lewis 			break;
103597e9382dSDon Lewis 		/*
103697e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
103797e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread, or
103897e9382dSDon Lewis 		 * we were preempted and the CPU loading info may be out
103997e9382dSDon Lewis 		 * of date.  The latter is rare.  In either case restart
104097e9382dSDon Lewis 		 * the search.
104197e9382dSDon Lewis 		 */
104297e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
104397e9382dSDon Lewis 		    steal->tdq_transferable == 0 ||
104497e9382dSDon Lewis 		    switchcnt != tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt) {
10457fcf154aSJeff Roberson 			tdq_unlock_pair(tdq, steal);
104697e9382dSDon Lewis 			goto restart;
104762fa74d9SJeff Roberson 		}
104862fa74d9SJeff Roberson 		/*
104997e9382dSDon Lewis 		 * Steal the thread and switch to it.
105062fa74d9SJeff Roberson 		 */
105197e9382dSDon Lewis 		if (tdq_move(steal, tdq) != NULL)
105297e9382dSDon Lewis 			break;
105397e9382dSDon Lewis 		/*
105497e9382dSDon Lewis 		 * We failed to acquire a thread even though it looked
105597e9382dSDon Lewis 		 * like one was available.  This could be due to affinity
105697e9382dSDon Lewis 		 * restrictions or for other reasons.  Loop again after
105797e9382dSDon Lewis 		 * removing this CPU from the set.  The restart logic
105897e9382dSDon Lewis 		 * above does not restore this CPU to the set due to the
105997e9382dSDon Lewis 		 * likelihood of failing here again.
106097e9382dSDon Lewis 		 */
106197e9382dSDon Lewis 		CPU_CLR(cpu, &mask);
106262fa74d9SJeff Roberson 		tdq_unlock_pair(tdq, steal);
106380f86c9fSJeff Roberson 	}
1064ae7a6b38SJeff Roberson 	TDQ_UNLOCK(steal);
10658df78c41SJeff Roberson 	mi_switch(SW_VOL | SWT_IDLE, NULL);
1066ae7a6b38SJeff Roberson 	thread_unlock(curthread);
10677b8bfa0dSJeff Roberson 	return (0);
106822bf7d9aSJeff Roberson }
106922bf7d9aSJeff Roberson 
1070ae7a6b38SJeff Roberson /*
1071ae7a6b38SJeff Roberson  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
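 *
 * The IPI is skipped when one is already pending for the target CPU, when
 * the thread currently running there would not be preempted at this
 * priority, or when the target CPU's idle loop can be woken without an
 * interrupt.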
1072ae7a6b38SJeff Roberson  */
107322bf7d9aSJeff Roberson static void
107427ee18adSRyan Stone tdq_notify(struct tdq *tdq, struct thread *td)
107522bf7d9aSJeff Roberson {
107602f0ff6dSJohn Baldwin 	struct thread *ctd;
107727ee18adSRyan Stone 	int pri;
10787b8bfa0dSJeff Roberson 	int cpu;
107922bf7d9aSJeff Roberson 
1080ff256d9cSJeff Roberson 	if (tdq->tdq_ipipending)
1081ff256d9cSJeff Roberson 		return;
108227ee18adSRyan Stone 	cpu = td_get_sched(td)->ts_cpu;
108327ee18adSRyan Stone 	pri = td->td_priority;
108402f0ff6dSJohn Baldwin 	ctd = pcpu_find(cpu)->pc_curthread;
108502f0ff6dSJohn Baldwin 	if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
10866b2f763fSJeff Roberson 		return;
108779654969SAlexander Motin 
108879654969SAlexander Motin 	/*
1089ae9e9b4fSAlexander Motin 	 * Make sure that our caller's earlier update to tdq_load is
1090ae9e9b4fSAlexander Motin 	 * globally visible before we read tdq_cpu_idle.  The idle thread
109179654969SAlexander Motin 	 * accesses both of them without locks, and the order is important.
109279654969SAlexander Motin 	 */
1093e8677f38SKonstantin Belousov 	atomic_thread_fence_seq_cst();
109479654969SAlexander Motin 
109502f0ff6dSJohn Baldwin 	if (TD_IS_IDLETHREAD(ctd)) {
10961690c6c1SJeff Roberson 		/*
10976c47aaaeSJeff Roberson 		 * If the MD code has an idle wakeup routine, try that before
10986c47aaaeSJeff Roberson 		 * falling back to IPI.
10996c47aaaeSJeff Roberson 		 */
11009f9ad565SAlexander Motin 		if (!tdq->tdq_cpu_idle || cpu_idle_wakeup(cpu))
11016c47aaaeSJeff Roberson 			return;
11021690c6c1SJeff Roberson 	}
1103ff256d9cSJeff Roberson 	tdq->tdq_ipipending = 1;
1104d9d8d144SJohn Baldwin 	ipi_cpu(cpu, IPI_PREEMPT);
110522bf7d9aSJeff Roberson }
110622bf7d9aSJeff Roberson 
1107ae7a6b38SJeff Roberson /*
1108ae7a6b38SJeff Roberson  * Steals load from a timeshare queue.  Honors the rotating queue head
1109ae7a6b38SJeff Roberson  * index.
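 *
 * The first thread found is remembered but only stolen as a last resort (and
 * only if it is itself eligible); a later eligible thread in the queue is
 * preferred when one exists.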
1110ae7a6b38SJeff Roberson  */
11119727e637SJeff Roberson static struct thread *
111262fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start)
1113ae7a6b38SJeff Roberson {
1114ae7a6b38SJeff Roberson 	struct rqbits *rqb;
1115ae7a6b38SJeff Roberson 	struct rqhead *rqh;
111636acfc65SAlexander Motin 	struct thread *td, *first;
1117ae7a6b38SJeff Roberson 	int bit;
1118ae7a6b38SJeff Roberson 	int i;
1119ae7a6b38SJeff Roberson 
1120ae7a6b38SJeff Roberson 	rqb = &rq->rq_status;
1121ae7a6b38SJeff Roberson 	bit = start & (RQB_BPW - 1);
112236acfc65SAlexander Motin 	first = NULL;
1123ae7a6b38SJeff Roberson again:
1124ae7a6b38SJeff Roberson 	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
1125ae7a6b38SJeff Roberson 		if (rqb->rqb_bits[i] == 0)
1126ae7a6b38SJeff Roberson 			continue;
11278bc713f6SJeff Roberson 		if (bit == 0)
11288bc713f6SJeff Roberson 			bit = RQB_FFS(rqb->rqb_bits[i]);
11298bc713f6SJeff Roberson 		for (; bit < RQB_BPW; bit++) {
11308bc713f6SJeff Roberson 			if ((rqb->rqb_bits[i] & (1ul << bit)) == 0)
1131ae7a6b38SJeff Roberson 				continue;
11328bc713f6SJeff Roberson 			rqh = &rq->rq_queues[bit + (i << RQB_L2BPW)];
11339727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq) {
11349727e637SJeff Roberson 				if (first && THREAD_CAN_MIGRATE(td) &&
11359727e637SJeff Roberson 				    THREAD_CAN_SCHED(td, cpu))
11369727e637SJeff Roberson 					return (td);
113736acfc65SAlexander Motin 				first = td;
1138ae7a6b38SJeff Roberson 			}
1139ae7a6b38SJeff Roberson 		}
11408bc713f6SJeff Roberson 	}
1141ae7a6b38SJeff Roberson 	if (start != 0) {
1142ae7a6b38SJeff Roberson 		start = 0;
1143ae7a6b38SJeff Roberson 		goto again;
1144ae7a6b38SJeff Roberson 	}
1145ae7a6b38SJeff Roberson 
114636acfc65SAlexander Motin 	if (first && THREAD_CAN_MIGRATE(first) &&
114736acfc65SAlexander Motin 	    THREAD_CAN_SCHED(first, cpu))
114836acfc65SAlexander Motin 		return (first);
1149ae7a6b38SJeff Roberson 	return (NULL);
1150ae7a6b38SJeff Roberson }
1151ae7a6b38SJeff Roberson 
1152ae7a6b38SJeff Roberson /*
1153ae7a6b38SJeff Roberson  * Steals load from a standard linear queue.
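 *
 * The queue is scanned in priority order, best priority first, and the
 * first thread that may both migrate and run on the target CPU is returned.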
1154ae7a6b38SJeff Roberson  */
11559727e637SJeff Roberson static struct thread *
115662fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu)
115722bf7d9aSJeff Roberson {
115822bf7d9aSJeff Roberson 	struct rqhead *rqh;
115922bf7d9aSJeff Roberson 	struct rqbits *rqb;
11609727e637SJeff Roberson 	struct thread *td;
116122bf7d9aSJeff Roberson 	int word;
116222bf7d9aSJeff Roberson 	int bit;
116322bf7d9aSJeff Roberson 
116422bf7d9aSJeff Roberson 	rqb = &rq->rq_status;
116522bf7d9aSJeff Roberson 	for (word = 0; word < RQB_LEN; word++) {
116622bf7d9aSJeff Roberson 		if (rqb->rqb_bits[word] == 0)
116722bf7d9aSJeff Roberson 			continue;
116822bf7d9aSJeff Roberson 		for (bit = 0; bit < RQB_BPW; bit++) {
1169a2640c9bSPeter Wemm 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
117022bf7d9aSJeff Roberson 				continue;
117122bf7d9aSJeff Roberson 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
11729727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq)
11739727e637SJeff Roberson 				if (THREAD_CAN_MIGRATE(td) &&
11749727e637SJeff Roberson 				    THREAD_CAN_SCHED(td, cpu))
11759727e637SJeff Roberson 					return (td);
117622bf7d9aSJeff Roberson 		}
117722bf7d9aSJeff Roberson 	}
117822bf7d9aSJeff Roberson 	return (NULL);
117922bf7d9aSJeff Roberson }
118022bf7d9aSJeff Roberson 
1181ae7a6b38SJeff Roberson /*
1182ae7a6b38SJeff Roberson  * Attempt to steal a thread in priority order from a thread queue.
1183ae7a6b38SJeff Roberson  */
11849727e637SJeff Roberson static struct thread *
118562fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu)
118622bf7d9aSJeff Roberson {
11879727e637SJeff Roberson 	struct thread *td;
118822bf7d9aSJeff Roberson 
1189ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
11909727e637SJeff Roberson 	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
11919727e637SJeff Roberson 		return (td);
11929727e637SJeff Roberson 	if ((td = runq_steal_from(&tdq->tdq_timeshare,
11939727e637SJeff Roberson 	    cpu, tdq->tdq_ridx)) != NULL)
11949727e637SJeff Roberson 		return (td);
119562fa74d9SJeff Roberson 	return (runq_steal(&tdq->tdq_idle, cpu));
119622bf7d9aSJeff Roberson }
119780f86c9fSJeff Roberson 
1198ae7a6b38SJeff Roberson /*
1199ae7a6b38SJeff Roberson  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
12007fcf154aSJeff Roberson  * current lock and returns with the assigned queue locked.
1201ae7a6b38SJeff Roberson  */
1202ae7a6b38SJeff Roberson static inline struct tdq *
12039727e637SJeff Roberson sched_setcpu(struct thread *td, int cpu, int flags)
120480f86c9fSJeff Roberson {
1206ae7a6b38SJeff Roberson 	struct tdq *tdq;
120780f86c9fSJeff Roberson 
12089727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1209ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
121093ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_cpu = cpu;
12119727e637SJeff Roberson 	/*
12129727e637SJeff Roberson 	 * If the lock matches just return the queue.
12139727e637SJeff Roberson 	 */
1214ae7a6b38SJeff Roberson 	if (td->td_lock == TDQ_LOCKPTR(tdq))
1215ae7a6b38SJeff Roberson 		return (tdq);
1216ae7a6b38SJeff Roberson #ifdef notyet
121780f86c9fSJeff Roberson 	/*
1218a5423ea3SJeff Roberson 	 * If the thread isn't running, its lockptr is a
1219ae7a6b38SJeff Roberson 	 * turnstile or a sleepqueue.  We can just lock_set without
1220ae7a6b38SJeff Roberson 	 * blocking.
1221670c524fSJeff Roberson 	 */
1222ae7a6b38SJeff Roberson 	if (TD_CAN_RUN(td)) {
1223ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
1224ae7a6b38SJeff Roberson 		thread_lock_set(td, TDQ_LOCKPTR(tdq));
1225ae7a6b38SJeff Roberson 		return (tdq);
1226ae7a6b38SJeff Roberson 	}
1227ae7a6b38SJeff Roberson #endif
122880f86c9fSJeff Roberson 	/*
1229ae7a6b38SJeff Roberson 	 * The hard case, migration: we need to block the thread first to
1230ae7a6b38SJeff Roberson 	 * prevent order reversals with other CPUs' locks.
12317b8bfa0dSJeff Roberson 	 */
1232b0b9dee5SAttilio Rao 	spinlock_enter();
1233ae7a6b38SJeff Roberson 	thread_lock_block(td);
1234ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1235ae7a6b38SJeff Roberson 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1236b0b9dee5SAttilio Rao 	spinlock_exit();
1237ae7a6b38SJeff Roberson 	return (tdq);
123880f86c9fSJeff Roberson }
12392454aaf5SJeff Roberson 
12408df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding");
12418df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity");
12428df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity");
12438df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load");
12448df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu");
12458df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration");
12468df78c41SJeff Roberson 
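/*
 * Select a CPU for a thread that is about to be put on a run queue; this is
 * a summary of the logic below.  Interrupt threads are kept near the
 * interrupting CPU.  Other threads prefer their previous CPU while their
 * cache affinity is still warm and that CPU is idle.  Otherwise the least
 * loaded CPU is chosen, searching the shared last-level cache group first
 * and then the whole topology, and the result is finally weighed against
 * simply staying on the current CPU.
 */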
1247ae7a6b38SJeff Roberson static int
12489727e637SJeff Roberson sched_pickcpu(struct thread *td, int flags)
1249ae7a6b38SJeff Roberson {
125036acfc65SAlexander Motin 	struct cpu_group *cg, *ccg;
12519727e637SJeff Roberson 	struct td_sched *ts;
1252ae7a6b38SJeff Roberson 	struct tdq *tdq;
1253c76ee827SJeff Roberson 	cpuset_t mask;
1254c9205e35SAlexander Motin 	int cpu, pri, self, intr;
12557b8bfa0dSJeff Roberson 
125662fa74d9SJeff Roberson 	self = PCPU_GET(cpuid);
125793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1258efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on "
1259efe67753SNathan Whitehorn 	    "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name));
12607b8bfa0dSJeff Roberson 	if (smp_started == 0)
12617b8bfa0dSJeff Roberson 		return (self);
126228994a58SJeff Roberson 	/*
126328994a58SJeff Roberson 	 * Don't migrate a running thread from sched_switch().
126428994a58SJeff Roberson 	 */
126562fa74d9SJeff Roberson 	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
126662fa74d9SJeff Roberson 		return (ts->ts_cpu);
12677b8bfa0dSJeff Roberson 	/*
126862fa74d9SJeff Roberson 	 * Prefer to run interrupt threads on the processors that generate
126962fa74d9SJeff Roberson 	 * the interrupt.
12707b8bfa0dSJeff Roberson 	 */
127162fa74d9SJeff Roberson 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
1272c9205e35SAlexander Motin 	    curthread->td_intr_nesting_level) {
127362fa74d9SJeff Roberson 		ts->ts_cpu = self;
1274c9205e35SAlexander Motin 		intr = 1;
1275c9205e35SAlexander Motin 	} else
1276c9205e35SAlexander Motin 		intr = 0;
12777b8bfa0dSJeff Roberson 	/*
127836acfc65SAlexander Motin 	 * If the thread can run on the last cpu and the affinity has not
12790127914cSEric van Gyzen 	 * expired and it is idle, run it there.
12807b8bfa0dSJeff Roberson 	 */
128136acfc65SAlexander Motin 	tdq = TDQ_CPU(ts->ts_cpu);
128236acfc65SAlexander Motin 	cg = tdq->tdq_cg;
128336acfc65SAlexander Motin 	if (THREAD_CAN_SCHED(td, ts->ts_cpu) &&
128436acfc65SAlexander Motin 	    tdq->tdq_lowpri >= PRI_MIN_IDLE &&
128536acfc65SAlexander Motin 	    SCHED_AFFINITY(ts, CG_SHARE_L2)) {
1286c9205e35SAlexander Motin 		if (!intr && cg->cg_flags & CG_FLAG_THREAD) {
128736acfc65SAlexander Motin 			CPUSET_FOREACH(cpu, cg->cg_mask) {
128836acfc65SAlexander Motin 				if (TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
128962fa74d9SJeff Roberson 					break;
129036acfc65SAlexander Motin 			}
129136acfc65SAlexander Motin 		} else
129236acfc65SAlexander Motin 			cpu = INT_MAX;
129336acfc65SAlexander Motin 		if (cpu > mp_maxid) {
129436acfc65SAlexander Motin 			SCHED_STAT_INC(pickcpu_idle_affinity);
129536acfc65SAlexander Motin 			return (ts->ts_cpu);
129636acfc65SAlexander Motin 		}
129736acfc65SAlexander Motin 	}
129836acfc65SAlexander Motin 	/*
129936acfc65SAlexander Motin 	 * Search for the last level cache CPU group in the tree.
1300c9205e35SAlexander Motin 	 * Skip SMT, identical groups and caches with expired affinity.
1301c9205e35SAlexander Motin 	 * Interrupt threads' affinity is explicit and never expires.
130236acfc65SAlexander Motin 	 */
130336acfc65SAlexander Motin 	for (ccg = NULL; cg != NULL; cg = cg->cg_parent) {
130436acfc65SAlexander Motin 		if (cg->cg_flags & CG_FLAG_THREAD)
130536acfc65SAlexander Motin 			continue;
1306c9205e35SAlexander Motin 		if (cg->cg_children == 1 || cg->cg_count == 1)
1307c9205e35SAlexander Motin 			continue;
1308c9205e35SAlexander Motin 		if (cg->cg_level == CG_SHARE_NONE ||
1309c9205e35SAlexander Motin 		    (!intr && !SCHED_AFFINITY(ts, cg->cg_level)))
131036acfc65SAlexander Motin 			continue;
131136acfc65SAlexander Motin 		ccg = cg;
131236acfc65SAlexander Motin 	}
1313c9205e35SAlexander Motin 	/* Found LLC shared by all CPUs, so do a global search. */
1314c9205e35SAlexander Motin 	if (ccg == cpu_top)
1315c9205e35SAlexander Motin 		ccg = NULL;
131662fa74d9SJeff Roberson 	cpu = -1;
1317c76ee827SJeff Roberson 	mask = td->td_cpuset->cs_mask;
1318c9205e35SAlexander Motin 	pri = td->td_priority;
1319c9205e35SAlexander Motin 	/*
1320c9205e35SAlexander Motin 	 * Try hard to keep interrupts within the found LLC.  Search the LLC for
1321c9205e35SAlexander Motin 	 * the least loaded CPU we can run now.  For NUMA systems it should
1322c9205e35SAlexander Motin 	 * be within the target domain, and it also reduces scheduling overhead.
1323c9205e35SAlexander Motin 	 */
1324c9205e35SAlexander Motin 	if (ccg != NULL && intr) {
1325c9205e35SAlexander Motin 		cpu = sched_lowest(ccg, mask, pri, INT_MAX, ts->ts_cpu);
1326c9205e35SAlexander Motin 		if (cpu >= 0)
1327c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_intrbind);
1328c9205e35SAlexander Motin 	} else
1329c9205e35SAlexander Motin 	/* Search the LLC for the least loaded idle CPU we can run now. */
1330c9205e35SAlexander Motin 	if (ccg != NULL) {
1331c9205e35SAlexander Motin 		cpu = sched_lowest(ccg, mask, max(pri, PRI_MAX_TIMESHARE),
133236acfc65SAlexander Motin 		    INT_MAX, ts->ts_cpu);
1333c9205e35SAlexander Motin 		if (cpu >= 0)
1334c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_affinity);
1335c9205e35SAlexander Motin 	}
1336c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU we can run now. */
1337c9205e35SAlexander Motin 	if (cpu < 0) {
133836acfc65SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, pri, INT_MAX, ts->ts_cpu);
1339c9205e35SAlexander Motin 		if (cpu >= 0)
1340c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1341c9205e35SAlexander Motin 	}
1342c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU. */
1343c9205e35SAlexander Motin 	if (cpu < 0) {
134436acfc65SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu);
1345c9205e35SAlexander Motin 		if (cpu >= 0)
1346c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1347c9205e35SAlexander Motin 	}
1348*bb3dfc6aSAlexander Motin 	KASSERT(cpu >= 0, ("sched_pickcpu: Failed to find a cpu."));
1349efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu));
135062fa74d9SJeff Roberson 	/*
135162fa74d9SJeff Roberson 	 * Compare the lowest loaded cpu to current cpu.
135262fa74d9SJeff Roberson 	 */
1353018ff686SJeff Roberson 	tdq = TDQ_CPU(cpu);
1354018ff686SJeff Roberson 	if (THREAD_CAN_SCHED(td, self) && TDQ_SELF()->tdq_lowpri > pri &&
1355018ff686SJeff Roberson 	    tdq->tdq_lowpri < PRI_MIN_IDLE &&
1356018ff686SJeff Roberson 	    TDQ_SELF()->tdq_load <= tdq->tdq_load + 1) {
13578df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_local);
135862fa74d9SJeff Roberson 		cpu = self;
1359c9205e35SAlexander Motin 	}
13608df78c41SJeff Roberson 	if (cpu != ts->ts_cpu)
13618df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_migration);
1362ae7a6b38SJeff Roberson 	return (cpu);
136380f86c9fSJeff Roberson }
136462fa74d9SJeff Roberson #endif
136522bf7d9aSJeff Roberson 
136622bf7d9aSJeff Roberson /*
136722bf7d9aSJeff Roberson  * Pick the highest priority task we have and return it.
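 *
 * The queues are consulted in order: realtime first, then timeshare at the
 * current rotating index, and finally the idle queue.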
13680c0a98b2SJeff Roberson  */
13699727e637SJeff Roberson static struct thread *
1370ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq)
13715d7ef00cSJeff Roberson {
13729727e637SJeff Roberson 	struct thread *td;
13735d7ef00cSJeff Roberson 
1374ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
13759727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_realtime);
13769727e637SJeff Roberson 	if (td != NULL)
13779727e637SJeff Roberson 		return (td);
13789727e637SJeff Roberson 	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
13799727e637SJeff Roberson 	if (td != NULL) {
138012d56c0fSJohn Baldwin 		KASSERT(td->td_priority >= PRI_MIN_BATCH,
1381e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on timeshare queue %d",
13829727e637SJeff Roberson 		    td->td_priority));
13839727e637SJeff Roberson 		return (td);
138415dc847eSJeff Roberson 	}
13859727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_idle);
13869727e637SJeff Roberson 	if (td != NULL) {
13879727e637SJeff Roberson 		KASSERT(td->td_priority >= PRI_MIN_IDLE,
1388e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on idle queue %d",
13899727e637SJeff Roberson 		    td->td_priority));
13909727e637SJeff Roberson 		return (td);
1391e7d50326SJeff Roberson 	}
1392e7d50326SJeff Roberson 
1393e7d50326SJeff Roberson 	return (NULL);
1394245f3abfSJeff Roberson }
13950a016a05SJeff Roberson 
1396ae7a6b38SJeff Roberson /*
1397ae7a6b38SJeff Roberson  * Initialize a thread queue.
1398ae7a6b38SJeff Roberson  */
13990a016a05SJeff Roberson static void
1400018ff686SJeff Roberson tdq_setup(struct tdq *tdq, int id)
14010a016a05SJeff Roberson {
1402ae7a6b38SJeff Roberson 
1403c47f202bSJeff Roberson 	if (bootverbose)
1404018ff686SJeff Roberson 		printf("ULE: setup cpu %d\n", id);
1405e7d50326SJeff Roberson 	runq_init(&tdq->tdq_realtime);
1406e7d50326SJeff Roberson 	runq_init(&tdq->tdq_timeshare);
1407d2ad694cSJeff Roberson 	runq_init(&tdq->tdq_idle);
1408018ff686SJeff Roberson 	tdq->tdq_id = id;
140962fa74d9SJeff Roberson 	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
141062fa74d9SJeff Roberson 	    "sched lock %d", (int)TDQ_ID(tdq));
141162fa74d9SJeff Roberson 	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
141262fa74d9SJeff Roberson 	    MTX_SPIN | MTX_RECURSE);
14138f51ad55SJeff Roberson #ifdef KTR
14148f51ad55SJeff Roberson 	snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
14158f51ad55SJeff Roberson 	    "CPU %d load", (int)TDQ_ID(tdq));
14168f51ad55SJeff Roberson #endif
14170a016a05SJeff Roberson }
14180a016a05SJeff Roberson 
1419c47f202bSJeff Roberson #ifdef SMP
1420c47f202bSJeff Roberson static void
1421c47f202bSJeff Roberson sched_setup_smp(void)
1422c47f202bSJeff Roberson {
1423c47f202bSJeff Roberson 	struct tdq *tdq;
1424c47f202bSJeff Roberson 	int i;
1425c47f202bSJeff Roberson 
142662fa74d9SJeff Roberson 	cpu_top = smp_topo();
14273aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
1428018ff686SJeff Roberson 		tdq = DPCPU_ID_PTR(i, tdq);
1429018ff686SJeff Roberson 		tdq_setup(tdq, i);
143062fa74d9SJeff Roberson 		tdq->tdq_cg = smp_topo_find(cpu_top, i);
143162fa74d9SJeff Roberson 		if (tdq->tdq_cg == NULL)
143262fa74d9SJeff Roberson 			panic("Can't find cpu group for %d\n", i);
1433c47f202bSJeff Roberson 	}
1434018ff686SJeff Roberson 	PCPU_SET(sched, DPCPU_PTR(tdq));
143562fa74d9SJeff Roberson 	balance_tdq = TDQ_SELF();
1436c47f202bSJeff Roberson }
1437c47f202bSJeff Roberson #endif
1438c47f202bSJeff Roberson 
1439ae7a6b38SJeff Roberson /*
1440ae7a6b38SJeff Roberson  * Setup the thread queues and initialize the topology based on MD
1441ae7a6b38SJeff Roberson  * information.
1442ae7a6b38SJeff Roberson  */
144335e6168fSJeff Roberson static void
144435e6168fSJeff Roberson sched_setup(void *dummy)
144535e6168fSJeff Roberson {
1446ae7a6b38SJeff Roberson 	struct tdq *tdq;
1447c47f202bSJeff Roberson 
14480ec896fdSJeff Roberson #ifdef SMP
1449c47f202bSJeff Roberson 	sched_setup_smp();
1450749d01b0SJeff Roberson #else
1451018ff686SJeff Roberson 	tdq_setup(TDQ_SELF(), 0);
1452356500a3SJeff Roberson #endif
1453018ff686SJeff Roberson 	tdq = TDQ_SELF();
1454ae7a6b38SJeff Roberson 
1455ae7a6b38SJeff Roberson 	/* Add thread0's load since it's running. */
1456ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1457c47f202bSJeff Roberson 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
14589727e637SJeff Roberson 	tdq_load_add(tdq, &thread0);
145962fa74d9SJeff Roberson 	tdq->tdq_lowpri = thread0.td_priority;
1460ae7a6b38SJeff Roberson 	TDQ_UNLOCK(tdq);
146135e6168fSJeff Roberson }
146235e6168fSJeff Roberson 
1463ae7a6b38SJeff Roberson /*
1464579895dfSAlexander Motin  * This routine determines time constants after stathz and hz are setup.
1465ae7a6b38SJeff Roberson  */
1466a1d4fe69SDavid Xu /* ARGSUSED */
1467a1d4fe69SDavid Xu static void
1468a1d4fe69SDavid Xu sched_initticks(void *dummy)
1469a1d4fe69SDavid Xu {
1470ae7a6b38SJeff Roberson 	int incr;
1471ae7a6b38SJeff Roberson 
1472a1d4fe69SDavid Xu 	realstathz = stathz ? stathz : hz;
14735e5c3873SJeff Roberson 	sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;
14745e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
147537f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
147637f4e025SAlexander Motin 	    realstathz);
1477a1d4fe69SDavid Xu 
1478a1d4fe69SDavid Xu 	/*
1479e7d50326SJeff Roberson 	 * tickincr is shifted out by 10 to avoid rounding errors due to
14803f872f85SJeff Roberson 	 * hz not being evenly divisible by stathz on all platforms.
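	 *
	 * For example (an illustration only): with hz = 1000 and realstathz =
	 * 127, incr = (1000 << 10) / 127, roughly 8062, so each stathz tick
	 * accounts for about 7.87 hz ticks of CPU time.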
1481e7d50326SJeff Roberson 	 */
1482ae7a6b38SJeff Roberson 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1483e7d50326SJeff Roberson 	/*
1484e7d50326SJeff Roberson 	 * This does not work for values of stathz that are more than
1485e7d50326SJeff Roberson 	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
1486a1d4fe69SDavid Xu 	 */
1487ae7a6b38SJeff Roberson 	if (incr == 0)
1488ae7a6b38SJeff Roberson 		incr = 1;
1489ae7a6b38SJeff Roberson 	tickincr = incr;
14907b8bfa0dSJeff Roberson #ifdef SMP
14919862717aSJeff Roberson 	/*
14927fcf154aSJeff Roberson 	 * Set the default balance interval now that we know
14937fcf154aSJeff Roberson 	 * what realstathz is.
14947fcf154aSJeff Roberson 	 */
14957fcf154aSJeff Roberson 	balance_interval = realstathz;
1496290d9060SDon Lewis 	balance_ticks = balance_interval;
14977b8bfa0dSJeff Roberson 	affinity = SCHED_AFFINITY_DEFAULT;
14987b8bfa0dSJeff Roberson #endif
1499b3f40a41SAlexander Motin 	if (sched_idlespinthresh < 0)
15002c27cb3aSAlexander Motin 		sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz;
1501a1d4fe69SDavid Xu }
1502a1d4fe69SDavid Xu 
150435e6168fSJeff Roberson /*
1505ae7a6b38SJeff Roberson  * This is the core of the interactivity algorithm.  Determines a score based
1506ae7a6b38SJeff Roberson  * on past behavior.  It is the ratio of sleep time to run time scaled to
1507ae7a6b38SJeff Roberson  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1508ae7a6b38SJeff Roberson  * differs from the cpu usage because it does not account for time spent
1509ae7a6b38SJeff Roberson  * waiting on a run-queue.  Would be prettier if we had floating point.
151057031f79SGeorge V. Neville-Neil  *
151157031f79SGeorge V. Neville-Neil  * When a thread's sleep time is greater than its run time the
151257031f79SGeorge V. Neville-Neil  * calculation is:
151357031f79SGeorge V. Neville-Neil  *
151457031f79SGeorge V. Neville-Neil  *                           scaling factor
151557031f79SGeorge V. Neville-Neil  * interactivity score =  ---------------------
151657031f79SGeorge V. Neville-Neil  *                        sleep time / run time
151757031f79SGeorge V. Neville-Neil  *
151857031f79SGeorge V. Neville-Neil  *
151957031f79SGeorge V. Neville-Neil  * When a thread's run time is greater than its sleep time the
152057031f79SGeorge V. Neville-Neil  * calculation is:
152157031f79SGeorge V. Neville-Neil  *
152257031f79SGeorge V. Neville-Neil  *                           scaling factor
152357031f79SGeorge V. Neville-Neil  * interactivity score =  ---------------------    + scaling factor
152457031f79SGeorge V. Neville-Neil  *                        run time / sleep time
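 *
 * For example, with the scaling factor of 50 implied by the [0, 100] range:
 * a thread that has slept three times as long as it has run scores roughly
 * 50 / 3, about 16 (interactive), while one that has run three times as long
 * as it has slept scores roughly 100 - 50 / 3, about 83 (batch).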
1525ae7a6b38SJeff Roberson  */
1526ae7a6b38SJeff Roberson static int
1527ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1528ae7a6b38SJeff Roberson {
1529ae7a6b38SJeff Roberson 	struct td_sched *ts;
1530ae7a6b38SJeff Roberson 	int div;
1531ae7a6b38SJeff Roberson 
153293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1533ae7a6b38SJeff Roberson 	/*
1534ae7a6b38SJeff Roberson 	 * The score is only needed if this is likely to be an interactive
1535ae7a6b38SJeff Roberson 	 * task.  Don't go through the expense of computing it if there's
1536ae7a6b38SJeff Roberson 	 * no chance.
1537ae7a6b38SJeff Roberson 	 */
1538ae7a6b38SJeff Roberson 	if (sched_interact <= SCHED_INTERACT_HALF &&
1539ae7a6b38SJeff Roberson 		ts->ts_runtime >= ts->ts_slptime)
1540ae7a6b38SJeff Roberson 			return (SCHED_INTERACT_HALF);
1541ae7a6b38SJeff Roberson 
1542ae7a6b38SJeff Roberson 	if (ts->ts_runtime > ts->ts_slptime) {
1543ae7a6b38SJeff Roberson 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1544ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF +
1545ae7a6b38SJeff Roberson 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1546ae7a6b38SJeff Roberson 	}
1547ae7a6b38SJeff Roberson 	if (ts->ts_slptime > ts->ts_runtime) {
1548ae7a6b38SJeff Roberson 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1549ae7a6b38SJeff Roberson 		return (ts->ts_runtime / div);
1550ae7a6b38SJeff Roberson 	}
1551ae7a6b38SJeff Roberson 	/* runtime == slptime */
1552ae7a6b38SJeff Roberson 	if (ts->ts_runtime)
1553ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF);
1554ae7a6b38SJeff Roberson 
1555ae7a6b38SJeff Roberson 	/*
1556ae7a6b38SJeff Roberson 	 * This can happen if slptime and runtime are 0.
1557ae7a6b38SJeff Roberson 	 */
1558ae7a6b38SJeff Roberson 	return (0);
1560ae7a6b38SJeff Roberson }
1561ae7a6b38SJeff Roberson 
1562ae7a6b38SJeff Roberson /*
156335e6168fSJeff Roberson  * Scale the scheduling priority according to the "interactivity" of this
156435e6168fSJeff Roberson  * process.
156535e6168fSJeff Roberson  */
156615dc847eSJeff Roberson static void
15678460a577SJohn Birrell sched_priority(struct thread *td)
156835e6168fSJeff Roberson {
1569e7d50326SJeff Roberson 	int score;
157035e6168fSJeff Roberson 	int pri;
157135e6168fSJeff Roberson 
1572c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
157315dc847eSJeff Roberson 		return;
1574e7d50326SJeff Roberson 	/*
1575e7d50326SJeff Roberson 	 * If the score is interactive we place the thread in the realtime
1576e7d50326SJeff Roberson 	 * queue with a priority that is less than kernel and interrupt
1577e7d50326SJeff Roberson 	 * priorities.  These threads are not subject to nice restrictions.
1578e7d50326SJeff Roberson 	 *
1579ae7a6b38SJeff Roberson 	 * Scores greater than this are placed on the normal timeshare queue
1580e7d50326SJeff Roberson 	 * where the priority is partially decided by the most recent cpu
1581e7d50326SJeff Roberson 	 * utilization and the rest is decided by nice value.
1582a5423ea3SJeff Roberson 	 *
1583a5423ea3SJeff Roberson 	 * The nice value of the process has a linear effect on the calculated
1584a5423ea3SJeff Roberson 	 * score.  Negative nice values make it easier for a thread to be
1585a5423ea3SJeff Roberson 	 * considered interactive.
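	 *
	 * For example, with the default interactivity threshold (30), a score
	 * of 15 places the thread halfway through the interactive priority
	 * range; a score of 30 or more falls through to the batch range below,
	 * where recent CPU usage and nice determine the final priority.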
1586e7d50326SJeff Roberson 	 */
1587a0f15352SJohn Baldwin 	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1588e7d50326SJeff Roberson 	if (score < sched_interact) {
158912d56c0fSJohn Baldwin 		pri = PRI_MIN_INTERACT;
159012d56c0fSJohn Baldwin 		pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) /
159178920008SJohn Baldwin 		    sched_interact) * score;
159212d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
15939a93305aSJeff Roberson 		    ("sched_priority: invalid interactive priority %d score %d",
15949a93305aSJeff Roberson 		    pri, score));
1595e7d50326SJeff Roberson 	} else {
1596e7d50326SJeff Roberson 		pri = SCHED_PRI_MIN;
159793ccd6bfSKonstantin Belousov 		if (td_get_sched(td)->ts_ticks)
159893ccd6bfSKonstantin Belousov 			pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
15995457fa23SJohn Baldwin 			    SCHED_PRI_RANGE - 1);
1600e7d50326SJeff Roberson 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
160112d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
1602ae7a6b38SJeff Roberson 		    ("sched_priority: invalid priority %d: nice %d, "
1603ae7a6b38SJeff Roberson 		    "ticks %d ftick %d ltick %d tick pri %d",
160493ccd6bfSKonstantin Belousov 		    pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks,
160593ccd6bfSKonstantin Belousov 		    td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick,
160693ccd6bfSKonstantin Belousov 		    SCHED_PRI_TICKS(td_get_sched(td))));
1607e7d50326SJeff Roberson 	}
16088460a577SJohn Birrell 	sched_user_prio(td, pri);
160935e6168fSJeff Roberson 
161015dc847eSJeff Roberson 	return;
161135e6168fSJeff Roberson }
161235e6168fSJeff Roberson 
161335e6168fSJeff Roberson /*
1614d322132cSJeff Roberson  * This routine enforces a maximum limit on the amount of scheduling history
1615ae7a6b38SJeff Roberson  * kept.  It is called after either the slptime or runtime is adjusted.  This
1616ae7a6b38SJeff Roberson  * function is ugly due to integer math.
1617d322132cSJeff Roberson  */
16184b60e324SJeff Roberson static void
16198460a577SJohn Birrell sched_interact_update(struct thread *td)
16204b60e324SJeff Roberson {
1621155b6ca1SJeff Roberson 	struct td_sched *ts;
16229a93305aSJeff Roberson 	u_int sum;
16233f741ca1SJeff Roberson 
162493ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1625ae7a6b38SJeff Roberson 	sum = ts->ts_runtime + ts->ts_slptime;
1626d322132cSJeff Roberson 	if (sum < SCHED_SLP_RUN_MAX)
1627d322132cSJeff Roberson 		return;
1628d322132cSJeff Roberson 	/*
1629155b6ca1SJeff Roberson 	 * This only happens from two places:
1630155b6ca1SJeff Roberson 	 * 1) We have added an unusual amount of run time from fork_exit.
1631155b6ca1SJeff Roberson 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1632155b6ca1SJeff Roberson 	 */
1633155b6ca1SJeff Roberson 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1634ae7a6b38SJeff Roberson 		if (ts->ts_runtime > ts->ts_slptime) {
1635ae7a6b38SJeff Roberson 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1636ae7a6b38SJeff Roberson 			ts->ts_slptime = 1;
1637155b6ca1SJeff Roberson 		} else {
1638ae7a6b38SJeff Roberson 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1639ae7a6b38SJeff Roberson 			ts->ts_runtime = 1;
1640155b6ca1SJeff Roberson 		}
1641155b6ca1SJeff Roberson 		return;
1642155b6ca1SJeff Roberson 	}
1643155b6ca1SJeff Roberson 	/*
1644d322132cSJeff Roberson 	 * If we have exceeded by more than 1/5th then the algorithm below
1645d322132cSJeff Roberson 	 * will not bring us back into range.  Dividing by two here forces
16462454aaf5SJeff Roberson 	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1647d322132cSJeff Roberson 	 */
164837a35e4aSJeff Roberson 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1649ae7a6b38SJeff Roberson 		ts->ts_runtime /= 2;
1650ae7a6b38SJeff Roberson 		ts->ts_slptime /= 2;
1651d322132cSJeff Roberson 		return;
1652d322132cSJeff Roberson 	}
1653ae7a6b38SJeff Roberson 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1654ae7a6b38SJeff Roberson 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1655d322132cSJeff Roberson }
1656d322132cSJeff Roberson 
1657ae7a6b38SJeff Roberson /*
1658ae7a6b38SJeff Roberson  * Scale back the interactivity history when a child thread is created.  The
1659ae7a6b38SJeff Roberson  * history is inherited from the parent but the thread may behave totally
1660ae7a6b38SJeff Roberson  * differently.  For example, a shell spawning a compiler process.  We want
1661ae7a6b38SJeff Roberson  * to learn that the compiler is behaving badly very quickly.
1662ae7a6b38SJeff Roberson  */
1663d322132cSJeff Roberson static void
16648460a577SJohn Birrell sched_interact_fork(struct thread *td)
1665d322132cSJeff Roberson {
166693ccd6bfSKonstantin Belousov 	struct td_sched *ts;
1667d322132cSJeff Roberson 	int ratio;
1668d322132cSJeff Roberson 	int sum;
1669d322132cSJeff Roberson 
167093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
167193ccd6bfSKonstantin Belousov 	sum = ts->ts_runtime + ts->ts_slptime;
1672d322132cSJeff Roberson 	if (sum > SCHED_SLP_RUN_FORK) {
1673d322132cSJeff Roberson 		ratio = sum / SCHED_SLP_RUN_FORK;
167493ccd6bfSKonstantin Belousov 		ts->ts_runtime /= ratio;
167593ccd6bfSKonstantin Belousov 		ts->ts_slptime /= ratio;
16764b60e324SJeff Roberson 	}
16774b60e324SJeff Roberson }
16784b60e324SJeff Roberson 
167915dc847eSJeff Roberson /*
1680ae7a6b38SJeff Roberson  * Called from proc0_init() to setup the scheduler fields.
1681ed062c8dSJulian Elischer  */
1682ed062c8dSJulian Elischer void
1683ed062c8dSJulian Elischer schedinit(void)
1684ed062c8dSJulian Elischer {
168593ccd6bfSKonstantin Belousov 	struct td_sched *ts0;
1686e7d50326SJeff Roberson 
1687ed062c8dSJulian Elischer 	/*
168893ccd6bfSKonstantin Belousov 	 * Set up the scheduler specific parts of thread0.
1689ed062c8dSJulian Elischer 	 */
169093ccd6bfSKonstantin Belousov 	ts0 = td_get_sched(&thread0);
169193ccd6bfSKonstantin Belousov 	ts0->ts_ltick = ticks;
169293ccd6bfSKonstantin Belousov 	ts0->ts_ftick = ticks;
169393ccd6bfSKonstantin Belousov 	ts0->ts_slice = 0;
16941408b84aSHans Petter Selasky 	ts0->ts_cpu = curcpu;	/* set valid CPU number */
1695ed062c8dSJulian Elischer }
1696ed062c8dSJulian Elischer 
1697ed062c8dSJulian Elischer /*
169815dc847eSJeff Roberson  * This is only somewhat accurate since, given many processes of the same
169915dc847eSJeff Roberson  * priority, they will switch when their slices run out, which will be
1700e7d50326SJeff Roberson  * at most sched_slice stathz ticks.
170115dc847eSJeff Roberson  */
170235e6168fSJeff Roberson int
170335e6168fSJeff Roberson sched_rr_interval(void)
170435e6168fSJeff Roberson {
1705e7d50326SJeff Roberson 
1706579895dfSAlexander Motin 	/* Convert sched_slice from stathz to hz. */
170737f4e025SAlexander Motin 	return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
170835e6168fSJeff Roberson }
170935e6168fSJeff Roberson 
1710ae7a6b38SJeff Roberson /*
1711ae7a6b38SJeff Roberson  * Update the percent cpu tracking information when it is requested or
1712ae7a6b38SJeff Roberson  * the total history exceeds the maximum.  We keep a sliding history of
1713ae7a6b38SJeff Roberson  * tick counts that slowly decays.  This is less precise than the 4BSD
1714ae7a6b38SJeff Roberson  * mechanism since it happens with less regular and frequent events.
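 *
 * If the thread has not run within the target window the history is simply
 * reset; once the accumulated window grows past its maximum, the tick count
 * is rescaled so that it again covers only the target window.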
1715ae7a6b38SJeff Roberson  */
171622bf7d9aSJeff Roberson static void
17177295465eSAlexander Motin sched_pctcpu_update(struct td_sched *ts, int run)
171835e6168fSJeff Roberson {
17197295465eSAlexander Motin 	int t = ticks;
1720e7d50326SJeff Roberson 
172178133024SMark Johnston 	/*
172278133024SMark Johnston 	 * The signed difference may be negative if the thread hasn't run for
172378133024SMark Johnston 	 * over half of the ticks rollover period.
172478133024SMark Johnston 	 */
172578133024SMark Johnston 	if ((u_int)(t - ts->ts_ltick) >= SCHED_TICK_TARG) {
1726ad1e7d28SJulian Elischer 		ts->ts_ticks = 0;
17277295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
17287295465eSAlexander Motin 	} else if (t - ts->ts_ftick >= SCHED_TICK_MAX) {
17297295465eSAlexander Motin 		ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) *
17307295465eSAlexander Motin 		    (ts->ts_ltick - (t - SCHED_TICK_TARG));
17317295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
17327295465eSAlexander Motin 	}
17337295465eSAlexander Motin 	if (run)
17347295465eSAlexander Motin 		ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT;
17357295465eSAlexander Motin 	ts->ts_ltick = t;
173635e6168fSJeff Roberson }
173735e6168fSJeff Roberson 
1738ae7a6b38SJeff Roberson /*
1739ae7a6b38SJeff Roberson  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1740ae7a6b38SJeff Roberson  * if necessary.  This is the back-end for several priority related
1741ae7a6b38SJeff Roberson  * functions.
1742ae7a6b38SJeff Roberson  */
1743e7d50326SJeff Roberson static void
1744f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio)
174535e6168fSJeff Roberson {
1746ad1e7d28SJulian Elischer 	struct td_sched *ts;
174773daf66fSJeff Roberson 	struct tdq *tdq;
174873daf66fSJeff Roberson 	int oldpri;
174935e6168fSJeff Roberson 
17508f51ad55SJeff Roberson 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
17518f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, "new prio:%d", prio,
17528f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(curthread));
1753d9fae5abSAndriy Gapon 	SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
1754e87fc7cfSAndriy Gapon 	if (td != curthread && prio < td->td_priority) {
17558f51ad55SJeff Roberson 		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
17568f51ad55SJeff Roberson 		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
17578f51ad55SJeff Roberson 		    prio, KTR_ATTR_LINKED, sched_tdname(td));
1758d9fae5abSAndriy Gapon 		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
1759b3e9e682SRyan Stone 		    curthread);
17608f51ad55SJeff Roberson 	}
176193ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
17627b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1763f5c157d9SJohn Baldwin 	if (td->td_priority == prio)
1764f5c157d9SJohn Baldwin 		return;
17653f741ca1SJeff Roberson 	/*
17663f741ca1SJeff Roberson 	 * If the priority has been elevated due to priority
17673f741ca1SJeff Roberson 	 * propagation, we may have to move ourselves to a new
1768e7d50326SJeff Roberson 	 * queue.  This could be optimized to not re-add in some
1769e7d50326SJeff Roberson 	 * cases.
1770f2b74cbfSJeff Roberson 	 */
17716d55b3ecSJeff Roberson 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1772e7d50326SJeff Roberson 		sched_rem(td);
1773e7d50326SJeff Roberson 		td->td_priority = prio;
1774ae7a6b38SJeff Roberson 		sched_add(td, SRQ_BORROWING);
177573daf66fSJeff Roberson 		return;
177673daf66fSJeff Roberson 	}
17776d55b3ecSJeff Roberson 	/*
17786d55b3ecSJeff Roberson 	 * If the thread is currently running we may have to adjust the lowpri
17796d55b3ecSJeff Roberson 	 * information so other cpus are aware of our current priority.
17806d55b3ecSJeff Roberson 	 */
17816d55b3ecSJeff Roberson 	if (TD_IS_RUNNING(td)) {
1782ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(ts->ts_cpu);
178362fa74d9SJeff Roberson 		oldpri = td->td_priority;
17843f741ca1SJeff Roberson 		td->td_priority = prio;
178562fa74d9SJeff Roberson 		if (prio < tdq->tdq_lowpri)
178662fa74d9SJeff Roberson 			tdq->tdq_lowpri = prio;
178762fa74d9SJeff Roberson 		else if (tdq->tdq_lowpri == oldpri)
178862fa74d9SJeff Roberson 			tdq_setlowpri(tdq, td);
17896d55b3ecSJeff Roberson 		return;
179073daf66fSJeff Roberson 	}
17916d55b3ecSJeff Roberson 	td->td_priority = prio;
1792ae7a6b38SJeff Roberson }
179335e6168fSJeff Roberson 
1794f5c157d9SJohn Baldwin /*
1795f5c157d9SJohn Baldwin  * Update a thread's priority when it is lent another thread's
1796f5c157d9SJohn Baldwin  * priority.
1797f5c157d9SJohn Baldwin  */
1798f5c157d9SJohn Baldwin void
1799f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1800f5c157d9SJohn Baldwin {
1801f5c157d9SJohn Baldwin 
1802f5c157d9SJohn Baldwin 	td->td_flags |= TDF_BORROWING;
1803f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1804f5c157d9SJohn Baldwin }
1805f5c157d9SJohn Baldwin 
1806f5c157d9SJohn Baldwin /*
1807f5c157d9SJohn Baldwin  * Restore a thread's priority when priority propagation is
1808f5c157d9SJohn Baldwin  * over.  The prio argument is the minimum priority the thread
1809f5c157d9SJohn Baldwin  * needs to have to satisfy other possible priority lending
1810f5c157d9SJohn Baldwin  * requests.  If the thread's regular priority is less
1811f5c157d9SJohn Baldwin  * important than prio, the thread will keep a priority boost
1812f5c157d9SJohn Baldwin  * of prio.
1813f5c157d9SJohn Baldwin  */
1814f5c157d9SJohn Baldwin void
1815f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1816f5c157d9SJohn Baldwin {
1817f5c157d9SJohn Baldwin 	u_char base_pri;
1818f5c157d9SJohn Baldwin 
1819f5c157d9SJohn Baldwin 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1820f5c157d9SJohn Baldwin 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
18218460a577SJohn Birrell 		base_pri = td->td_user_pri;
1822f5c157d9SJohn Baldwin 	else
1823f5c157d9SJohn Baldwin 		base_pri = td->td_base_pri;
1824f5c157d9SJohn Baldwin 	if (prio >= base_pri) {
1825f5c157d9SJohn Baldwin 		td->td_flags &= ~TDF_BORROWING;
1826f5c157d9SJohn Baldwin 		sched_thread_priority(td, base_pri);
1827f5c157d9SJohn Baldwin 	} else
1828f5c157d9SJohn Baldwin 		sched_lend_prio(td, prio);
1829f5c157d9SJohn Baldwin }
1830f5c157d9SJohn Baldwin 
1831ae7a6b38SJeff Roberson /*
1832ae7a6b38SJeff Roberson  * Standard entry for setting the priority to an absolute value.
1833ae7a6b38SJeff Roberson  */
1834f5c157d9SJohn Baldwin void
1835f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1836f5c157d9SJohn Baldwin {
1837f5c157d9SJohn Baldwin 	u_char oldprio;
1838f5c157d9SJohn Baldwin 
1839f5c157d9SJohn Baldwin 	/* First, update the base priority. */
1840f5c157d9SJohn Baldwin 	td->td_base_pri = prio;
1841f5c157d9SJohn Baldwin 
1842f5c157d9SJohn Baldwin 	/*
184350aaa791SJohn Baldwin 	 * If the thread is borrowing another thread's priority, don't
1844f5c157d9SJohn Baldwin 	 * ever lower the priority.
1845f5c157d9SJohn Baldwin 	 */
1846f5c157d9SJohn Baldwin 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1847f5c157d9SJohn Baldwin 		return;
1848f5c157d9SJohn Baldwin 
1849f5c157d9SJohn Baldwin 	/* Change the real priority. */
1850f5c157d9SJohn Baldwin 	oldprio = td->td_priority;
1851f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1852f5c157d9SJohn Baldwin 
1853f5c157d9SJohn Baldwin 	/*
1854f5c157d9SJohn Baldwin 	 * If the thread is on a turnstile, then let the turnstile update
1855f5c157d9SJohn Baldwin 	 * its state.
1856f5c157d9SJohn Baldwin 	 */
1857f5c157d9SJohn Baldwin 	if (TD_ON_LOCK(td) && oldprio != prio)
1858f5c157d9SJohn Baldwin 		turnstile_adjust(td, oldprio);
1859f5c157d9SJohn Baldwin }
1860f5c157d9SJohn Baldwin 
1861ae7a6b38SJeff Roberson /*
1862ae7a6b38SJeff Roberson  * Set the base user priority; does not affect the current running priority.
1863ae7a6b38SJeff Roberson  */
186435e6168fSJeff Roberson void
18658460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
18663db720fdSDavid Xu {
18673db720fdSDavid Xu 
18688460a577SJohn Birrell 	td->td_base_user_pri = prio;
1869acbe332aSDavid Xu 	if (td->td_lend_user_pri <= prio)
1870fc6c30f6SJulian Elischer 		return;
18718460a577SJohn Birrell 	td->td_user_pri = prio;
18723db720fdSDavid Xu }
18733db720fdSDavid Xu 
18743db720fdSDavid Xu void
18753db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
18763db720fdSDavid Xu {
18773db720fdSDavid Xu 
1878435806d3SDavid Xu 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1879acbe332aSDavid Xu 	td->td_lend_user_pri = prio;
1880c8e368a9SDavid Xu 	td->td_user_pri = min(prio, td->td_base_user_pri);
1881c8e368a9SDavid Xu 	if (td->td_priority > td->td_user_pri)
1882c8e368a9SDavid Xu 		sched_prio(td, td->td_user_pri);
1883c8e368a9SDavid Xu 	else if (td->td_priority != td->td_user_pri)
1884c8e368a9SDavid Xu 		td->td_flags |= TDF_NEEDRESCHED;
1885435806d3SDavid Xu }
18863db720fdSDavid Xu 
1887ac97da9aSMateusz Guzik /*
1888ac97da9aSMateusz Guzik  * Like the above but first check if there is anything to do.
1889ac97da9aSMateusz Guzik  */
1890ac97da9aSMateusz Guzik void
1891ac97da9aSMateusz Guzik sched_lend_user_prio_cond(struct thread *td, u_char prio)
1892ac97da9aSMateusz Guzik {
1893ac97da9aSMateusz Guzik 
1894ac97da9aSMateusz Guzik 	if (td->td_lend_user_pri != prio)
1895ac97da9aSMateusz Guzik 		goto lend;
1896ac97da9aSMateusz Guzik 	if (td->td_user_pri != min(prio, td->td_base_user_pri))
1897ac97da9aSMateusz Guzik 		goto lend;
1898ac97da9aSMateusz Guzik 	if (td->td_priority >= td->td_user_pri)
1899ac97da9aSMateusz Guzik 		goto lend;
1900ac97da9aSMateusz Guzik 	return;
1901ac97da9aSMateusz Guzik 
1902ac97da9aSMateusz Guzik lend:
1903ac97da9aSMateusz Guzik 	thread_lock(td);
1904ac97da9aSMateusz Guzik 	sched_lend_user_prio(td, prio);
1905ac97da9aSMateusz Guzik 	thread_unlock(td);
1906ac97da9aSMateusz Guzik }
1907ac97da9aSMateusz Guzik 
19084c8a8cfcSKonstantin Belousov #ifdef SMP
1909ae7a6b38SJeff Roberson /*
191097e9382dSDon Lewis  * This tdq is about to idle.  Try to steal a thread from another CPU before
191197e9382dSDon Lewis  * choosing the idle thread.
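 *
 * Unlike tdq_idled(), this is called from sched_switch() and runs inside a
 * critical section, so the topology search is bounded by trysteal_limit
 * levels and it gives up rather than retrying when the load data it used
 * turns out to be stale.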
191297e9382dSDon Lewis  */
191397e9382dSDon Lewis static void
191497e9382dSDon Lewis tdq_trysteal(struct tdq *tdq)
191597e9382dSDon Lewis {
191697e9382dSDon Lewis 	struct cpu_group *cg;
191797e9382dSDon Lewis 	struct tdq *steal;
191897e9382dSDon Lewis 	cpuset_t mask;
191997e9382dSDon Lewis 	int cpu, i;
192097e9382dSDon Lewis 
192197e9382dSDon Lewis 	if (smp_started == 0 || trysteal_limit == 0 || tdq->tdq_cg == NULL)
192297e9382dSDon Lewis 		return;
192397e9382dSDon Lewis 	CPU_FILL(&mask);
192497e9382dSDon Lewis 	CPU_CLR(PCPU_GET(cpuid), &mask);
192597e9382dSDon Lewis 	/* We don't want to be preempted while we're iterating. */
192697e9382dSDon Lewis 	spinlock_enter();
192797e9382dSDon Lewis 	TDQ_UNLOCK(tdq);
192897e9382dSDon Lewis 	for (i = 1, cg = tdq->tdq_cg; ; ) {
192997e9382dSDon Lewis 		cpu = sched_highest(cg, mask, steal_thresh);
193097e9382dSDon Lewis 		/*
193197e9382dSDon Lewis 		 * If a thread was added while interrupts were disabled don't
193297e9382dSDon Lewis 		 * steal one here.
193397e9382dSDon Lewis 		 */
193497e9382dSDon Lewis 		if (tdq->tdq_load > 0) {
193597e9382dSDon Lewis 			TDQ_LOCK(tdq);
193697e9382dSDon Lewis 			break;
193797e9382dSDon Lewis 		}
193897e9382dSDon Lewis 		if (cpu == -1) {
193997e9382dSDon Lewis 			i++;
194097e9382dSDon Lewis 			cg = cg->cg_parent;
194197e9382dSDon Lewis 			if (cg == NULL || i > trysteal_limit) {
194297e9382dSDon Lewis 				TDQ_LOCK(tdq);
194397e9382dSDon Lewis 				break;
194497e9382dSDon Lewis 			}
194597e9382dSDon Lewis 			continue;
194697e9382dSDon Lewis 		}
194797e9382dSDon Lewis 		steal = TDQ_CPU(cpu);
194897e9382dSDon Lewis 		/*
194997e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
195097e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
195197e9382dSDon Lewis 		 */
195297e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
195397e9382dSDon Lewis 		    steal->tdq_transferable == 0)
195497e9382dSDon Lewis 			continue;
195597e9382dSDon Lewis 		tdq_lock_pair(tdq, steal);
195697e9382dSDon Lewis 		/*
195797e9382dSDon Lewis 		 * If we get to this point, unconditionally exit the loop
195897e9382dSDon Lewis 		 * to bound the time spent in the critical section.
195997e9382dSDon Lewis 		 *
196097e9382dSDon Lewis 		 * If a thread was added while interrupts were disabled don't
196197e9382dSDon Lewis 		 * steal one here.
196297e9382dSDon Lewis 		 */
196397e9382dSDon Lewis 		if (tdq->tdq_load > 0) {
196497e9382dSDon Lewis 			TDQ_UNLOCK(steal);
196597e9382dSDon Lewis 			break;
196697e9382dSDon Lewis 		}
196797e9382dSDon Lewis 		/*
196897e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
196997e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
197097e9382dSDon Lewis 		 */
197197e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
197297e9382dSDon Lewis 		    steal->tdq_transferable == 0) {
197397e9382dSDon Lewis 			TDQ_UNLOCK(steal);
197497e9382dSDon Lewis 			break;
197597e9382dSDon Lewis 		}
197697e9382dSDon Lewis 		/*
197797e9382dSDon Lewis 		 * If we fail to acquire one due to affinity restrictions,
197897e9382dSDon Lewis 		 * bail out and let the idle thread do a more complete search
197997e9382dSDon Lewis 		 * outside of a critical section.
198097e9382dSDon Lewis 		 */
198197e9382dSDon Lewis 		if (tdq_move(steal, tdq) == NULL) {
198297e9382dSDon Lewis 			TDQ_UNLOCK(steal);
198397e9382dSDon Lewis 			break;
198497e9382dSDon Lewis 		}
198597e9382dSDon Lewis 		TDQ_UNLOCK(steal);
198697e9382dSDon Lewis 		break;
198797e9382dSDon Lewis 	}
198897e9382dSDon Lewis 	spinlock_exit();
198997e9382dSDon Lewis }
19904c8a8cfcSKonstantin Belousov #endif
199197e9382dSDon Lewis 
199297e9382dSDon Lewis /*
1993c47f202bSJeff Roberson  * Handle migration from sched_switch().  This happens only for
1994c47f202bSJeff Roberson  * cpu binding.
1995c47f202bSJeff Roberson  */
1996c47f202bSJeff Roberson static struct mtx *
1997c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1998c47f202bSJeff Roberson {
1999c47f202bSJeff Roberson 	struct tdq *tdn;
2000c47f202bSJeff Roberson 
2001efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: "
2002efe67753SNathan Whitehorn 	    "thread %s queued on absent CPU %d.", td->td_name,
2003efe67753SNathan Whitehorn 	    td_get_sched(td)->ts_cpu));
200493ccd6bfSKonstantin Belousov 	tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
2005c47f202bSJeff Roberson #ifdef SMP
20069727e637SJeff Roberson 	tdq_load_rem(tdq, td);
2007c47f202bSJeff Roberson 	/*
2008c47f202bSJeff Roberson 	 * Do the lock dance required to avoid LOR.  We grab an extra
2009c47f202bSJeff Roberson 	 * spinlock nesting to prevent preemption while we're
2010c47f202bSJeff Roberson 	 * not holding either run-queue lock.
2011c47f202bSJeff Roberson 	 */
2012c47f202bSJeff Roberson 	spinlock_enter();
2013b0b9dee5SAttilio Rao 	thread_lock_block(td);	/* This releases the lock on tdq. */
2014435068aaSAttilio Rao 
2015435068aaSAttilio Rao 	/*
2016435068aaSAttilio Rao 	 * Acquire both run-queue locks before placing the thread on the new
2017435068aaSAttilio Rao 	 * run-queue to avoid deadlocks created by placing a thread with a
2018435068aaSAttilio Rao 	 * blocked lock on the run-queue of a remote processor.  The deadlock
2019435068aaSAttilio Rao 	 * occurs when a third processor attempts to lock the two queues in
2020435068aaSAttilio Rao 	 * question while the target processor is spinning with its own
2021435068aaSAttilio Rao 	 * run-queue lock held while waiting for the blocked lock to clear.
2022435068aaSAttilio Rao 	 */
2023435068aaSAttilio Rao 	tdq_lock_pair(tdn, tdq);
2024c47f202bSJeff Roberson 	tdq_add(tdn, td, flags);
202527ee18adSRyan Stone 	tdq_notify(tdn, td);
2026c47f202bSJeff Roberson 	TDQ_UNLOCK(tdn);
2027c47f202bSJeff Roberson 	spinlock_exit();
2028c47f202bSJeff Roberson #endif
2029c47f202bSJeff Roberson 	return (TDQ_LOCKPTR(tdn));
2030c47f202bSJeff Roberson }
2031c47f202bSJeff Roberson 
2032c47f202bSJeff Roberson /*
2033b0b9dee5SAttilio Rao  * A version of thread_lock_unblock() that does not assume td_lock
2034b0b9dee5SAttilio Rao  * is blocked.
2035ae7a6b38SJeff Roberson  */
2036ae7a6b38SJeff Roberson static inline void
2037ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx)
2038ae7a6b38SJeff Roberson {
2039ae7a6b38SJeff Roberson 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
2040ae7a6b38SJeff Roberson 	    (uintptr_t)mtx);
2041ae7a6b38SJeff Roberson }
2042ae7a6b38SJeff Roberson 
2043ae7a6b38SJeff Roberson /*
2044ae7a6b38SJeff Roberson  * Switch threads.  This function has to handle threads coming in while
2045ae7a6b38SJeff Roberson  * blocked for some reason, running, or idle.  It also must deal with
2046ae7a6b38SJeff Roberson  * migrating a thread from one queue to another as running threads may
2047ae7a6b38SJeff Roberson  * be assigned elsewhere via binding.
2048ae7a6b38SJeff Roberson  */
20493db720fdSDavid Xu void
20503389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags)
205135e6168fSJeff Roberson {
2052c02bbb43SJeff Roberson 	struct tdq *tdq;
2053ad1e7d28SJulian Elischer 	struct td_sched *ts;
2054ae7a6b38SJeff Roberson 	struct mtx *mtx;
2055c47f202bSJeff Roberson 	int srqflag;
20563d7f4117SAlexander Motin 	int cpuid, preempted;
205735e6168fSJeff Roberson 
20587b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
20596d55b3ecSJeff Roberson 	KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument"));
206035e6168fSJeff Roberson 
2061ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2062018ff686SJeff Roberson 	tdq = TDQ_SELF();
206393ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
2064c47f202bSJeff Roberson 	mtx = td->td_lock;
20657295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
2066ae7a6b38SJeff Roberson 	ts->ts_rltick = ticks;
2067060563ecSJulian Elischer 	td->td_lastcpu = td->td_oncpu;
2068060563ecSJulian Elischer 	td->td_oncpu = NOCPU;
2069ad9dadc4SAndriy Gapon 	preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
2070ad9dadc4SAndriy Gapon 	    (flags & SW_PREEMPT) != 0;
20713d7f4117SAlexander Motin 	td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
207277918643SStephan Uphoff 	td->td_owepreempt = 0;
20732c27cb3aSAlexander Motin 	if (!TD_IS_IDLETHREAD(td))
20741690c6c1SJeff Roberson 		tdq->tdq_switchcnt++;
2075b11fdad0SJeff Roberson 	/*
2076ae7a6b38SJeff Roberson 	 * The lock pointer in an idle thread should never change.  Reset it
2077ae7a6b38SJeff Roberson 	 * to CAN_RUN as well.
2078b11fdad0SJeff Roberson 	 */
2079486a9414SJulian Elischer 	if (TD_IS_IDLETHREAD(td)) {
2080ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2081bf0acc27SJohn Baldwin 		TD_SET_CAN_RUN(td);
20827b20fb19SJeff Roberson 	} else if (TD_IS_RUNNING(td)) {
2083ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
20843d7f4117SAlexander Motin 		srqflag = preempted ?
2085598b368dSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
2086c47f202bSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING;
2087ba4932b5SMatthew D Fleming #ifdef SMP
20880f7a0ebdSMatthew D Fleming 		if (THREAD_CAN_MIGRATE(td) && !THREAD_CAN_SCHED(td, ts->ts_cpu))
20890f7a0ebdSMatthew D Fleming 			ts->ts_cpu = sched_pickcpu(td, 0);
2090ba4932b5SMatthew D Fleming #endif
2091c47f202bSJeff Roberson 		if (ts->ts_cpu == cpuid)
20929727e637SJeff Roberson 			tdq_runq_add(tdq, td, srqflag);
20930f7a0ebdSMatthew D Fleming 		else {
20940f7a0ebdSMatthew D Fleming 			KASSERT(THREAD_CAN_MIGRATE(td) ||
20950f7a0ebdSMatthew D Fleming 			    (ts->ts_flags & TSF_BOUND) != 0,
20960f7a0ebdSMatthew D Fleming 			    ("Thread %p shouldn't migrate", td));
2097c47f202bSJeff Roberson 			mtx = sched_switch_migrate(tdq, td, srqflag);
20980f7a0ebdSMatthew D Fleming 		}
2099ae7a6b38SJeff Roberson 	} else {
2100ae7a6b38SJeff Roberson 		/* This thread must be going to sleep. */
2101ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
2102b0b9dee5SAttilio Rao 		mtx = thread_lock_block(td);
21039727e637SJeff Roberson 		tdq_load_rem(tdq, td);
21044c8a8cfcSKonstantin Belousov #ifdef SMP
210597e9382dSDon Lewis 		if (tdq->tdq_load == 0)
210697e9382dSDon Lewis 			tdq_trysteal(tdq);
21074c8a8cfcSKonstantin Belousov #endif
2108ae7a6b38SJeff Roberson 	}
2109afa0a46cSAndriy Gapon 
2110afa0a46cSAndriy Gapon #if (KTR_COMPILE & KTR_SCHED) != 0
2111afa0a46cSAndriy Gapon 	if (TD_IS_IDLETHREAD(td))
2112afa0a46cSAndriy Gapon 		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
2113afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority);
2114afa0a46cSAndriy Gapon 	else
2115afa0a46cSAndriy Gapon 		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
2116afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
2117afa0a46cSAndriy Gapon 		    "lockname:\"%s\"", td->td_lockname);
2118afa0a46cSAndriy Gapon #endif
2119afa0a46cSAndriy Gapon 
2120ae7a6b38SJeff Roberson 	/*
2121ae7a6b38SJeff Roberson 	 * We enter here with the thread blocked and assigned to the
2122ae7a6b38SJeff Roberson 	 * appropriate cpu run-queue or sleep-queue and with the current
2123ae7a6b38SJeff Roberson 	 * thread-queue locked.
2124ae7a6b38SJeff Roberson 	 */
2125ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
21262454aaf5SJeff Roberson 	newtd = choosethread();
2127ae7a6b38SJeff Roberson 	/*
2128ae7a6b38SJeff Roberson 	 * Call the MD code to switch contexts if necessary.
2129ae7a6b38SJeff Roberson 	 */
2130ebccf1e3SJoseph Koshy 	if (td != newtd) {
2131ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2132ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2133ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
2134ebccf1e3SJoseph Koshy #endif
2135d9fae5abSAndriy Gapon 		SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
2136eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
213759c68134SJeff Roberson 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
213893ccd6bfSKonstantin Belousov 		sched_pctcpu_update(td_get_sched(newtd), 0);
21396f5f25e5SJohn Birrell 
21406f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
21416f5f25e5SJohn Birrell 		/*
21426f5f25e5SJohn Birrell 		 * If DTrace has set the active vtime enum to anything
21436f5f25e5SJohn Birrell 		 * other than INACTIVE (0), then it should have set the
21446f5f25e5SJohn Birrell 		 * function to call.
21456f5f25e5SJohn Birrell 		 */
21466f5f25e5SJohn Birrell 		if (dtrace_vtime_active)
21476f5f25e5SJohn Birrell 			(*dtrace_vtime_switch_func)(newtd);
21486f5f25e5SJohn Birrell #endif
21496f5f25e5SJohn Birrell 
2150ae7a6b38SJeff Roberson 		cpu_switch(td, newtd, mtx);
2151ae7a6b38SJeff Roberson 		/*
2152ae7a6b38SJeff Roberson 		 * We may return from cpu_switch on a different cpu.  However,
2153ae7a6b38SJeff Roberson 		 * we always return with td_lock pointing to the current cpu's
2154ae7a6b38SJeff Roberson 		 * run queue lock.
2155ae7a6b38SJeff Roberson 		 */
2156ae7a6b38SJeff Roberson 		cpuid = PCPU_GET(cpuid);
2157018ff686SJeff Roberson 		tdq = TDQ_SELF();
2158eea4f254SJeff Roberson 		lock_profile_obtain_lock_success(
2159eea4f254SJeff Roberson 		    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2160b3e9e682SRyan Stone 
2161d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , on__cpu);
2162ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2163ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2164ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
2165ebccf1e3SJoseph Koshy #endif
2166b3e9e682SRyan Stone 	} else {
2167ae7a6b38SJeff Roberson 		thread_unblock_switch(td, mtx);
2168d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , remain__cpu);
2169b3e9e682SRyan Stone 	}
2170afa0a46cSAndriy Gapon 
2171afa0a46cSAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
2172afa0a46cSAndriy Gapon 	    "prio:%d", td->td_priority);
2173afa0a46cSAndriy Gapon 
2174ae7a6b38SJeff Roberson 	/*
2175ae7a6b38SJeff Roberson 	 * Assert that all went well and return.
2176ae7a6b38SJeff Roberson 	 */
2177ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
2178ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2179ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
218035e6168fSJeff Roberson }
218135e6168fSJeff Roberson 
2182ae7a6b38SJeff Roberson /*
2183ae7a6b38SJeff Roberson  * Adjust thread priorities as a result of a nice request.
2184ae7a6b38SJeff Roberson  */
218535e6168fSJeff Roberson void
2186fa885116SJulian Elischer sched_nice(struct proc *p, int nice)
218735e6168fSJeff Roberson {
218835e6168fSJeff Roberson 	struct thread *td;
218935e6168fSJeff Roberson 
2190fa885116SJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
2191e7d50326SJeff Roberson 
2192fa885116SJulian Elischer 	p->p_nice = nice;
21938460a577SJohn Birrell 	FOREACH_THREAD_IN_PROC(p, td) {
21947b20fb19SJeff Roberson 		thread_lock(td);
21958460a577SJohn Birrell 		sched_priority(td);
2196e7d50326SJeff Roberson 		sched_prio(td, td->td_base_user_pri);
21977b20fb19SJeff Roberson 		thread_unlock(td);
219835e6168fSJeff Roberson 	}
2199fa885116SJulian Elischer }
220035e6168fSJeff Roberson 
2201ae7a6b38SJeff Roberson /*
2202ae7a6b38SJeff Roberson  * Record the sleep time for the interactivity scorer.
2203ae7a6b38SJeff Roberson  */
220435e6168fSJeff Roberson void
2205c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int prio)
220635e6168fSJeff Roberson {
2207e7d50326SJeff Roberson 
22087b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
220935e6168fSJeff Roberson 
221054b0e65fSJeff Roberson 	td->td_slptick = ticks;
221117c4c356SKonstantin Belousov 	if (TD_IS_SUSPENDED(td) || prio >= PSOCK)
2212c5aa6b58SJeff Roberson 		td->td_flags |= TDF_CANSWAP;
22132dc29adbSJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
22142dc29adbSJohn Baldwin 		return;
22150502fe2eSJeff Roberson 	if (static_boost == 1 && prio)
2216c5aa6b58SJeff Roberson 		sched_prio(td, prio);
22170502fe2eSJeff Roberson 	else if (static_boost && td->td_priority > static_boost)
22180502fe2eSJeff Roberson 		sched_prio(td, static_boost);
221935e6168fSJeff Roberson }
222035e6168fSJeff Roberson 
2221ae7a6b38SJeff Roberson /*
2222ae7a6b38SJeff Roberson  * Schedule a thread to resume execution and record how long it voluntarily
2223ae7a6b38SJeff Roberson  * slept.  We also update the pctcpu, interactivity, and priority.
2224ae7a6b38SJeff Roberson  */
222535e6168fSJeff Roberson void
222635e6168fSJeff Roberson sched_wakeup(struct thread *td)
222735e6168fSJeff Roberson {
222814618990SJeff Roberson 	struct td_sched *ts;
2229ae7a6b38SJeff Roberson 	int slptick;
2230e7d50326SJeff Roberson 
22317b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
223293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
2233c5aa6b58SJeff Roberson 	td->td_flags &= ~TDF_CANSWAP;
223435e6168fSJeff Roberson 	/*
2235e7d50326SJeff Roberson 	 * If we slept for more than a tick update our interactivity and
2236e7d50326SJeff Roberson 	 * priority.
223735e6168fSJeff Roberson 	 */
223854b0e65fSJeff Roberson 	slptick = td->td_slptick;
223954b0e65fSJeff Roberson 	td->td_slptick = 0;
2240ae7a6b38SJeff Roberson 	if (slptick && slptick != ticks) {
22417295465eSAlexander Motin 		ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT;
22428460a577SJohn Birrell 		sched_interact_update(td);
22437295465eSAlexander Motin 		sched_pctcpu_update(ts, 0);
2244f1e8dc4aSJeff Roberson 	}
22455e5c3873SJeff Roberson 	/*
22465e5c3873SJeff Roberson 	 * Reset the slice value since we slept and advanced the round-robin.
22475e5c3873SJeff Roberson 	 */
22485e5c3873SJeff Roberson 	ts->ts_slice = 0;
22497a5e5e2aSJeff Roberson 	sched_add(td, SRQ_BORING);
225035e6168fSJeff Roberson }
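
/*
 * In sched_wakeup() above, scaling the slept ticks by SCHED_TICK_SHIFT should
 * keep ts_slptime in the same fixed-point units as ts_runtime (which advances
 * by tickincr on each stathz tick), so the interactivity code can compare the
 * two directly when computing the interactivity score.
 */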
225135e6168fSJeff Roberson 
225235e6168fSJeff Roberson /*
225335e6168fSJeff Roberson  * Penalize the parent for creating a new child and initialize the child's
225435e6168fSJeff Roberson  * priority.
225535e6168fSJeff Roberson  */
225635e6168fSJeff Roberson void
22578460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
225815dc847eSJeff Roberson {
22597b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
226093ccd6bfSKonstantin Belousov 	sched_pctcpu_update(td_get_sched(td), 1);
2261ad1e7d28SJulian Elischer 	sched_fork_thread(td, child);
2262e7d50326SJeff Roberson 	/*
2263e7d50326SJeff Roberson 	 * Penalize the parent and child for forking.
2264e7d50326SJeff Roberson 	 */
2265e7d50326SJeff Roberson 	sched_interact_fork(child);
2266e7d50326SJeff Roberson 	sched_priority(child);
226793ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += tickincr;
2268e7d50326SJeff Roberson 	sched_interact_update(td);
2269e7d50326SJeff Roberson 	sched_priority(td);
2270ad1e7d28SJulian Elischer }
2271ad1e7d28SJulian Elischer 
2272ae7a6b38SJeff Roberson /*
2273ae7a6b38SJeff Roberson  * Fork a new thread, may be within the same process.
2274ae7a6b38SJeff Roberson  */
2275ad1e7d28SJulian Elischer void
2276ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
2277ad1e7d28SJulian Elischer {
2278ad1e7d28SJulian Elischer 	struct td_sched *ts;
2279ad1e7d28SJulian Elischer 	struct td_sched *ts2;
22805e5c3873SJeff Roberson 	struct tdq *tdq;
22818460a577SJohn Birrell 
22825e5c3873SJeff Roberson 	tdq = TDQ_SELF();
22838b16c208SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2284e7d50326SJeff Roberson 	/*
2285e7d50326SJeff Roberson 	 * Initialize child.
2286e7d50326SJeff Roberson 	 */
228793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
228893ccd6bfSKonstantin Belousov 	ts2 = td_get_sched(child);
228992de34dfSJohn Baldwin 	child->td_oncpu = NOCPU;
229092de34dfSJohn Baldwin 	child->td_lastcpu = NOCPU;
22915e5c3873SJeff Roberson 	child->td_lock = TDQ_LOCKPTR(tdq);
22928b16c208SJeff Roberson 	child->td_cpuset = cpuset_ref(td->td_cpuset);
22933f289c3fSJeff Roberson 	child->td_domain.dr_policy = td->td_cpuset->cs_domain;
2294ad1e7d28SJulian Elischer 	ts2->ts_cpu = ts->ts_cpu;
22958b16c208SJeff Roberson 	ts2->ts_flags = 0;
2296e7d50326SJeff Roberson 	/*
229722d19207SJohn Baldwin 	 * Grab our parent's cpu estimation information.
2298e7d50326SJeff Roberson 	 */
2299ad1e7d28SJulian Elischer 	ts2->ts_ticks = ts->ts_ticks;
2300ad1e7d28SJulian Elischer 	ts2->ts_ltick = ts->ts_ltick;
2301ad1e7d28SJulian Elischer 	ts2->ts_ftick = ts->ts_ftick;
230222d19207SJohn Baldwin 	/*
230322d19207SJohn Baldwin 	 * Do not inherit any borrowed priority from the parent.
230422d19207SJohn Baldwin 	 */
230522d19207SJohn Baldwin 	child->td_priority = child->td_base_pri;
2306e7d50326SJeff Roberson 	/*
2307e7d50326SJeff Roberson 	 * And update interactivity score.
2308e7d50326SJeff Roberson 	 */
2309ae7a6b38SJeff Roberson 	ts2->ts_slptime = ts->ts_slptime;
2310ae7a6b38SJeff Roberson 	ts2->ts_runtime = ts->ts_runtime;
23115e5c3873SJeff Roberson 	/* Attempt to quickly learn interactivity. */
23125e5c3873SJeff Roberson 	ts2->ts_slice = tdq_slice(tdq) - sched_slice_min;
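	/*
	 * ts_slice counts up toward tdq_slice() in sched_clock(), so starting
	 * the child this close to the limit should leave it only
	 * sched_slice_min ticks before TDF_SLICEEND forces a switch and its
	 * interactivity can be re-evaluated.
	 */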
23138f51ad55SJeff Roberson #ifdef KTR
23148f51ad55SJeff Roberson 	bzero(ts2->ts_name, sizeof(ts2->ts_name));
23158f51ad55SJeff Roberson #endif
231615dc847eSJeff Roberson }
231715dc847eSJeff Roberson 
2318ae7a6b38SJeff Roberson /*
2319ae7a6b38SJeff Roberson  * Adjust the priority class of a thread.
2320ae7a6b38SJeff Roberson  */
232115dc847eSJeff Roberson void
23228460a577SJohn Birrell sched_class(struct thread *td, int class)
232315dc847eSJeff Roberson {
232415dc847eSJeff Roberson 
23257b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
23268460a577SJohn Birrell 	if (td->td_pri_class == class)
232715dc847eSJeff Roberson 		return;
23288460a577SJohn Birrell 	td->td_pri_class = class;
232935e6168fSJeff Roberson }
233035e6168fSJeff Roberson 
233135e6168fSJeff Roberson /*
233235e6168fSJeff Roberson  * Return some of the child's priority and interactivity to the parent.
233335e6168fSJeff Roberson  */
233435e6168fSJeff Roberson void
2335fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
233635e6168fSJeff Roberson {
2337e7d50326SJeff Roberson 	struct thread *td;
2338141ad61cSJeff Roberson 
23398f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
2340cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2341374ae2a3SJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
2342e7d50326SJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
2343e7d50326SJeff Roberson 	sched_exit_thread(td, child);
2344ad1e7d28SJulian Elischer }
2345ad1e7d28SJulian Elischer 
2346ae7a6b38SJeff Roberson /*
2347ae7a6b38SJeff Roberson  * Penalize another thread for the time spent on this one.  This helps to
2348ae7a6b38SJeff Roberson  * worsen the priority and interactivity of processes which schedule batch
2349ae7a6b38SJeff Roberson  * jobs such as make.  This has little effect on the make process itself but
2350ae7a6b38SJeff Roberson  * causes new processes spawned by it to receive worse scores immediately.
2351ae7a6b38SJeff Roberson  */
2352ad1e7d28SJulian Elischer void
2353fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child)
2354ad1e7d28SJulian Elischer {
2355fc6c30f6SJulian Elischer 
23568f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
2357cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2358e7d50326SJeff Roberson 	/*
2359e7d50326SJeff Roberson 	 * Give the child's runtime to the parent without returning the
2360e7d50326SJeff Roberson 	 * sleep time as a penalty to the parent.  This causes shells that
2361e7d50326SJeff Roberson 	 * launch expensive things to mark their children as expensive.
2362e7d50326SJeff Roberson 	 */
23637b20fb19SJeff Roberson 	thread_lock(td);
236493ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime;
2365fc6c30f6SJulian Elischer 	sched_interact_update(td);
2366e7d50326SJeff Roberson 	sched_priority(td);
23677b20fb19SJeff Roberson 	thread_unlock(td);
2368ad1e7d28SJulian Elischer }
2369ad1e7d28SJulian Elischer 
2370ff256d9cSJeff Roberson void
2371ff256d9cSJeff Roberson sched_preempt(struct thread *td)
2372ff256d9cSJeff Roberson {
2373ff256d9cSJeff Roberson 	struct tdq *tdq;
2374ff256d9cSJeff Roberson 
2375b3e9e682SRyan Stone 	SDT_PROBE2(sched, , , surrender, td, td->td_proc);
2376b3e9e682SRyan Stone 
2377ff256d9cSJeff Roberson 	thread_lock(td);
2378ff256d9cSJeff Roberson 	tdq = TDQ_SELF();
2379ff256d9cSJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2380ff256d9cSJeff Roberson 	tdq->tdq_ipipending = 0;
2381ff256d9cSJeff Roberson 	if (td->td_priority > tdq->tdq_lowpri) {
23828df78c41SJeff Roberson 		int flags;
23838df78c41SJeff Roberson 
23848df78c41SJeff Roberson 		flags = SW_INVOL | SW_PREEMPT;
2385ff256d9cSJeff Roberson 		if (td->td_critnest > 1)
2386ff256d9cSJeff Roberson 			td->td_owepreempt = 1;
23878df78c41SJeff Roberson 		else if (TD_IS_IDLETHREAD(td))
23888df78c41SJeff Roberson 			mi_switch(flags | SWT_REMOTEWAKEIDLE, NULL);
2389ff256d9cSJeff Roberson 		else
23908df78c41SJeff Roberson 			mi_switch(flags | SWT_REMOTEPREEMPT, NULL);
2391ff256d9cSJeff Roberson 	}
2392ff256d9cSJeff Roberson 	thread_unlock(td);
2393ff256d9cSJeff Roberson }
2394ff256d9cSJeff Roberson 
2395ae7a6b38SJeff Roberson /*
2396ae7a6b38SJeff Roberson  * Fix priorities on return to user-space.  Priorities may be elevated due
2397ae7a6b38SJeff Roberson  * to static priorities in msleep() or similar.
2398ae7a6b38SJeff Roberson  */
2399ad1e7d28SJulian Elischer void
240028240885SMateusz Guzik sched_userret_slowpath(struct thread *td)
2401ad1e7d28SJulian Elischer {
240228240885SMateusz Guzik 
24037b20fb19SJeff Roberson 	thread_lock(td);
2404ad1e7d28SJulian Elischer 	td->td_priority = td->td_user_pri;
2405ad1e7d28SJulian Elischer 	td->td_base_pri = td->td_user_pri;
240662fa74d9SJeff Roberson 	tdq_setlowpri(TDQ_SELF(), td);
24077b20fb19SJeff Roberson 	thread_unlock(td);
2408ad1e7d28SJulian Elischer }
240935e6168fSJeff Roberson 
2410ae7a6b38SJeff Roberson /*
2411ae7a6b38SJeff Roberson  * Handle a stathz tick.  This is really only relevant for timeshare
2412ae7a6b38SJeff Roberson  * threads.
2413ae7a6b38SJeff Roberson  */
241435e6168fSJeff Roberson void
24157cf90fb3SJeff Roberson sched_clock(struct thread *td)
241635e6168fSJeff Roberson {
2417ad1e7d28SJulian Elischer 	struct tdq *tdq;
2418ad1e7d28SJulian Elischer 	struct td_sched *ts;
241935e6168fSJeff Roberson 
2420ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
24213f872f85SJeff Roberson 	tdq = TDQ_SELF();
24227fcf154aSJeff Roberson #ifdef SMP
24237fcf154aSJeff Roberson 	/*
24247fcf154aSJeff Roberson 	 * We run the long term load balancer infrequently on the first cpu.
24257fcf154aSJeff Roberson 	 */
2426290d9060SDon Lewis 	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0) {
24277fcf154aSJeff Roberson 		if (balance_ticks && --balance_ticks == 0)
24287fcf154aSJeff Roberson 			sched_balance();
24297fcf154aSJeff Roberson 	}
24307fcf154aSJeff Roberson #endif
24313f872f85SJeff Roberson 	/*
24321690c6c1SJeff Roberson 	 * Save the old switch count so we have a record of the last tick's
24331690c6c1SJeff Roberson 	 * activity.  Initialize the new switch count based on our load.
24341690c6c1SJeff Roberson 	 * If there is some activity, seed it to reflect that.
24351690c6c1SJeff Roberson 	 */
24361690c6c1SJeff Roberson 	tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
24376c47aaaeSJeff Roberson 	tdq->tdq_switchcnt = tdq->tdq_load;
24381690c6c1SJeff Roberson 	/*
24393f872f85SJeff Roberson 	 * Advance the insert index once for each tick to ensure that all
24403f872f85SJeff Roberson 	 * threads get a chance to run.
24413f872f85SJeff Roberson 	 */
24423f872f85SJeff Roberson 	if (tdq->tdq_idx == tdq->tdq_ridx) {
24433f872f85SJeff Roberson 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
24443f872f85SJeff Roberson 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
24453f872f85SJeff Roberson 			tdq->tdq_ridx = tdq->tdq_idx;
24463f872f85SJeff Roberson 	}
244793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
24487295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
2449fd0b8c78SJeff Roberson 	if (td->td_pri_class & PRI_FIFO_BIT)
2450a8949de2SJeff Roberson 		return;
2451c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
2452a8949de2SJeff Roberson 		/*
2453fd0b8c78SJeff Roberson 		 * We used a tick; charge it to the thread so
2454fd0b8c78SJeff Roberson 		 * that we can compute our interactivity.
245515dc847eSJeff Roberson 		 */
245693ccd6bfSKonstantin Belousov 		td_get_sched(td)->ts_runtime += tickincr;
24578460a577SJohn Birrell 		sched_interact_update(td);
245873daf66fSJeff Roberson 		sched_priority(td);
2459fd0b8c78SJeff Roberson 	}
2460579895dfSAlexander Motin 
246135e6168fSJeff Roberson 	/*
2462579895dfSAlexander Motin 	 * Force a context switch if the current thread has used up a full
2463579895dfSAlexander Motin 	 * time slice (default is 100ms).
246435e6168fSJeff Roberson 	 */
24655e5c3873SJeff Roberson 	if (!TD_IS_IDLETHREAD(td) && ++ts->ts_slice >= tdq_slice(tdq)) {
24665e5c3873SJeff Roberson 		ts->ts_slice = 0;
24673d7f4117SAlexander Motin 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
246835e6168fSJeff Roberson 	}
2469579895dfSAlexander Motin }
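
/*
 * A rough worked example of the slice check above (the exact numbers depend
 * on the tunables): with stathz at its common value of 127 and the default
 * slice divisor of 10, tdq_slice() is about 12 stathz ticks, i.e. roughly
 * 94ms, which is where the "default is 100ms" figure in the comment above
 * comes from.
 */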
247035e6168fSJeff Roberson 
2471ccd0ec40SKonstantin Belousov u_int
2472ccd0ec40SKonstantin Belousov sched_estcpu(struct thread *td __unused)
2473ae7a6b38SJeff Roberson {
2474ae7a6b38SJeff Roberson 
2475ccd0ec40SKonstantin Belousov 	return (0);
2476ae7a6b38SJeff Roberson }
2477ae7a6b38SJeff Roberson 
2478ae7a6b38SJeff Roberson /*
2479ae7a6b38SJeff Roberson  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2480ae7a6b38SJeff Roberson  * cooperative idle threads.
2481ae7a6b38SJeff Roberson  */
248235e6168fSJeff Roberson int
248335e6168fSJeff Roberson sched_runnable(void)
248435e6168fSJeff Roberson {
2485ad1e7d28SJulian Elischer 	struct tdq *tdq;
2486b90816f1SJeff Roberson 	int load;
248735e6168fSJeff Roberson 
2488b90816f1SJeff Roberson 	load = 1;
2489b90816f1SJeff Roberson 
2490ad1e7d28SJulian Elischer 	tdq = TDQ_SELF();
24913f741ca1SJeff Roberson 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2492d2ad694cSJeff Roberson 		if (tdq->tdq_load > 0)
24933f741ca1SJeff Roberson 			goto out;
24943f741ca1SJeff Roberson 	} else
2495d2ad694cSJeff Roberson 		if (tdq->tdq_load - 1 > 0)
2496b90816f1SJeff Roberson 			goto out;
2497b90816f1SJeff Roberson 	load = 0;
2498b90816f1SJeff Roberson out:
2499b90816f1SJeff Roberson 	return (load);
250035e6168fSJeff Roberson }
250135e6168fSJeff Roberson 
2502ae7a6b38SJeff Roberson /*
2503ae7a6b38SJeff Roberson  * Choose the highest priority thread to run.  The thread is removed from
2504ae7a6b38SJeff Roberson  * the run-queue while running, but the load remains.  For SMP we set
2505ae7a6b38SJeff Roberson  * the tdq in the global idle bitmask if it idles here.
2506ae7a6b38SJeff Roberson  */
25077a5e5e2aSJeff Roberson struct thread *
2508c9f25d8fSJeff Roberson sched_choose(void)
2509c9f25d8fSJeff Roberson {
25109727e637SJeff Roberson 	struct thread *td;
2511ae7a6b38SJeff Roberson 	struct tdq *tdq;
2512ae7a6b38SJeff Roberson 
2513ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2514ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
25159727e637SJeff Roberson 	td = tdq_choose(tdq);
25169727e637SJeff Roberson 	if (td) {
25179727e637SJeff Roberson 		tdq_runq_rem(tdq, td);
25180502fe2eSJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
25199727e637SJeff Roberson 		return (td);
252035e6168fSJeff Roberson 	}
25210502fe2eSJeff Roberson 	tdq->tdq_lowpri = PRI_MAX_IDLE;
252262fa74d9SJeff Roberson 	return (PCPU_GET(idlethread));
25237a5e5e2aSJeff Roberson }
25247a5e5e2aSJeff Roberson 
2525ae7a6b38SJeff Roberson /*
2526ae7a6b38SJeff Roberson  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2527ae7a6b38SJeff Roberson  * we request it and it takes effect once we exit a critical section.
2528ae7a6b38SJeff Roberson  */
2529ae7a6b38SJeff Roberson static inline void
2530ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
25317a5e5e2aSJeff Roberson {
25327a5e5e2aSJeff Roberson 	struct thread *ctd;
25337a5e5e2aSJeff Roberson 	int cpri;
25347a5e5e2aSJeff Roberson 	int pri;
25357a5e5e2aSJeff Roberson 
2536ff256d9cSJeff Roberson 	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2537ff256d9cSJeff Roberson 
25387a5e5e2aSJeff Roberson 	ctd = curthread;
25397a5e5e2aSJeff Roberson 	pri = td->td_priority;
25407a5e5e2aSJeff Roberson 	cpri = ctd->td_priority;
2541ff256d9cSJeff Roberson 	if (pri < cpri)
2542ff256d9cSJeff Roberson 		ctd->td_flags |= TDF_NEEDRESCHED;
25437a5e5e2aSJeff Roberson 	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2544ae7a6b38SJeff Roberson 		return;
2545ff256d9cSJeff Roberson 	if (!sched_shouldpreempt(pri, cpri, 0))
2546ae7a6b38SJeff Roberson 		return;
25477a5e5e2aSJeff Roberson 	ctd->td_owepreempt = 1;
254835e6168fSJeff Roberson }
254935e6168fSJeff Roberson 
2550ae7a6b38SJeff Roberson /*
255173daf66fSJeff Roberson  * Add a thread to a thread queue.  Select the appropriate runq and add the
255273daf66fSJeff Roberson  * thread to it.  This is the internal function called when the tdq is
255373daf66fSJeff Roberson  * predetermined.
2554ae7a6b38SJeff Roberson  */
255535e6168fSJeff Roberson void
2556ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
255735e6168fSJeff Roberson {
2558c9f25d8fSJeff Roberson 
2559ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
25607a5e5e2aSJeff Roberson 	KASSERT((td->td_inhibitors == 0),
25617a5e5e2aSJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
25627a5e5e2aSJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
25637a5e5e2aSJeff Roberson 	    ("sched_add: bad thread state"));
2564b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
2565b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
2566ae7a6b38SJeff Roberson 
2567ae7a6b38SJeff Roberson 	if (td->td_priority < tdq->tdq_lowpri)
2568ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
25699727e637SJeff Roberson 	tdq_runq_add(tdq, td, flags);
25709727e637SJeff Roberson 	tdq_load_add(tdq, td);
2571ae7a6b38SJeff Roberson }
2572ae7a6b38SJeff Roberson 
2573ae7a6b38SJeff Roberson /*
2574ae7a6b38SJeff Roberson  * Select the target thread queue and add a thread to it.  Request
2575ae7a6b38SJeff Roberson  * preemption or IPI a remote processor if required.
2576ae7a6b38SJeff Roberson  */
2577ae7a6b38SJeff Roberson void
2578ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags)
2579ae7a6b38SJeff Roberson {
2580ae7a6b38SJeff Roberson 	struct tdq *tdq;
25817b8bfa0dSJeff Roberson #ifdef SMP
2582ae7a6b38SJeff Roberson 	int cpu;
2583ae7a6b38SJeff Roberson #endif
25848f51ad55SJeff Roberson 
25858f51ad55SJeff Roberson 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
25868f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
25878f51ad55SJeff Roberson 	    sched_tdname(curthread));
25888f51ad55SJeff Roberson 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
25898f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(td));
2590b3e9e682SRyan Stone 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
2591b3e9e682SRyan Stone 	    flags & SRQ_PREEMPTED);
2592ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2593ae7a6b38SJeff Roberson 	/*
2594ae7a6b38SJeff Roberson 	 * Recalculate the priority before we select the target cpu or
2595ae7a6b38SJeff Roberson 	 * run-queue.
2596ae7a6b38SJeff Roberson 	 */
2597ae7a6b38SJeff Roberson 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2598ae7a6b38SJeff Roberson 		sched_priority(td);
2599ae7a6b38SJeff Roberson #ifdef SMP
2600ae7a6b38SJeff Roberson 	/*
2601ae7a6b38SJeff Roberson 	 * Pick the destination cpu and if it isn't ours transfer to the
2602ae7a6b38SJeff Roberson 	 * target cpu.
2603ae7a6b38SJeff Roberson 	 */
26049727e637SJeff Roberson 	cpu = sched_pickcpu(td, flags);
26059727e637SJeff Roberson 	tdq = sched_setcpu(td, cpu, flags);
2606ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
260773daf66fSJeff Roberson 	if (cpu != PCPU_GET(cpuid)) {
260827ee18adSRyan Stone 		tdq_notify(tdq, td);
26097b8bfa0dSJeff Roberson 		return;
26107b8bfa0dSJeff Roberson 	}
2611ae7a6b38SJeff Roberson #else
2612ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2613ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
2614ae7a6b38SJeff Roberson 	/*
2615ae7a6b38SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
2616ae7a6b38SJeff Roberson 	 * to the scheduler's lock.
2617ae7a6b38SJeff Roberson 	 */
2618ae7a6b38SJeff Roberson 	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2619ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
26207b8bfa0dSJeff Roberson #endif
2621ae7a6b38SJeff Roberson 	if (!(flags & SRQ_YIELDING))
2622ae7a6b38SJeff Roberson 		sched_setpreempt(td);
262335e6168fSJeff Roberson }
262435e6168fSJeff Roberson 
2625ae7a6b38SJeff Roberson /*
2626ae7a6b38SJeff Roberson  * Remove a thread from a run-queue without running it.  This is used
2627ae7a6b38SJeff Roberson  * when we're stealing a thread from a remote queue.  Otherwise all threads
2628ae7a6b38SJeff Roberson  * exit by calling sched_exit_thread() and sched_throw() themselves.
2629ae7a6b38SJeff Roberson  */
263035e6168fSJeff Roberson void
26317cf90fb3SJeff Roberson sched_rem(struct thread *td)
263235e6168fSJeff Roberson {
2633ad1e7d28SJulian Elischer 	struct tdq *tdq;
26347cf90fb3SJeff Roberson 
26358f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
26368f51ad55SJeff Roberson 	    "prio:%d", td->td_priority);
2637b3e9e682SRyan Stone 	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
263893ccd6bfSKonstantin Belousov 	tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
2639ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2640ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
26417a5e5e2aSJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
2642ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
26439727e637SJeff Roberson 	tdq_runq_rem(tdq, td);
26449727e637SJeff Roberson 	tdq_load_rem(tdq, td);
26457a5e5e2aSJeff Roberson 	TD_SET_CAN_RUN(td);
264662fa74d9SJeff Roberson 	if (td->td_priority == tdq->tdq_lowpri)
264762fa74d9SJeff Roberson 		tdq_setlowpri(tdq, NULL);
264835e6168fSJeff Roberson }
264935e6168fSJeff Roberson 
2650ae7a6b38SJeff Roberson /*
2651ae7a6b38SJeff Roberson  * Fetch cpu utilization information.  Updates on demand.
2652ae7a6b38SJeff Roberson  */
265335e6168fSJeff Roberson fixpt_t
26547cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
265535e6168fSJeff Roberson {
265635e6168fSJeff Roberson 	fixpt_t pctcpu;
2657ad1e7d28SJulian Elischer 	struct td_sched *ts;
265835e6168fSJeff Roberson 
265935e6168fSJeff Roberson 	pctcpu = 0;
266093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
266135e6168fSJeff Roberson 
26623da35a0aSJohn Baldwin 	THREAD_LOCK_ASSERT(td, MA_OWNED);
26637295465eSAlexander Motin 	sched_pctcpu_update(ts, TD_IS_RUNNING(td));
2664ad1e7d28SJulian Elischer 	if (ts->ts_ticks) {
266535e6168fSJeff Roberson 		int rtick;
266635e6168fSJeff Roberson 
266735e6168fSJeff Roberson 		/* How many rtick per second ? */
2668e7d50326SJeff Roberson 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2669e7d50326SJeff Roberson 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
267035e6168fSJeff Roberson 	}
267135e6168fSJeff Roberson 
267235e6168fSJeff Roberson 	return (pctcpu);
267335e6168fSJeff Roberson }
267435e6168fSJeff Roberson 
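
/*
 * Rough arithmetic for the conversion above: rtick is the average number of
 * hz ticks per second the thread was charged over the sampling window, so
 * with hz = 1000 a thread that ran for about 250 ticks per second yields
 * pctcpu of roughly FSCALE / 4, which consumers report as 25%.
 */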
267562fa74d9SJeff Roberson /*
267662fa74d9SJeff Roberson  * Enforce affinity settings for a thread.  Called after adjustments to
267762fa74d9SJeff Roberson  * cpumask.
267862fa74d9SJeff Roberson  */
2679885d51a3SJeff Roberson void
2680885d51a3SJeff Roberson sched_affinity(struct thread *td)
2681885d51a3SJeff Roberson {
268262fa74d9SJeff Roberson #ifdef SMP
268362fa74d9SJeff Roberson 	struct td_sched *ts;
268462fa74d9SJeff Roberson 
268562fa74d9SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
268693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
268762fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
268862fa74d9SJeff Roberson 		return;
268953a6c8b3SJeff Roberson 	if (TD_ON_RUNQ(td)) {
269053a6c8b3SJeff Roberson 		sched_rem(td);
269153a6c8b3SJeff Roberson 		sched_add(td, SRQ_BORING);
269253a6c8b3SJeff Roberson 		return;
269353a6c8b3SJeff Roberson 	}
269462fa74d9SJeff Roberson 	if (!TD_IS_RUNNING(td))
269562fa74d9SJeff Roberson 		return;
269662fa74d9SJeff Roberson 	/*
26970f7a0ebdSMatthew D Fleming 	 * Force a switch before returning to userspace.  If the
26980f7a0ebdSMatthew D Fleming 	 * target thread is not running locally send an ipi to force
26990f7a0ebdSMatthew D Fleming 	 * the issue.
270062fa74d9SJeff Roberson 	 */
2701a8103ae8SJohn Baldwin 	td->td_flags |= TDF_NEEDRESCHED;
27020f7a0ebdSMatthew D Fleming 	if (td != curthread)
27030f7a0ebdSMatthew D Fleming 		ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
270462fa74d9SJeff Roberson #endif
2705885d51a3SJeff Roberson }
2706885d51a3SJeff Roberson 
2707ae7a6b38SJeff Roberson /*
2708ae7a6b38SJeff Roberson  * Bind a thread to a target cpu.
2709ae7a6b38SJeff Roberson  */
27109bacd788SJeff Roberson void
27119bacd788SJeff Roberson sched_bind(struct thread *td, int cpu)
27129bacd788SJeff Roberson {
2713ad1e7d28SJulian Elischer 	struct td_sched *ts;
27149bacd788SJeff Roberson 
2715c47f202bSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
27161d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
271793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
27186b2f763fSJeff Roberson 	if (ts->ts_flags & TSF_BOUND)
2719c95d2db2SJeff Roberson 		sched_unbind(td);
27200f7a0ebdSMatthew D Fleming 	KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
2721ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
27226b2f763fSJeff Roberson 	sched_pin();
272380f86c9fSJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
27249bacd788SJeff Roberson 		return;
27256b2f763fSJeff Roberson 	ts->ts_cpu = cpu;
27269bacd788SJeff Roberson 	/* When we return from mi_switch we'll be on the correct cpu. */
2727279f949eSPoul-Henning Kamp 	mi_switch(SW_VOL, NULL);
27289bacd788SJeff Roberson }
27299bacd788SJeff Roberson 
2730ae7a6b38SJeff Roberson /*
2731ae7a6b38SJeff Roberson  * Release a bound thread.
2732ae7a6b38SJeff Roberson  */
27339bacd788SJeff Roberson void
27349bacd788SJeff Roberson sched_unbind(struct thread *td)
27359bacd788SJeff Roberson {
2736e7d50326SJeff Roberson 	struct td_sched *ts;
2737e7d50326SJeff Roberson 
27387b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
27391d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
274093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
27416b2f763fSJeff Roberson 	if ((ts->ts_flags & TSF_BOUND) == 0)
27426b2f763fSJeff Roberson 		return;
2743e7d50326SJeff Roberson 	ts->ts_flags &= ~TSF_BOUND;
2744e7d50326SJeff Roberson 	sched_unpin();
27459bacd788SJeff Roberson }
27469bacd788SJeff Roberson 
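
/*
 * Typical usage pattern for the two functions above, from a thread that needs
 * to run on a specific CPU for a while (a sketch; error handling omitted):
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);	(may mi_switch() to migrate)
 *	thread_unlock(curthread);
 *	... work that must run on "cpu" ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */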
274735e6168fSJeff Roberson int
2748ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
2749ebccf1e3SJoseph Koshy {
27507b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
275193ccd6bfSKonstantin Belousov 	return (td_get_sched(td)->ts_flags & TSF_BOUND);
2752ebccf1e3SJoseph Koshy }
2753ebccf1e3SJoseph Koshy 
2754ae7a6b38SJeff Roberson /*
2755ae7a6b38SJeff Roberson  * Basic yield call.
2756ae7a6b38SJeff Roberson  */
275736ec198bSDavid Xu void
275836ec198bSDavid Xu sched_relinquish(struct thread *td)
275936ec198bSDavid Xu {
27607b20fb19SJeff Roberson 	thread_lock(td);
27618df78c41SJeff Roberson 	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
27627b20fb19SJeff Roberson 	thread_unlock(td);
276336ec198bSDavid Xu }
276436ec198bSDavid Xu 
2765ae7a6b38SJeff Roberson /*
2766ae7a6b38SJeff Roberson  * Return the total system load.
2767ae7a6b38SJeff Roberson  */
2768ebccf1e3SJoseph Koshy int
276933916c36SJeff Roberson sched_load(void)
277033916c36SJeff Roberson {
277133916c36SJeff Roberson #ifdef SMP
277233916c36SJeff Roberson 	int total;
277333916c36SJeff Roberson 	int i;
277433916c36SJeff Roberson 
277533916c36SJeff Roberson 	total = 0;
27763aa6d94eSJohn Baldwin 	CPU_FOREACH(i)
277762fa74d9SJeff Roberson 		total += TDQ_CPU(i)->tdq_sysload;
277833916c36SJeff Roberson 	return (total);
277933916c36SJeff Roberson #else
2780d2ad694cSJeff Roberson 	return (TDQ_SELF()->tdq_sysload);
278133916c36SJeff Roberson #endif
278233916c36SJeff Roberson }
278333916c36SJeff Roberson 
278433916c36SJeff Roberson int
278535e6168fSJeff Roberson sched_sizeof_proc(void)
278635e6168fSJeff Roberson {
278735e6168fSJeff Roberson 	return (sizeof(struct proc));
278835e6168fSJeff Roberson }
278935e6168fSJeff Roberson 
279035e6168fSJeff Roberson int
279135e6168fSJeff Roberson sched_sizeof_thread(void)
279235e6168fSJeff Roberson {
279335e6168fSJeff Roberson 	return (sizeof(struct thread) + sizeof(struct td_sched));
279435e6168fSJeff Roberson }
2795b41f1452SDavid Xu 
279609c8a4ccSJeff Roberson #ifdef SMP
279709c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)						\
279809c8a4ccSJeff Roberson     ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0)
279909c8a4ccSJeff Roberson #else
280009c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)	1
280109c8a4ccSJeff Roberson #endif
280209c8a4ccSJeff Roberson 
28037a5e5e2aSJeff Roberson /*
28047a5e5e2aSJeff Roberson  * The actual idle process.
28057a5e5e2aSJeff Roberson  */
28067a5e5e2aSJeff Roberson void
28077a5e5e2aSJeff Roberson sched_idletd(void *dummy)
28087a5e5e2aSJeff Roberson {
28097a5e5e2aSJeff Roberson 	struct thread *td;
2810ae7a6b38SJeff Roberson 	struct tdq *tdq;
28112c27cb3aSAlexander Motin 	int oldswitchcnt, switchcnt;
28121690c6c1SJeff Roberson 	int i;
28137a5e5e2aSJeff Roberson 
28147b55ab05SJeff Roberson 	mtx_assert(&Giant, MA_NOTOWNED);
28157a5e5e2aSJeff Roberson 	td = curthread;
2816ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2817ba96d2d8SJohn Baldwin 	THREAD_NO_SLEEPING();
28182c27cb3aSAlexander Motin 	oldswitchcnt = -1;
2819ae7a6b38SJeff Roberson 	for (;;) {
28202c27cb3aSAlexander Motin 		if (tdq->tdq_load) {
28212c27cb3aSAlexander Motin 			thread_lock(td);
28222c27cb3aSAlexander Motin 			mi_switch(SW_VOL | SWT_IDLE, NULL);
28232c27cb3aSAlexander Motin 			thread_unlock(td);
28242c27cb3aSAlexander Motin 		}
28252c27cb3aSAlexander Motin 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
2826ae7a6b38SJeff Roberson #ifdef SMP
282797e9382dSDon Lewis 		if (always_steal || switchcnt != oldswitchcnt) {
28282c27cb3aSAlexander Motin 			oldswitchcnt = switchcnt;
28291690c6c1SJeff Roberson 			if (tdq_idled(tdq) == 0)
28301690c6c1SJeff Roberson 				continue;
28312c27cb3aSAlexander Motin 		}
28321690c6c1SJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
28332fd4047fSAlexander Motin #else
28342fd4047fSAlexander Motin 		oldswitchcnt = switchcnt;
28352fd4047fSAlexander Motin #endif
28361690c6c1SJeff Roberson 		/*
28371690c6c1SJeff Roberson 		 * If we're switching very frequently, spin while checking
28381690c6c1SJeff Roberson 		 * for load rather than entering a low power state that
28397b55ab05SJeff Roberson 		 * may require an IPI.  However, don't do any busy
28407b55ab05SJeff Roberson 		 * loops while on SMT machines as this simply steals
28417b55ab05SJeff Roberson 		 * cycles from cores doing useful work.
28421690c6c1SJeff Roberson 		 */
284309c8a4ccSJeff Roberson 		if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) {
28441690c6c1SJeff Roberson 			for (i = 0; i < sched_idlespins; i++) {
28451690c6c1SJeff Roberson 				if (tdq->tdq_load)
28461690c6c1SJeff Roberson 					break;
28471690c6c1SJeff Roberson 				cpu_spinwait();
28481690c6c1SJeff Roberson 			}
28491690c6c1SJeff Roberson 		}
28502c27cb3aSAlexander Motin 
28512c27cb3aSAlexander Motin 		/* If there was a context switch during the spin, restart it. */
28526c47aaaeSJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
28532c27cb3aSAlexander Motin 		if (tdq->tdq_load != 0 || switchcnt != oldswitchcnt)
28542c27cb3aSAlexander Motin 			continue;
28552c27cb3aSAlexander Motin 
28562c27cb3aSAlexander Motin 		/* Run main MD idle handler. */
28579f9ad565SAlexander Motin 		tdq->tdq_cpu_idle = 1;
285879654969SAlexander Motin 		/*
285979654969SAlexander Motin 		 * Make sure that the tdq_cpu_idle update is globally visible
286079654969SAlexander Motin 		 * before cpu_idle() reads tdq_load.  The order is important
286179654969SAlexander Motin 		 * to avoid a race with tdq_notify().
286279654969SAlexander Motin 		 */
2863e8677f38SKonstantin Belousov 		atomic_thread_fence_seq_cst();
286497e9382dSDon Lewis 		/*
286597e9382dSDon Lewis 		 * Checking for load again after the fence picks up assigned
286697e9382dSDon Lewis 		 * threads often enough to make it worthwhile to do so, in
286797e9382dSDon Lewis 		 * order to avoid calling cpu_idle().
286897e9382dSDon Lewis 		 */
286997e9382dSDon Lewis 		if (tdq->tdq_load != 0) {
287097e9382dSDon Lewis 			tdq->tdq_cpu_idle = 0;
287197e9382dSDon Lewis 			continue;
287297e9382dSDon Lewis 		}
28732c27cb3aSAlexander Motin 		cpu_idle(switchcnt * 4 > sched_idlespinthresh);
28749f9ad565SAlexander Motin 		tdq->tdq_cpu_idle = 0;
28752c27cb3aSAlexander Motin 
28762c27cb3aSAlexander Motin 		/*
28772c27cb3aSAlexander Motin 		 * Account for thread-less hardware interrupts and
28782c27cb3aSAlexander Motin 		 * other wakeup reasons as if they were context switches.
28792c27cb3aSAlexander Motin 		 */
28802c27cb3aSAlexander Motin 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
28812c27cb3aSAlexander Motin 		if (switchcnt != oldswitchcnt)
28822c27cb3aSAlexander Motin 			continue;
28832c27cb3aSAlexander Motin 		tdq->tdq_switchcnt++;
28842c27cb3aSAlexander Motin 		oldswitchcnt++;
2885ae7a6b38SJeff Roberson 	}
2886b41f1452SDavid Xu }
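
/*
 * The loop above boils down to, roughly: run any local work, try to steal
 * from other CPUs, spin briefly when context switches have been frequent
 * (so a quickly arriving thread does not pay for an IPI plus a low-power
 * wakeup), and only then publish tdq_cpu_idle and drop into cpu_idle().
 */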
2887e7d50326SJeff Roberson 
28887b20fb19SJeff Roberson /*
28897b20fb19SJeff Roberson  * A CPU is entering the scheduler for the first time or a thread is exiting.
28907b20fb19SJeff Roberson  */
28917b20fb19SJeff Roberson void
28927b20fb19SJeff Roberson sched_throw(struct thread *td)
28937b20fb19SJeff Roberson {
289459c68134SJeff Roberson 	struct thread *newtd;
2895ae7a6b38SJeff Roberson 	struct tdq *tdq;
2896ae7a6b38SJeff Roberson 
28977b20fb19SJeff Roberson 	if (td == NULL) {
2898018ff686SJeff Roberson #ifdef SMP
2899018ff686SJeff Roberson 		PCPU_SET(sched, DPCPU_PTR(tdq));
2900018ff686SJeff Roberson #endif
2901ae7a6b38SJeff Roberson 		/* Correct spinlock nesting and acquire the correct lock. */
2902018ff686SJeff Roberson 		tdq = TDQ_SELF();
2903ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
29047b20fb19SJeff Roberson 		spinlock_exit();
29057e3a96eaSJohn Baldwin 		PCPU_SET(switchtime, cpu_ticks());
29067e3a96eaSJohn Baldwin 		PCPU_SET(switchticks, ticks);
29077b20fb19SJeff Roberson 	} else {
2908018ff686SJeff Roberson 		tdq = TDQ_SELF();
2909ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
29109727e637SJeff Roberson 		tdq_load_rem(tdq, td);
2911eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
291292de34dfSJohn Baldwin 		td->td_lastcpu = td->td_oncpu;
291392de34dfSJohn Baldwin 		td->td_oncpu = NOCPU;
29147b20fb19SJeff Roberson 	}
29157b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
291659c68134SJeff Roberson 	newtd = choosethread();
291759c68134SJeff Roberson 	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
291859c68134SJeff Roberson 	cpu_throw(td, newtd);		/* doesn't return */
29197b20fb19SJeff Roberson }
29207b20fb19SJeff Roberson 
2921ae7a6b38SJeff Roberson /*
2922ae7a6b38SJeff Roberson  * This is called from fork_exit().  Just acquire the correct locks and
2923ae7a6b38SJeff Roberson  * let fork do the rest of the work.
2924ae7a6b38SJeff Roberson  */
29257b20fb19SJeff Roberson void
2926fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
29277b20fb19SJeff Roberson {
2928ae7a6b38SJeff Roberson 	struct tdq *tdq;
2929ae7a6b38SJeff Roberson 	int cpuid;
29307b20fb19SJeff Roberson 
29317b20fb19SJeff Roberson 	/*
29327b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
2933ae7a6b38SJeff Roberson 	 * non-nested critical section with the scheduler lock held.
29347b20fb19SJeff Roberson 	 */
2935ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2936018ff686SJeff Roberson 	tdq = TDQ_SELF();
2937ae7a6b38SJeff Roberson 	if (TD_IS_IDLETHREAD(td))
2938ae7a6b38SJeff Roberson 		td->td_lock = TDQ_LOCKPTR(tdq);
2939ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2940ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
294159c68134SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2942eea4f254SJeff Roberson 	lock_profile_obtain_lock_success(
2943eea4f254SJeff Roberson 	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
294428ef18b8SAndriy Gapon 
294528ef18b8SAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
294628ef18b8SAndriy Gapon 	    "prio:%d", td->td_priority);
294728ef18b8SAndriy Gapon 	SDT_PROBE0(sched, , , on__cpu);
29487b20fb19SJeff Roberson }
29497b20fb19SJeff Roberson 
29508f51ad55SJeff Roberson /*
29518f51ad55SJeff Roberson  * Create on first use to catch odd startup conditions.
29528f51ad55SJeff Roberson  */
29538f51ad55SJeff Roberson char *
29548f51ad55SJeff Roberson sched_tdname(struct thread *td)
29558f51ad55SJeff Roberson {
29568f51ad55SJeff Roberson #ifdef KTR
29578f51ad55SJeff Roberson 	struct td_sched *ts;
29588f51ad55SJeff Roberson 
295993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
29608f51ad55SJeff Roberson 	if (ts->ts_name[0] == '\0')
29618f51ad55SJeff Roberson 		snprintf(ts->ts_name, sizeof(ts->ts_name),
29628f51ad55SJeff Roberson 		    "%s tid %d", td->td_name, td->td_tid);
29638f51ad55SJeff Roberson 	return (ts->ts_name);
29648f51ad55SJeff Roberson #else
29658f51ad55SJeff Roberson 	return (td->td_name);
29668f51ad55SJeff Roberson #endif
29678f51ad55SJeff Roberson }
29688f51ad55SJeff Roberson 
296944ad5475SJohn Baldwin #ifdef KTR
297044ad5475SJohn Baldwin void
297144ad5475SJohn Baldwin sched_clear_tdname(struct thread *td)
297244ad5475SJohn Baldwin {
297344ad5475SJohn Baldwin 	struct td_sched *ts;
297444ad5475SJohn Baldwin 
297593ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
297644ad5475SJohn Baldwin 	ts->ts_name[0] = '\0';
297744ad5475SJohn Baldwin }
297844ad5475SJohn Baldwin #endif
297944ad5475SJohn Baldwin 
298007095abfSIvan Voras #ifdef SMP
298107095abfSIvan Voras 
298207095abfSIvan Voras /*
298307095abfSIvan Voras  * Build the CPU topology dump string.  It is called recursively to collect
298407095abfSIvan Voras  * the topology tree.
298507095abfSIvan Voras  */
298607095abfSIvan Voras static int
298707095abfSIvan Voras sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
298807095abfSIvan Voras     int indent)
298907095abfSIvan Voras {
299071a19bdcSAttilio Rao 	char cpusetbuf[CPUSETBUFSIZ];
299107095abfSIvan Voras 	int i, first;
299207095abfSIvan Voras 
299307095abfSIvan Voras 	sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
299419b8a6dbSAndriy Gapon 	    "", 1 + indent / 2, cg->cg_level);
299571a19bdcSAttilio Rao 	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
299671a19bdcSAttilio Rao 	    cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
299707095abfSIvan Voras 	first = TRUE;
299807095abfSIvan Voras 	for (i = 0; i < MAXCPU; i++) {
299971a19bdcSAttilio Rao 		if (CPU_ISSET(i, &cg->cg_mask)) {
300007095abfSIvan Voras 			if (!first)
300107095abfSIvan Voras 				sbuf_printf(sb, ", ");
300207095abfSIvan Voras 			else
300307095abfSIvan Voras 				first = FALSE;
300407095abfSIvan Voras 			sbuf_printf(sb, "%d", i);
300507095abfSIvan Voras 		}
300607095abfSIvan Voras 	}
300707095abfSIvan Voras 	sbuf_printf(sb, "</cpu>\n");
300807095abfSIvan Voras 
300907095abfSIvan Voras 	if (cg->cg_flags != 0) {
3010611daf7eSIvan Voras 		sbuf_printf(sb, "%*s <flags>", indent, "");
301107095abfSIvan Voras 		if ((cg->cg_flags & CG_FLAG_HTT) != 0)
30125368befbSIvan Voras 			sbuf_printf(sb, "<flag name=\"HTT\">HTT group</flag>");
3013a401f2d0SIvan Voras 		if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
3014a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"THREAD\">THREAD group</flag>");
30157b55ab05SJeff Roberson 		if ((cg->cg_flags & CG_FLAG_SMT) != 0)
3016a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"SMT\">SMT group</flag>");
301707095abfSIvan Voras 		sbuf_printf(sb, "</flags>\n");
3018611daf7eSIvan Voras 	}
301907095abfSIvan Voras 
302007095abfSIvan Voras 	if (cg->cg_children > 0) {
302107095abfSIvan Voras 		sbuf_printf(sb, "%*s <children>\n", indent, "");
302207095abfSIvan Voras 		for (i = 0; i < cg->cg_children; i++)
302307095abfSIvan Voras 			sysctl_kern_sched_topology_spec_internal(sb,
302407095abfSIvan Voras 			    &cg->cg_child[i], indent+2);
302507095abfSIvan Voras 		sbuf_printf(sb, "%*s </children>\n", indent, "");
302607095abfSIvan Voras 	}
302707095abfSIvan Voras 	sbuf_printf(sb, "%*s</group>\n", indent, "");
302807095abfSIvan Voras 	return (0);
302907095abfSIvan Voras }
303007095abfSIvan Voras 
303107095abfSIvan Voras /*
303207095abfSIvan Voras  * Sysctl handler for retrieving the topology dump.  It's a wrapper for
303307095abfSIvan Voras  * the recursive sysctl_kern_sched_topology_spec_internal().
303407095abfSIvan Voras  */
303507095abfSIvan Voras static int
303607095abfSIvan Voras sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
303707095abfSIvan Voras {
303807095abfSIvan Voras 	struct sbuf *topo;
303907095abfSIvan Voras 	int err;
304007095abfSIvan Voras 
304107095abfSIvan Voras 	KASSERT(cpu_top != NULL, ("cpu_top isn't initialized"));
304207095abfSIvan Voras 
3043b97fa22cSIan Lepore 	topo = sbuf_new_for_sysctl(NULL, NULL, 512, req);
304407095abfSIvan Voras 	if (topo == NULL)
304507095abfSIvan Voras 		return (ENOMEM);
304607095abfSIvan Voras 
304707095abfSIvan Voras 	sbuf_printf(topo, "<groups>\n");
304807095abfSIvan Voras 	err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1);
304907095abfSIvan Voras 	sbuf_printf(topo, "</groups>\n");
305007095abfSIvan Voras 
305107095abfSIvan Voras 	if (err == 0) {
3052b97fa22cSIan Lepore 		err = sbuf_finish(topo);
305307095abfSIvan Voras 	}
305407095abfSIvan Voras 	sbuf_delete(topo);
305507095abfSIvan Voras 	return (err);
305607095abfSIvan Voras }
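
/*
 * The handler above produces XML along these lines (a rough illustration for
 * a hypothetical single package with two SMT threads; the exact masks, levels
 * and indentation depend on the detected topology):
 *
 *	<groups>
 *	<group level="1" cache-level="0">
 *	 <cpu count="2" mask="...">0, 1</cpu>
 *	 <children>
 *	  <group level="2" cache-level="1">
 *	   <cpu count="2" mask="...">0, 1</cpu>
 *	   <flags><flag name="SMT">SMT group</flag></flags>
 *	  </group>
 *	 </children>
 *	</group>
 *	</groups>
 */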
3057b67cc292SDavid Xu 
305807095abfSIvan Voras #endif
305907095abfSIvan Voras 
3060579895dfSAlexander Motin static int
3061579895dfSAlexander Motin sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
3062579895dfSAlexander Motin {
3063579895dfSAlexander Motin 	int error, new_val, period;
3064579895dfSAlexander Motin 
3065579895dfSAlexander Motin 	period = 1000000 / realstathz;
3066579895dfSAlexander Motin 	new_val = period * sched_slice;
3067579895dfSAlexander Motin 	error = sysctl_handle_int(oidp, &new_val, 0, req);
3068579895dfSAlexander Motin 	if (error != 0 || req->newptr == NULL)
3069579895dfSAlexander Motin 		return (error);
3070579895dfSAlexander Motin 	if (new_val <= 0)
3071579895dfSAlexander Motin 		return (EINVAL);
307237f4e025SAlexander Motin 	sched_slice = imax(1, (new_val + period / 2) / period);
30735e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
307437f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
307537f4e025SAlexander Motin 	    realstathz);
3076579895dfSAlexander Motin 	return (0);
3077579895dfSAlexander Motin }
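
/*
 * A quick worked example of the conversion above, assuming realstathz is 127:
 * period = 1000000 / 127 = 7874us, so a sched_slice of 12 stathz ticks reads
 * back as roughly 94488us via kern.sched.quantum.  Writing 100000 (100ms) to
 * the sysctl then yields sched_slice = (100000 + 3937) / 7874 = 13 ticks.
 */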
3078579895dfSAlexander Motin 
30799727e637SJeff Roberson SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
3080ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
3081e7d50326SJeff Roberson     "Scheduler name");
3082579895dfSAlexander Motin SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
3083579895dfSAlexander Motin     NULL, 0, sysctl_kern_quantum, "I",
308437f4e025SAlexander Motin     "Quantum for timeshare threads in microseconds");
3085ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
308637f4e025SAlexander Motin     "Quantum for timeshare threads in stathz ticks");
3087ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
3088ae7a6b38SJeff Roberson     "Interactivity score threshold");
308937f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW,
309037f4e025SAlexander Motin     &preempt_thresh, 0,
309137f4e025SAlexander Motin     "Maximal (lowest) priority for preemption");
309237f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0,
309337f4e025SAlexander Motin     "Assign static kernel priorities to sleeping threads");
309437f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0,
309537f4e025SAlexander Motin     "Number of times idle thread will spin waiting for new work");
309637f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW,
309737f4e025SAlexander Motin     &sched_idlespinthresh, 0,
309837f4e025SAlexander Motin     "Threshold before we will permit idle thread spinning");
30997b8bfa0dSJeff Roberson #ifdef SMP
3100ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
3101ae7a6b38SJeff Roberson     "Number of hz ticks to keep thread affinity for");
3102ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
3103ae7a6b38SJeff Roberson     "Enables the long-term load balancer");
31047fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
31057fcf154aSJeff Roberson     &balance_interval, 0,
3106579895dfSAlexander Motin     "Average period in stathz ticks to run the long-term balancer");
3107ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
3108ae7a6b38SJeff Roberson     "Attempts to steal work from other cores before idling");
310928994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
311037f4e025SAlexander Motin     "Minimum load on remote CPU before we'll steal");
311197e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, trysteal_limit, CTLFLAG_RW, &trysteal_limit,
311297e9382dSDon Lewis     0, "Topological distance limit for stealing threads in sched_switch()");
311397e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, always_steal, CTLFLAG_RW, &always_steal, 0,
311497e9382dSDon Lewis     "Always run the stealer from the idle thread");
311507095abfSIvan Voras SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
3116c69a1a50SMateusz Guzik     CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
311707095abfSIvan Voras     "XML dump of detected CPU topology");
31187b8bfa0dSJeff Roberson #endif
3119e7d50326SJeff Roberson 
312054b0e65fSJeff Roberson /* ps compat.  All cpu percentages from ULE are weighted. */
3121a5423ea3SJeff Roberson static int ccpu = 0;
3122e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
3123