xref: /freebsd/sys/kern/sched_ule.c (revision 61a74c5ccd65d1a00a96779f16eda8c41ff3a426)
135e6168fSJeff Roberson /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4e7d50326SJeff Roberson  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
535e6168fSJeff Roberson  * All rights reserved.
635e6168fSJeff Roberson  *
735e6168fSJeff Roberson  * Redistribution and use in source and binary forms, with or without
835e6168fSJeff Roberson  * modification, are permitted provided that the following conditions
935e6168fSJeff Roberson  * are met:
1035e6168fSJeff Roberson  * 1. Redistributions of source code must retain the above copyright
1135e6168fSJeff Roberson  *    notice unmodified, this list of conditions, and the following
1235e6168fSJeff Roberson  *    disclaimer.
1335e6168fSJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
1435e6168fSJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
1535e6168fSJeff Roberson  *    documentation and/or other materials provided with the distribution.
1635e6168fSJeff Roberson  *
1735e6168fSJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1835e6168fSJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1935e6168fSJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2035e6168fSJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2135e6168fSJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2235e6168fSJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2335e6168fSJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2435e6168fSJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2535e6168fSJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2635e6168fSJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2735e6168fSJeff Roberson  */
2835e6168fSJeff Roberson 
29ae7a6b38SJeff Roberson /*
30ae7a6b38SJeff Roberson  * This file implements the ULE scheduler.  ULE supports independent CPU
31ae7a6b38SJeff Roberson  * run queues and fine-grained locking.  It has superior interactive
32ae7a6b38SJeff Roberson  * performance under load, even on uniprocessor systems.
33ae7a6b38SJeff Roberson  *
34ae7a6b38SJeff Roberson  * etymology:
35a5423ea3SJeff Roberson  *   ULE is the last three letters in schedule.  It owes its name to a
36ae7a6b38SJeff Roberson  * generic user created for a scheduling system by Paul Mikesell at
37ae7a6b38SJeff Roberson  * Isilon Systems and a general lack of creativity on the part of the author.
38ae7a6b38SJeff Roberson  */
39ae7a6b38SJeff Roberson 
40677b542eSDavid E. O'Brien #include <sys/cdefs.h>
41113dda8aSJeff Roberson __FBSDID("$FreeBSD$");
42677b542eSDavid E. O'Brien 
434da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
444da0d332SPeter Wemm #include "opt_sched.h"
459923b511SScott Long 
4635e6168fSJeff Roberson #include <sys/param.h>
4735e6168fSJeff Roberson #include <sys/systm.h>
482c3490b1SMarcel Moolenaar #include <sys/kdb.h>
4935e6168fSJeff Roberson #include <sys/kernel.h>
5035e6168fSJeff Roberson #include <sys/ktr.h>
51c149e542SAttilio Rao #include <sys/limits.h>
5235e6168fSJeff Roberson #include <sys/lock.h>
5335e6168fSJeff Roberson #include <sys/mutex.h>
5435e6168fSJeff Roberson #include <sys/proc.h>
55245f3abfSJeff Roberson #include <sys/resource.h>
569bacd788SJeff Roberson #include <sys/resourcevar.h>
5735e6168fSJeff Roberson #include <sys/sched.h>
58b3e9e682SRyan Stone #include <sys/sdt.h>
5935e6168fSJeff Roberson #include <sys/smp.h>
6035e6168fSJeff Roberson #include <sys/sx.h>
6135e6168fSJeff Roberson #include <sys/sysctl.h>
6235e6168fSJeff Roberson #include <sys/sysproto.h>
63f5c157d9SJohn Baldwin #include <sys/turnstile.h>
643db720fdSDavid Xu #include <sys/umtx.h>
6535e6168fSJeff Roberson #include <sys/vmmeter.h>
6662fa74d9SJeff Roberson #include <sys/cpuset.h>
6707095abfSIvan Voras #include <sys/sbuf.h>
6835e6168fSJeff Roberson 
69ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS
70ebccf1e3SJoseph Koshy #include <sys/pmckern.h>
71ebccf1e3SJoseph Koshy #endif
72ebccf1e3SJoseph Koshy 
736f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
746f5f25e5SJohn Birrell #include <sys/dtrace_bsd.h>
7561322a0aSAlexander Motin int __read_mostly		dtrace_vtime_active;
766f5f25e5SJohn Birrell dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
776f5f25e5SJohn Birrell #endif
786f5f25e5SJohn Birrell 
7935e6168fSJeff Roberson #include <machine/cpu.h>
8022bf7d9aSJeff Roberson #include <machine/smp.h>
8135e6168fSJeff Roberson 
82ae7a6b38SJeff Roberson #define	KTR_ULE	0
8314618990SJeff Roberson 
840d2cf837SJeff Roberson #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
850d2cf837SJeff Roberson #define	TDQ_NAME_LEN	(sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU)))
866338c579SAttilio Rao #define	TDQ_LOADNAME_LEN	(sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load"))
878f51ad55SJeff Roberson 
886b2f763fSJeff Roberson /*
89ae7a6b38SJeff Roberson  * Thread scheduler specific section.  All fields are protected
90ae7a6b38SJeff Roberson  * by the thread lock.
91ed062c8dSJulian Elischer  */
92ad1e7d28SJulian Elischer struct td_sched {
93ae7a6b38SJeff Roberson 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
94ae7a6b38SJeff Roberson 	short		ts_flags;	/* TSF_* flags. */
95e77f9fedSAdrian Chadd 	int		ts_cpu;		/* CPU that we have affinity for. */
9673daf66fSJeff Roberson 	int		ts_rltick;	/* Real last tick, for affinity. */
97ae7a6b38SJeff Roberson 	int		ts_slice;	/* Ticks of slice remaining. */
98ae7a6b38SJeff Roberson 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
99ae7a6b38SJeff Roberson 	u_int		ts_runtime;	/* Number of ticks we were running */
100ad1e7d28SJulian Elischer 	int		ts_ltick;	/* Last tick that we were running on */
101ad1e7d28SJulian Elischer 	int		ts_ftick;	/* First tick that we were running on */
102ad1e7d28SJulian Elischer 	int		ts_ticks;	/* Tick count */
1038f51ad55SJeff Roberson #ifdef KTR
1048f51ad55SJeff Roberson 	char		ts_name[TS_NAME_LEN];
1058f51ad55SJeff Roberson #endif
106ed062c8dSJulian Elischer };
107ad1e7d28SJulian Elischer /* flags kept in ts_flags */
1087b8bfa0dSJeff Roberson #define	TSF_BOUND	0x0001		/* Thread can not migrate. */
1097b8bfa0dSJeff Roberson #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
11035e6168fSJeff Roberson 
11162fa74d9SJeff Roberson #define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
11262fa74d9SJeff Roberson #define	THREAD_CAN_SCHED(td, cpu)	\
11362fa74d9SJeff Roberson     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
11462fa74d9SJeff Roberson 
11593ccd6bfSKonstantin Belousov _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
11693ccd6bfSKonstantin Belousov     sizeof(struct thread0_storage),
11793ccd6bfSKonstantin Belousov     "increase struct thread0_storage.t0st_sched size");
11893ccd6bfSKonstantin Belousov 
11935e6168fSJeff Roberson /*
12012d56c0fSJohn Baldwin  * Priority ranges used for interactive and non-interactive timeshare
1212dc29adbSJohn Baldwin  * threads.  The timeshare priorities are split up into four ranges.
1222dc29adbSJohn Baldwin  * The first range handles interactive threads.  The last three ranges
1232dc29adbSJohn Baldwin  * (NHALF, x, and NHALF) handle non-interactive threads with the outer
1242dc29adbSJohn Baldwin  * ranges supporting nice values.
12512d56c0fSJohn Baldwin  */
1262dc29adbSJohn Baldwin #define	PRI_TIMESHARE_RANGE	(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
1272dc29adbSJohn Baldwin #define	PRI_INTERACT_RANGE	((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2)
12816705791SAndriy Gapon #define	PRI_BATCH_RANGE		(PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE)
1292dc29adbSJohn Baldwin 
1302dc29adbSJohn Baldwin #define	PRI_MIN_INTERACT	PRI_MIN_TIMESHARE
1312dc29adbSJohn Baldwin #define	PRI_MAX_INTERACT	(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1)
1322dc29adbSJohn Baldwin #define	PRI_MIN_BATCH		(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE)
13312d56c0fSJohn Baldwin #define	PRI_MAX_BATCH		PRI_MAX_TIMESHARE
13412d56c0fSJohn Baldwin 
13512d56c0fSJohn Baldwin /*
136e7d50326SJeff Roberson  * Cpu percentage computation macros and defines.
137e1f89c22SJeff Roberson  *
138e7d50326SJeff Roberson  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
139e7d50326SJeff Roberson  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
1408ab80cf0SJeff Roberson  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
141e7d50326SJeff Roberson  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
142e7d50326SJeff Roberson  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
143e7d50326SJeff Roberson  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
14435e6168fSJeff Roberson  */
145e7d50326SJeff Roberson #define	SCHED_TICK_SECS		10
146e7d50326SJeff Roberson #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
1478ab80cf0SJeff Roberson #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
148e7d50326SJeff Roberson #define	SCHED_TICK_SHIFT	10
149e7d50326SJeff Roberson #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
150eddb4efaSJeff Roberson #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
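
/*
 * A worked example of the above, assuming hz = 1000 (so
 * SCHED_TICK_TARG = 10000): a thread that accumulated 2000 hz ticks of
 * cpu time stores ts_ticks = 2000 << SCHED_TICK_SHIFT, and
 * SCHED_TICK_HZ() recovers 2000.  If its first and last tick are 5000
 * apart, SCHED_TICK_TOTAL() is 5000 and the thread used roughly
 * 2000 / 5000 = 40% of a cpu over the window.
 */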
15135e6168fSJeff Roberson 
15235e6168fSJeff Roberson /*
153e7d50326SJeff Roberson  * These macros determine priorities for non-interactive threads.  They are
154e7d50326SJeff Roberson  * assigned a priority based on their recent cpu utilization as expressed
155e7d50326SJeff Roberson  * by the ratio of ticks to the tick total.  NHALF priorities at the start
156e7d50326SJeff Roberson  * and end of the MIN to MAX timeshare range are only reachable with negative
157e7d50326SJeff Roberson  * or positive nice respectively.
158e7d50326SJeff Roberson  *
159e7d50326SJeff Roberson  * PRI_RANGE:	Priority range for utilization dependent priorities.
160e7d50326SJeff Roberson  * PRI_NRESV:	Number of nice values.
161e7d50326SJeff Roberson  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
162e7d50326SJeff Roberson  * PRI_NICE:	Determines the part of the priority inherited from nice.
163e7d50326SJeff Roberson  */
164e7d50326SJeff Roberson #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
165e7d50326SJeff Roberson #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
16612d56c0fSJohn Baldwin #define	SCHED_PRI_MIN		(PRI_MIN_BATCH + SCHED_PRI_NHALF)
16712d56c0fSJohn Baldwin #define	SCHED_PRI_MAX		(PRI_MAX_BATCH - SCHED_PRI_NHALF)
16878920008SJohn Baldwin #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
169e7d50326SJeff Roberson #define	SCHED_PRI_TICKS(ts)						\
170e7d50326SJeff Roberson     (SCHED_TICK_HZ((ts)) /						\
1711e516cf5SJeff Roberson     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
172e7d50326SJeff Roberson #define	SCHED_PRI_NICE(nice)	(nice)
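
/*
 * A worked example, assuming SCHED_PRI_RANGE = 48 and a window of
 * SCHED_TICK_TOTAL() = 10000 (both vary with the priority ranges and
 * hz): the divisor is roundup(10000, 48) / 48 = 209, so a thread with
 * 5000 hz ticks of recent cpu time contributes SCHED_PRI_TICKS() =
 * 5000 / 209 = 23 steps above SCHED_PRI_MIN, plus SCHED_PRI_NICE()
 * steps for its nice value.
 */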
173e7d50326SJeff Roberson 
174e7d50326SJeff Roberson /*
175e7d50326SJeff Roberson  * These determine the interactivity of a process.  Interactivity differs from
176e7d50326SJeff Roberson  * cpu utilization in that it expresses the voluntary time slept vs time ran
177e7d50326SJeff Roberson  * while cpu utilization includes all time not running.  This more accurately
178e7d50326SJeff Roberson  * models the intent of the thread.
17935e6168fSJeff Roberson  *
180407b0157SJeff Roberson  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
181407b0157SJeff Roberson  *		before throttling back.
182d322132cSJeff Roberson  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
183210491d3SJeff Roberson  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
1849f518f20SAttilio Rao  * INTERACT_THRESH:	Threshold for placement on the current runq.
18535e6168fSJeff Roberson  */
186e7d50326SJeff Roberson #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
187e7d50326SJeff Roberson #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
188210491d3SJeff Roberson #define	SCHED_INTERACT_MAX	(100)
189210491d3SJeff Roberson #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
1904c9612c6SJeff Roberson #define	SCHED_INTERACT_THRESH	(30)
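
/*
 * To illustrate the scale (the authoritative mapping lives in
 * sched_interact_score()): a thread that sleeps far more than it runs
 * scores near 0, equal sleep and run time lands at
 * SCHED_INTERACT_HALF, and a thread that runs far more than it sleeps
 * scores near SCHED_INTERACT_MAX.  Only threads scoring below the
 * threshold are treated as interactive.
 */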
191e1f89c22SJeff Roberson 
1925e5c3873SJeff Roberson /*
1935e5c3873SJeff Roberson  * These parameters determine the slice behavior for batch work.
1945e5c3873SJeff Roberson  */
1955e5c3873SJeff Roberson #define	SCHED_SLICE_DEFAULT_DIVISOR	10	/* ~94 ms, 12 stathz ticks. */
1965e5c3873SJeff Roberson #define	SCHED_SLICE_MIN_DIVISOR		6	/* DEFAULT/MIN = ~16 ms. */
1975e5c3873SJeff Roberson 
1983d7f4117SAlexander Motin /* Flags kept in td_flags. */
1993d7f4117SAlexander Motin #define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
2003d7f4117SAlexander Motin 
20135e6168fSJeff Roberson /*
202e7d50326SJeff Roberson  * tickincr:		Converts a stathz tick into a hz domain scaled by
203e7d50326SJeff Roberson  *			the shift factor.  Without the shift the error rate
204e7d50326SJeff Roberson  *			due to rounding would be unacceptably high.
205e7d50326SJeff Roberson  * realstathz:		stathz is sometimes 0; in that case we run off of hz.
206e7d50326SJeff Roberson  * sched_slice:		Runtime of each thread before rescheduling.
207ae7a6b38SJeff Roberson  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
20835e6168fSJeff Roberson  */
20961322a0aSAlexander Motin static int __read_mostly sched_interact = SCHED_INTERACT_THRESH;
21061322a0aSAlexander Motin static int __read_mostly tickincr = 8 << SCHED_TICK_SHIFT;
21161322a0aSAlexander Motin static int __read_mostly realstathz = 127;	/* reset during boot. */
21261322a0aSAlexander Motin static int __read_mostly sched_slice = 10;	/* reset during boot. */
21361322a0aSAlexander Motin static int __read_mostly sched_slice_min = 1;	/* reset during boot. */
21402e2d6b4SJeff Roberson #ifdef PREEMPTION
21502e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION
21661322a0aSAlexander Motin static int __read_mostly preempt_thresh = PRI_MAX_IDLE;
21702e2d6b4SJeff Roberson #else
21861322a0aSAlexander Motin static int __read_mostly preempt_thresh = PRI_MIN_KERN;
21902e2d6b4SJeff Roberson #endif
22002e2d6b4SJeff Roberson #else
22161322a0aSAlexander Motin static int __read_mostly preempt_thresh = 0;
22202e2d6b4SJeff Roberson #endif
22361322a0aSAlexander Motin static int __read_mostly static_boost = PRI_MIN_BATCH;
22461322a0aSAlexander Motin static int __read_mostly sched_idlespins = 10000;
22561322a0aSAlexander Motin static int __read_mostly sched_idlespinthresh = -1;
226ae7a6b38SJeff Roberson 
22735e6168fSJeff Roberson /*
228ae7a6b38SJeff Roberson  * tdq - per processor runqs and statistics.  All fields are protected by the
229ae7a6b38SJeff Roberson  * tdq_lock.  The load and lowpri may be accessed without the lock to
230ae7a6b38SJeff Roberson  * avoid excess locking in sched_pickcpu().
23135e6168fSJeff Roberson  */
232ad1e7d28SJulian Elischer struct tdq {
23339f819e2SJim Harris 	/*
23439f819e2SJim Harris 	 * Ordered to improve efficiency of cpu_search() and switch().
23539f819e2SJim Harris 	 * tdq_lock is padded to avoid false sharing with tdq_load and
23639f819e2SJim Harris 	 * tdq_cpu_idle.
23739f819e2SJim Harris 	 */
2384ceaf45dSAttilio Rao 	struct mtx_padalign tdq_lock;		/* run queue lock. */
23973daf66fSJeff Roberson 	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
2401690c6c1SJeff Roberson 	volatile int	tdq_load;		/* Aggregate load. */
2419f9ad565SAlexander Motin 	volatile int	tdq_cpu_idle;		/* cpu_idle() is active. */
24273daf66fSJeff Roberson 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
24397e9382dSDon Lewis 	volatile int	tdq_transferable;	/* Transferable thread count. */
24497e9382dSDon Lewis 	volatile short	tdq_switchcnt;		/* Switches this tick. */
24597e9382dSDon Lewis 	volatile short	tdq_oldswitchcnt;	/* Switches last tick. */
24673daf66fSJeff Roberson 	u_char		tdq_lowpri;		/* Lowest priority thread. */
2477789ab32SMark Johnston 	u_char		tdq_owepreempt;		/* Remote preemption pending. */
24873daf66fSJeff Roberson 	u_char		tdq_idx;		/* Current insert index. */
24973daf66fSJeff Roberson 	u_char		tdq_ridx;		/* Current removal index. */
250018ff686SJeff Roberson 	int		tdq_id;			/* cpuid. */
251e7d50326SJeff Roberson 	struct runq	tdq_realtime;		/* real-time run queue. */
252ae7a6b38SJeff Roberson 	struct runq	tdq_timeshare;		/* timeshare run queue. */
253ae7a6b38SJeff Roberson 	struct runq	tdq_idle;		/* Queue of IDLE threads. */
2548f51ad55SJeff Roberson 	char		tdq_name[TDQ_NAME_LEN];
2558f51ad55SJeff Roberson #ifdef KTR
2568f51ad55SJeff Roberson 	char		tdq_loadname[TDQ_LOADNAME_LEN];
2578f51ad55SJeff Roberson #endif
258ae7a6b38SJeff Roberson } __aligned(64);
25935e6168fSJeff Roberson 
2601690c6c1SJeff Roberson /* Idle thread states and config. */
2611690c6c1SJeff Roberson #define	TDQ_RUNNING	1
2621690c6c1SJeff Roberson #define	TDQ_IDLE	2
2637b8bfa0dSJeff Roberson 
26480f86c9fSJeff Roberson #ifdef SMP
26561322a0aSAlexander Motin struct cpu_group __read_mostly *cpu_top;		/* CPU topology */
2667b8bfa0dSJeff Roberson 
26762fa74d9SJeff Roberson #define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
26862fa74d9SJeff Roberson #define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))
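
/*
 * For example, with hz = 1000 the default affinity window is one tick,
 * so SCHED_AFFINITY(ts, 2) asks whether the thread last ran within
 * roughly the past two ticks.  A hypothetical caller, in the style of
 * sched_pickcpu(), might check cache warmth with:
 *
 *	if (SCHED_AFFINITY(ts, CG_SHARE_L2))
 *		return (ts->ts_cpu);
 *
 * to reuse the last cpu while its caches are likely still warm.
 */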
2697b8bfa0dSJeff Roberson 
2707b8bfa0dSJeff Roberson /*
2717b8bfa0dSJeff Roberson  * Run-time tunables.
2727b8bfa0dSJeff Roberson  */
27328994a58SJeff Roberson static int rebalance = 1;
2747fcf154aSJeff Roberson static int balance_interval = 128;	/* Default set in sched_initticks(). */
27561322a0aSAlexander Motin static int __read_mostly affinity;
27661322a0aSAlexander Motin static int __read_mostly steal_idle = 1;
27761322a0aSAlexander Motin static int __read_mostly steal_thresh = 2;
27861322a0aSAlexander Motin static int __read_mostly always_steal = 0;
27961322a0aSAlexander Motin static int __read_mostly trysteal_limit = 2;
28080f86c9fSJeff Roberson 
28135e6168fSJeff Roberson /*
282d2ad694cSJeff Roberson  * One thread queue per processor.
28335e6168fSJeff Roberson  */
28461322a0aSAlexander Motin static struct tdq __read_mostly *balance_tdq;
2857fcf154aSJeff Roberson static int balance_ticks;
286018ff686SJeff Roberson DPCPU_DEFINE_STATIC(struct tdq, tdq);
2872bf95012SAndrew Turner DPCPU_DEFINE_STATIC(uint32_t, randomval);
288dc03363dSJeff Roberson 
289018ff686SJeff Roberson #define	TDQ_SELF()	((struct tdq *)PCPU_GET(sched))
290018ff686SJeff Roberson #define	TDQ_CPU(x)	(DPCPU_ID_PTR((x), tdq))
291018ff686SJeff Roberson #define	TDQ_ID(x)	((x)->tdq_id)
29280f86c9fSJeff Roberson #else	/* !SMP */
293ad1e7d28SJulian Elischer static struct tdq	tdq_cpu;
294dc03363dSJeff Roberson 
29536b36916SJeff Roberson #define	TDQ_ID(x)	(0)
296ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu)
297ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu)
2980a016a05SJeff Roberson #endif
29935e6168fSJeff Roberson 
300ae7a6b38SJeff Roberson #define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
301ae7a6b38SJeff Roberson #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
302ae7a6b38SJeff Roberson #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
303ae7a6b38SJeff Roberson #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
3044ceaf45dSAttilio Rao #define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
305ae7a6b38SJeff Roberson 
3068460a577SJohn Birrell static void sched_priority(struct thread *);
30721381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char);
3088460a577SJohn Birrell static int sched_interact_score(struct thread *);
3098460a577SJohn Birrell static void sched_interact_update(struct thread *);
3108460a577SJohn Birrell static void sched_interact_fork(struct thread *);
3117295465eSAlexander Motin static void sched_pctcpu_update(struct td_sched *, int);
31235e6168fSJeff Roberson 
3135d7ef00cSJeff Roberson /* Operations on per processor queues */
3149727e637SJeff Roberson static struct thread *tdq_choose(struct tdq *);
315018ff686SJeff Roberson static void tdq_setup(struct tdq *, int i);
3169727e637SJeff Roberson static void tdq_load_add(struct tdq *, struct thread *);
3179727e637SJeff Roberson static void tdq_load_rem(struct tdq *, struct thread *);
3189727e637SJeff Roberson static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
3199727e637SJeff Roberson static __inline void tdq_runq_rem(struct tdq *, struct thread *);
320ff256d9cSJeff Roberson static inline int sched_shouldpreempt(int, int, int);
321ad1e7d28SJulian Elischer void tdq_print(int cpu);
322e7d50326SJeff Roberson static void runq_print(struct runq *rq);
323ae7a6b38SJeff Roberson static void tdq_add(struct tdq *, struct thread *, int);
3245d7ef00cSJeff Roberson #ifdef SMP
32597e9382dSDon Lewis static struct thread *tdq_move(struct tdq *, struct tdq *);
326ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *);
32727ee18adSRyan Stone static void tdq_notify(struct tdq *, struct thread *);
3289727e637SJeff Roberson static struct thread *tdq_steal(struct tdq *, int);
3299727e637SJeff Roberson static struct thread *runq_steal(struct runq *, int);
3309727e637SJeff Roberson static int sched_pickcpu(struct thread *, int);
3317fcf154aSJeff Roberson static void sched_balance(void);
33262fa74d9SJeff Roberson static int sched_balance_pair(struct tdq *, struct tdq *);
3339727e637SJeff Roberson static inline struct tdq *sched_setcpu(struct thread *, int, int);
334ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *);
335c47f202bSJeff Roberson static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
33607095abfSIvan Voras static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
33707095abfSIvan Voras static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
33807095abfSIvan Voras     struct cpu_group *cg, int indent);
3395d7ef00cSJeff Roberson #endif
3405d7ef00cSJeff Roberson 
341e7d50326SJeff Roberson static void sched_setup(void *dummy);
342237fdd78SRobert Watson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
343e7d50326SJeff Roberson 
344e7d50326SJeff Roberson static void sched_initticks(void *dummy);
345237fdd78SRobert Watson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
346237fdd78SRobert Watson     NULL);
347e7d50326SJeff Roberson 
348b3e9e682SRyan Stone SDT_PROVIDER_DEFINE(sched);
349b3e9e682SRyan Stone 
350d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
351b3e9e682SRyan Stone     "struct proc *", "uint8_t");
352d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
353b3e9e682SRyan Stone     "struct proc *", "void *");
354d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
355b3e9e682SRyan Stone     "struct proc *", "void *", "int");
356d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
357b3e9e682SRyan Stone     "struct proc *", "uint8_t", "struct thread *");
358d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
359d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
360b3e9e682SRyan Stone     "struct proc *");
361d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , on__cpu);
362d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , remain__cpu);
363d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
364b3e9e682SRyan Stone     "struct proc *");
365b3e9e682SRyan Stone 
3660567b6ccSWarner Losh /*
367ae7a6b38SJeff Roberson  * Print the threads waiting on a run-queue.
368ae7a6b38SJeff Roberson  */
369e7d50326SJeff Roberson static void
370e7d50326SJeff Roberson runq_print(struct runq *rq)
371e7d50326SJeff Roberson {
372e7d50326SJeff Roberson 	struct rqhead *rqh;
3739727e637SJeff Roberson 	struct thread *td;
374e7d50326SJeff Roberson 	int pri;
375e7d50326SJeff Roberson 	int j;
376e7d50326SJeff Roberson 	int i;
377e7d50326SJeff Roberson 
378e7d50326SJeff Roberson 	for (i = 0; i < RQB_LEN; i++) {
379e7d50326SJeff Roberson 		printf("\t\trunq bits %d 0x%zx\n",
380e7d50326SJeff Roberson 		    i, rq->rq_status.rqb_bits[i]);
381e7d50326SJeff Roberson 		for (j = 0; j < RQB_BPW; j++)
382e7d50326SJeff Roberson 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
383e7d50326SJeff Roberson 				pri = j + (i << RQB_L2BPW);
384e7d50326SJeff Roberson 				rqh = &rq->rq_queues[pri];
3859727e637SJeff Roberson 				TAILQ_FOREACH(td, rqh, td_runq) {
386e7d50326SJeff Roberson 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
3879727e637SJeff Roberson 					    td, td->td_name, td->td_priority,
3889727e637SJeff Roberson 					    td->td_rqindex, pri);
389e7d50326SJeff Roberson 				}
390e7d50326SJeff Roberson 			}
391e7d50326SJeff Roberson 	}
392e7d50326SJeff Roberson }
393e7d50326SJeff Roberson 
394ae7a6b38SJeff Roberson /*
395ae7a6b38SJeff Roberson  * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
396ae7a6b38SJeff Roberson  */
39715dc847eSJeff Roberson void
398ad1e7d28SJulian Elischer tdq_print(int cpu)
39915dc847eSJeff Roberson {
400ad1e7d28SJulian Elischer 	struct tdq *tdq;
40115dc847eSJeff Roberson 
402ad1e7d28SJulian Elischer 	tdq = TDQ_CPU(cpu);
40315dc847eSJeff Roberson 
404c47f202bSJeff Roberson 	printf("tdq %d:\n", TDQ_ID(tdq));
40562fa74d9SJeff Roberson 	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
40662fa74d9SJeff Roberson 	printf("\tLock name:      %s\n", tdq->tdq_name);
407d2ad694cSJeff Roberson 	printf("\tload:           %d\n", tdq->tdq_load);
4081690c6c1SJeff Roberson 	printf("\tswitch cnt:     %d\n", tdq->tdq_switchcnt);
4091690c6c1SJeff Roberson 	printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
410e7d50326SJeff Roberson 	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
4113f872f85SJeff Roberson 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
4121690c6c1SJeff Roberson 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
4131690c6c1SJeff Roberson 	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
414e7d50326SJeff Roberson 	printf("\trealtime runq:\n");
415e7d50326SJeff Roberson 	runq_print(&tdq->tdq_realtime);
416e7d50326SJeff Roberson 	printf("\ttimeshare runq:\n");
417e7d50326SJeff Roberson 	runq_print(&tdq->tdq_timeshare);
418e7d50326SJeff Roberson 	printf("\tidle runq:\n");
419e7d50326SJeff Roberson 	runq_print(&tdq->tdq_idle);
42015dc847eSJeff Roberson }
42115dc847eSJeff Roberson 
422ff256d9cSJeff Roberson static inline int
423ff256d9cSJeff Roberson sched_shouldpreempt(int pri, int cpri, int remote)
424ff256d9cSJeff Roberson {
425ff256d9cSJeff Roberson 	/*
426ff256d9cSJeff Roberson 	 * If the new priority is not better than the current priority there is
427ff256d9cSJeff Roberson 	 * nothing to do.
428ff256d9cSJeff Roberson 	 */
429ff256d9cSJeff Roberson 	if (pri >= cpri)
430ff256d9cSJeff Roberson 		return (0);
431ff256d9cSJeff Roberson 	/*
432ff256d9cSJeff Roberson 	 * Always preempt idle.
433ff256d9cSJeff Roberson 	 */
434ff256d9cSJeff Roberson 	if (cpri >= PRI_MIN_IDLE)
435ff256d9cSJeff Roberson 		return (1);
436ff256d9cSJeff Roberson 	/*
437ff256d9cSJeff Roberson 	 * If preemption is disabled don't preempt others.
438ff256d9cSJeff Roberson 	 */
439ff256d9cSJeff Roberson 	if (preempt_thresh == 0)
440ff256d9cSJeff Roberson 		return (0);
441ff256d9cSJeff Roberson 	/*
442ff256d9cSJeff Roberson 	 * Preempt if we exceed the threshold.
443ff256d9cSJeff Roberson 	 */
444ff256d9cSJeff Roberson 	if (pri <= preempt_thresh)
445ff256d9cSJeff Roberson 		return (1);
446ff256d9cSJeff Roberson 	/*
44712d56c0fSJohn Baldwin 	 * If we're interactive or better and something non-interactive
44812d56c0fSJohn Baldwin 	 * or worse is running, preempt only on remote processors.
449ff256d9cSJeff Roberson 	 */
45012d56c0fSJohn Baldwin 	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
451ff256d9cSJeff Roberson 		return (1);
452ff256d9cSJeff Roberson 	return (0);
453ff256d9cSJeff Roberson }
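
/*
 * For example, with the default PREEMPTION threshold of PRI_MIN_KERN,
 * a waking interrupt thread preempts a running timeshare thread, a
 * batch thread never preempts another batch thread locally, an
 * interactive thread may preempt a batch thread on a remote cpu, and
 * anything preempts the idle thread.
 */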
454ff256d9cSJeff Roberson 
455ae7a6b38SJeff Roberson /*
456ae7a6b38SJeff Roberson  * Add a thread to the actual run-queue.  Keeps transferable counts up to
457ae7a6b38SJeff Roberson  * date with what is actually on the run-queue.  Selects the correct
458ae7a6b38SJeff Roberson  * queue position for timeshare threads.
459ae7a6b38SJeff Roberson  */
460155b9987SJeff Roberson static __inline void
4619727e637SJeff Roberson tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
462155b9987SJeff Roberson {
4639727e637SJeff Roberson 	struct td_sched *ts;
464c143ac21SJeff Roberson 	u_char pri;
465c143ac21SJeff Roberson 
466ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
467*61a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
46873daf66fSJeff Roberson 
4699727e637SJeff Roberson 	pri = td->td_priority;
47093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
4719727e637SJeff Roberson 	TD_SET_RUNQ(td);
4729727e637SJeff Roberson 	if (THREAD_CAN_MIGRATE(td)) {
473d2ad694cSJeff Roberson 		tdq->tdq_transferable++;
474ad1e7d28SJulian Elischer 		ts->ts_flags |= TSF_XFERABLE;
47580f86c9fSJeff Roberson 	}
47612d56c0fSJohn Baldwin 	if (pri < PRI_MIN_BATCH) {
477c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_realtime;
47812d56c0fSJohn Baldwin 	} else if (pri <= PRI_MAX_BATCH) {
479c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_timeshare;
48012d56c0fSJohn Baldwin 		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
481e7d50326SJeff Roberson 			("Invalid priority %d on timeshare runq", pri));
482e7d50326SJeff Roberson 		/*
483e7d50326SJeff Roberson 		 * This queue contains only priorities between MIN and MAX
484e7d50326SJeff Roberson 		 * batch.  Use the whole queue to represent these values.
485e7d50326SJeff Roberson 		 */
486c47f202bSJeff Roberson 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
48716705791SAndriy Gapon 			pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE;
488e7d50326SJeff Roberson 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
4893f872f85SJeff Roberson 			/*
4903f872f85SJeff Roberson 			 * This effectively shortens the queue by one so we
4913f872f85SJeff Roberson 			 * can have a one slot difference between idx and
4923f872f85SJeff Roberson 			 * ridx while we wait for threads to drain.
4933f872f85SJeff Roberson 			 */
4943f872f85SJeff Roberson 			if (tdq->tdq_ridx != tdq->tdq_idx &&
4953f872f85SJeff Roberson 			    pri == tdq->tdq_ridx)
4964499aff6SJeff Roberson 				pri = (unsigned char)(pri - 1) % RQ_NQS;
497e7d50326SJeff Roberson 		} else
4983f872f85SJeff Roberson 			pri = tdq->tdq_ridx;
4999727e637SJeff Roberson 		runq_add_pri(ts->ts_runq, td, pri, flags);
500c143ac21SJeff Roberson 		return;
501e7d50326SJeff Roberson 	} else
50273daf66fSJeff Roberson 		ts->ts_runq = &tdq->tdq_idle;
5039727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
50473daf66fSJeff Roberson }
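
/*
 * A worked example of the circular timeshare queue insertion above,
 * assuming RQ_NQS = 64: a priority in the middle of the batch range
 * scales to about RQ_NQS / 2 = 32 queues ahead of the insert index, so
 * with tdq_idx = 10 the thread lands at queue (32 + 10) % 64 = 42 and
 * will not run until the removal index sweeps past the intervening
 * queues.
 */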
50573daf66fSJeff Roberson 
50673daf66fSJeff Roberson /*
507ae7a6b38SJeff Roberson  * Remove a thread from a run-queue.  This typically happens when a thread
508ae7a6b38SJeff Roberson  * is selected to run.  Running threads are not on the queue and the
509ae7a6b38SJeff Roberson  * transferable count does not reflect them.
510ae7a6b38SJeff Roberson  */
511155b9987SJeff Roberson static __inline void
5129727e637SJeff Roberson tdq_runq_rem(struct tdq *tdq, struct thread *td)
513155b9987SJeff Roberson {
5149727e637SJeff Roberson 	struct td_sched *ts;
5159727e637SJeff Roberson 
51693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
517ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
518*61a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
519ae7a6b38SJeff Roberson 	KASSERT(ts->ts_runq != NULL,
5209727e637SJeff Roberson 	    ("tdq_runq_remove: thread %p null ts_runq", td));
521ad1e7d28SJulian Elischer 	if (ts->ts_flags & TSF_XFERABLE) {
522d2ad694cSJeff Roberson 		tdq->tdq_transferable--;
523ad1e7d28SJulian Elischer 		ts->ts_flags &= ~TSF_XFERABLE;
52480f86c9fSJeff Roberson 	}
5253f872f85SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
5263f872f85SJeff Roberson 		if (tdq->tdq_idx != tdq->tdq_ridx)
5279727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
528e7d50326SJeff Roberson 		else
5299727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, NULL);
5303f872f85SJeff Roberson 	} else
5319727e637SJeff Roberson 		runq_remove(ts->ts_runq, td);
532155b9987SJeff Roberson }
533155b9987SJeff Roberson 
534ae7a6b38SJeff Roberson /*
535ae7a6b38SJeff Roberson  * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
536ae7a6b38SJeff Roberson  * for this thread to the referenced thread queue.
537ae7a6b38SJeff Roberson  */
538a8949de2SJeff Roberson static void
5399727e637SJeff Roberson tdq_load_add(struct tdq *tdq, struct thread *td)
5405d7ef00cSJeff Roberson {
541ae7a6b38SJeff Roberson 
542ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
543*61a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
54403d17db7SJeff Roberson 
545d2ad694cSJeff Roberson 	tdq->tdq_load++;
5461b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
547d2ad694cSJeff Roberson 		tdq->tdq_sysload++;
5488f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
549d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
5505d7ef00cSJeff Roberson }
55115dc847eSJeff Roberson 
552ae7a6b38SJeff Roberson /*
553ae7a6b38SJeff Roberson  * Remove the load from a thread that is transitioning to a sleep state or
554ae7a6b38SJeff Roberson  * exiting.
555ae7a6b38SJeff Roberson  */
556a8949de2SJeff Roberson static void
5579727e637SJeff Roberson tdq_load_rem(struct tdq *tdq, struct thread *td)
5585d7ef00cSJeff Roberson {
559ae7a6b38SJeff Roberson 
560ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
561*61a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
562ae7a6b38SJeff Roberson 	KASSERT(tdq->tdq_load != 0,
563c47f202bSJeff Roberson 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
56403d17db7SJeff Roberson 
565d2ad694cSJeff Roberson 	tdq->tdq_load--;
5661b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
56703d17db7SJeff Roberson 		tdq->tdq_sysload--;
5688f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
569d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
57015dc847eSJeff Roberson }
57115dc847eSJeff Roberson 
572356500a3SJeff Roberson /*
5735e5c3873SJeff Roberson  * Bound timeshare latency by decreasing slice size as load increases.  We
5745e5c3873SJeff Roberson  * consider the maximum latency to be proportional to the number of threads
5755e5c3873SJeff Roberson  * waiting to run aside from curthread, and target no more than sched_slice
5765e5c3873SJeff Roberson  * of latency but no less than sched_slice_min of runtime.
5775e5c3873SJeff Roberson  */
5785e5c3873SJeff Roberson static inline int
5795e5c3873SJeff Roberson tdq_slice(struct tdq *tdq)
5805e5c3873SJeff Roberson {
5815e5c3873SJeff Roberson 	int load;
5825e5c3873SJeff Roberson 
5835e5c3873SJeff Roberson 	/*
5845e5c3873SJeff Roberson 	 * It is safe to use tdq_sysload here because this is called from
5855e5c3873SJeff Roberson 	 * contexts where timeshare threads are running and so there
5865e5c3873SJeff Roberson 	 * cannot be higher priority load in the system.
5875e5c3873SJeff Roberson 	 */
5885e5c3873SJeff Roberson 	load = tdq->tdq_sysload - 1;
5895e5c3873SJeff Roberson 	if (load >= SCHED_SLICE_MIN_DIVISOR)
5905e5c3873SJeff Roberson 		return (sched_slice_min);
5915e5c3873SJeff Roberson 	if (load <= 1)
5925e5c3873SJeff Roberson 		return (sched_slice);
5935e5c3873SJeff Roberson 	return (sched_slice / load);
5945e5c3873SJeff Roberson }
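
/*
 * For example, with the defaults (sched_slice of ~94 ms and
 * sched_slice_min of ~16 ms), a lone timeshare thread or one of a pair
 * gets the full slice (load <= 1), four runnable threads yield
 * sched_slice / 3, and seven or more pin the slice at sched_slice_min.
 */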
5955e5c3873SJeff Roberson 
5965e5c3873SJeff Roberson /*
59762fa74d9SJeff Roberson  * Set lowpri to its exact value by searching the run-queue and
59862fa74d9SJeff Roberson  * evaluating curthread.  curthread may be passed as an optimization.
599356500a3SJeff Roberson  */
60022bf7d9aSJeff Roberson static void
60162fa74d9SJeff Roberson tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
60262fa74d9SJeff Roberson {
60362fa74d9SJeff Roberson 	struct thread *td;
60462fa74d9SJeff Roberson 
60562fa74d9SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
60662fa74d9SJeff Roberson 	if (ctd == NULL)
60762fa74d9SJeff Roberson 		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
6089727e637SJeff Roberson 	td = tdq_choose(tdq);
6099727e637SJeff Roberson 	if (td == NULL || td->td_priority > ctd->td_priority)
61062fa74d9SJeff Roberson 		tdq->tdq_lowpri = ctd->td_priority;
61162fa74d9SJeff Roberson 	else
61262fa74d9SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
61362fa74d9SJeff Roberson }
61462fa74d9SJeff Roberson 
61562fa74d9SJeff Roberson #ifdef SMP
6169129dd59SPedro F. Giffuni /*
6179129dd59SPedro F. Giffuni  * We need some randomness. Implement a classic Linear Congruential
6189129dd59SPedro F. Giffuni  * Generator X_{n+1}=(aX_n+c) mod m. These values are optimized for
6199129dd59SPedro F. Giffuni  * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits
6209129dd59SPedro F. Giffuni  * of the random state (in the low bits of our answer) to keep
6219129dd59SPedro F. Giffuni  * the maximum randomness.
6229129dd59SPedro F. Giffuni  */
6239129dd59SPedro F. Giffuni static uint32_t
6249129dd59SPedro F. Giffuni sched_random(void)
6259129dd59SPedro F. Giffuni {
6269129dd59SPedro F. Giffuni 	uint32_t *rndptr;
6279129dd59SPedro F. Giffuni 
6289129dd59SPedro F. Giffuni 	rndptr = DPCPU_PTR(randomval);
6299129dd59SPedro F. Giffuni 	*rndptr = *rndptr * 69069 + 5;
6309129dd59SPedro F. Giffuni 
6319129dd59SPedro F. Giffuni 	return (*rndptr >> 16);
6329129dd59SPedro F. Giffuni }
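
/*
 * A minimal userland sketch of the same generator, should one want to
 * examine its output (hypothetical test code, not part of the kernel
 * build):
 *
 *	uint32_t state = 1, r;
 *	state = state * 69069 + 5;
 *	r = state >> 16;
 *
 * The low-order bits of a power-of-two-modulus LCG have short periods,
 * so returning the upper 16 bits keeps the higher-quality bits.
 */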
6339129dd59SPedro F. Giffuni 
63462fa74d9SJeff Roberson struct cpu_search {
635c76ee827SJeff Roberson 	cpuset_t cs_mask;
63636acfc65SAlexander Motin 	u_int	cs_prefer;
63736acfc65SAlexander Motin 	int	cs_pri;		/* Min priority for low. */
63836acfc65SAlexander Motin 	int	cs_limit;	/* Max load for low, min load for high. */
63936acfc65SAlexander Motin 	int	cs_cpu;
64036acfc65SAlexander Motin 	int	cs_load;
64162fa74d9SJeff Roberson };
64262fa74d9SJeff Roberson 
64362fa74d9SJeff Roberson #define	CPU_SEARCH_LOWEST	0x1
64462fa74d9SJeff Roberson #define	CPU_SEARCH_HIGHEST	0x2
64562fa74d9SJeff Roberson #define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)
64662fa74d9SJeff Roberson 
6472499a5ccSKonstantin Belousov static __always_inline int cpu_search(const struct cpu_group *cg,
6482499a5ccSKonstantin Belousov     struct cpu_search *low, struct cpu_search *high, const int match);
6492499a5ccSKonstantin Belousov int __noinline cpu_search_lowest(const struct cpu_group *cg,
6502499a5ccSKonstantin Belousov     struct cpu_search *low);
6512499a5ccSKonstantin Belousov int __noinline cpu_search_highest(const struct cpu_group *cg,
65262fa74d9SJeff Roberson     struct cpu_search *high);
6532499a5ccSKonstantin Belousov int __noinline cpu_search_both(const struct cpu_group *cg,
6542499a5ccSKonstantin Belousov     struct cpu_search *low, struct cpu_search *high);
65562fa74d9SJeff Roberson 
65662fa74d9SJeff Roberson /*
65762fa74d9SJeff Roberson  * Search the tree of cpu_groups for the lowest or highest loaded cpu
65862fa74d9SJeff Roberson  * according to the match argument.  This routine actually compares the
65962fa74d9SJeff Roberson  * load on all paths through the tree and finds the least loaded cpu on
66062fa74d9SJeff Roberson  * the least loaded path, which may differ from the least loaded cpu in
661db4fcadfSConrad Meyer  * the system.  This balances work among caches and buses.
66262fa74d9SJeff Roberson  *
66362fa74d9SJeff Roberson  * This inline is instantiated in three forms below using constants for the
66462fa74d9SJeff Roberson  * match argument.  It is reduced to the minimum set for each case.  It is
66562fa74d9SJeff Roberson  * also recursive to the depth of the tree.
66662fa74d9SJeff Roberson  */
6672499a5ccSKonstantin Belousov static __always_inline int
66836acfc65SAlexander Motin cpu_search(const struct cpu_group *cg, struct cpu_search *low,
66962fa74d9SJeff Roberson     struct cpu_search *high, const int match)
67062fa74d9SJeff Roberson {
67162fa74d9SJeff Roberson 	struct cpu_search lgroup;
67262fa74d9SJeff Roberson 	struct cpu_search hgroup;
67336acfc65SAlexander Motin 	cpuset_t cpumask;
67462fa74d9SJeff Roberson 	struct cpu_group *child;
67536acfc65SAlexander Motin 	struct tdq *tdq;
6760567b6ccSWarner Losh 	int cpu, i, hload, lload, load, total, rnd;
67762fa74d9SJeff Roberson 
67836acfc65SAlexander Motin 	total = 0;
67936acfc65SAlexander Motin 	cpumask = cg->cg_mask;
68062fa74d9SJeff Roberson 	if (match & CPU_SEARCH_LOWEST) {
68136acfc65SAlexander Motin 		lload = INT_MAX;
68262fa74d9SJeff Roberson 		lgroup = *low;
68362fa74d9SJeff Roberson 	}
68462fa74d9SJeff Roberson 	if (match & CPU_SEARCH_HIGHEST) {
68570801abeSAlexander Motin 		hload = INT_MIN;
68662fa74d9SJeff Roberson 		hgroup = *high;
68762fa74d9SJeff Roberson 	}
68836acfc65SAlexander Motin 
68936acfc65SAlexander Motin 	/* Iterate through the child CPU groups and then remaining CPUs. */
69058909b74SAlexander Motin 	for (i = cg->cg_children, cpu = mp_maxid; ; ) {
69170801abeSAlexander Motin 		if (i == 0) {
69258909b74SAlexander Motin #ifdef HAVE_INLINE_FFSL
69358909b74SAlexander Motin 			cpu = CPU_FFS(&cpumask) - 1;
69458909b74SAlexander Motin #else
69570801abeSAlexander Motin 			while (cpu >= 0 && !CPU_ISSET(cpu, &cpumask))
69670801abeSAlexander Motin 				cpu--;
69758909b74SAlexander Motin #endif
69870801abeSAlexander Motin 			if (cpu < 0)
69936acfc65SAlexander Motin 				break;
70036acfc65SAlexander Motin 			child = NULL;
70136acfc65SAlexander Motin 		} else
70270801abeSAlexander Motin 			child = &cg->cg_child[i - 1];
70336acfc65SAlexander Motin 
70470801abeSAlexander Motin 		if (match & CPU_SEARCH_LOWEST)
70570801abeSAlexander Motin 			lgroup.cs_cpu = -1;
70670801abeSAlexander Motin 		if (match & CPU_SEARCH_HIGHEST)
70770801abeSAlexander Motin 			hgroup.cs_cpu = -1;
70836acfc65SAlexander Motin 		if (child) {			/* Handle child CPU group. */
7099825eadfSRyan Libby 			CPU_ANDNOT(&cpumask, &child->cg_mask);
71062fa74d9SJeff Roberson 			switch (match) {
71162fa74d9SJeff Roberson 			case CPU_SEARCH_LOWEST:
71262fa74d9SJeff Roberson 				load = cpu_search_lowest(child, &lgroup);
71362fa74d9SJeff Roberson 				break;
71462fa74d9SJeff Roberson 			case CPU_SEARCH_HIGHEST:
71562fa74d9SJeff Roberson 				load = cpu_search_highest(child, &hgroup);
71662fa74d9SJeff Roberson 				break;
71762fa74d9SJeff Roberson 			case CPU_SEARCH_BOTH:
71862fa74d9SJeff Roberson 				load = cpu_search_both(child, &lgroup, &hgroup);
71962fa74d9SJeff Roberson 				break;
72062fa74d9SJeff Roberson 			}
72136acfc65SAlexander Motin 		} else {			/* Handle child CPU. */
72258909b74SAlexander Motin 			CPU_CLR(cpu, &cpumask);
72336acfc65SAlexander Motin 			tdq = TDQ_CPU(cpu);
72436acfc65SAlexander Motin 			load = tdq->tdq_load * 256;
725b250ad34SWarner Losh 			rnd = sched_random() % 32;
72636acfc65SAlexander Motin 			if (match & CPU_SEARCH_LOWEST) {
72736acfc65SAlexander Motin 				if (cpu == low->cs_prefer)
72836acfc65SAlexander Motin 					load -= 64;
72936acfc65SAlexander Motin 				/* If that CPU is allowed, record it and its load. */
73070801abeSAlexander Motin 				if (tdq->tdq_lowpri > lgroup.cs_pri &&
73170801abeSAlexander Motin 				    tdq->tdq_load <= lgroup.cs_limit &&
73270801abeSAlexander Motin 				    CPU_ISSET(cpu, &lgroup.cs_mask)) {
73336acfc65SAlexander Motin 					lgroup.cs_cpu = cpu;
73436acfc65SAlexander Motin 					lgroup.cs_load = load - rnd;
73536acfc65SAlexander Motin 				}
73662fa74d9SJeff Roberson 			}
73762fa74d9SJeff Roberson 			if (match & CPU_SEARCH_HIGHEST)
73870801abeSAlexander Motin 				if (tdq->tdq_load >= hgroup.cs_limit &&
73970801abeSAlexander Motin 				    tdq->tdq_transferable &&
74070801abeSAlexander Motin 				    CPU_ISSET(cpu, &hgroup.cs_mask)) {
74136acfc65SAlexander Motin 					hgroup.cs_cpu = cpu;
74236acfc65SAlexander Motin 					hgroup.cs_load = load - rnd;
74362fa74d9SJeff Roberson 				}
74462fa74d9SJeff Roberson 		}
74536acfc65SAlexander Motin 		total += load;
74662fa74d9SJeff Roberson 
74736acfc65SAlexander Motin 		/* We have info about child item. Compare it. */
74836acfc65SAlexander Motin 		if (match & CPU_SEARCH_LOWEST) {
74970801abeSAlexander Motin 			if (lgroup.cs_cpu >= 0 &&
7506022f0bcSAlexander Motin 			    (load < lload ||
7516022f0bcSAlexander Motin 			     (load == lload && lgroup.cs_load < low->cs_load))) {
75236acfc65SAlexander Motin 				lload = load;
75336acfc65SAlexander Motin 				low->cs_cpu = lgroup.cs_cpu;
75436acfc65SAlexander Motin 				low->cs_load = lgroup.cs_load;
75536acfc65SAlexander Motin 			}
75636acfc65SAlexander Motin 		}
75736acfc65SAlexander Motin 		if (match & CPU_SEARCH_HIGHEST)
75870801abeSAlexander Motin 			if (hgroup.cs_cpu >= 0 &&
7596022f0bcSAlexander Motin 			    (load > hload ||
7606022f0bcSAlexander Motin 			     (load == hload && hgroup.cs_load > high->cs_load))) {
76136acfc65SAlexander Motin 				hload = load;
76236acfc65SAlexander Motin 				high->cs_cpu = hgroup.cs_cpu;
76336acfc65SAlexander Motin 				high->cs_load = hgroup.cs_load;
76436acfc65SAlexander Motin 			}
76570801abeSAlexander Motin 		if (child) {
76670801abeSAlexander Motin 			i--;
76770801abeSAlexander Motin 			if (i == 0 && CPU_EMPTY(&cpumask))
76870801abeSAlexander Motin 				break;
76958909b74SAlexander Motin 		}
77058909b74SAlexander Motin #ifndef HAVE_INLINE_FFSL
77158909b74SAlexander Motin 		else
77270801abeSAlexander Motin 			cpu--;
77358909b74SAlexander Motin #endif
77462fa74d9SJeff Roberson 	}
77562fa74d9SJeff Roberson 	return (total);
77662fa74d9SJeff Roberson }
77762fa74d9SJeff Roberson 
77862fa74d9SJeff Roberson /*
77962fa74d9SJeff Roberson  * cpu_search instantiations must pass constants to maintain the inline
78062fa74d9SJeff Roberson  * optimization.
78162fa74d9SJeff Roberson  */
78262fa74d9SJeff Roberson int
78336acfc65SAlexander Motin cpu_search_lowest(const struct cpu_group *cg, struct cpu_search *low)
78462fa74d9SJeff Roberson {
78562fa74d9SJeff Roberson 	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
78662fa74d9SJeff Roberson }
78762fa74d9SJeff Roberson 
78862fa74d9SJeff Roberson int
78936acfc65SAlexander Motin cpu_search_highest(const struct cpu_group *cg, struct cpu_search *high)
79062fa74d9SJeff Roberson {
79162fa74d9SJeff Roberson 	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
79262fa74d9SJeff Roberson }
79362fa74d9SJeff Roberson 
79462fa74d9SJeff Roberson int
79536acfc65SAlexander Motin cpu_search_both(const struct cpu_group *cg, struct cpu_search *low,
79662fa74d9SJeff Roberson     struct cpu_search *high)
79762fa74d9SJeff Roberson {
79862fa74d9SJeff Roberson 	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
79962fa74d9SJeff Roberson }
80062fa74d9SJeff Roberson 
80162fa74d9SJeff Roberson /*
80262fa74d9SJeff Roberson  * Find the cpu with the least load via the least loaded path that has a
80362fa74d9SJeff Roberson  * lowpri greater than pri.  A pri of -1 indicates any priority is
80462fa74d9SJeff Roberson  * acceptable.
80562fa74d9SJeff Roberson  */
80662fa74d9SJeff Roberson static inline int
80736acfc65SAlexander Motin sched_lowest(const struct cpu_group *cg, cpuset_t mask, int pri, int maxload,
80836acfc65SAlexander Motin     int prefer)
80962fa74d9SJeff Roberson {
81062fa74d9SJeff Roberson 	struct cpu_search low;
81162fa74d9SJeff Roberson 
81262fa74d9SJeff Roberson 	low.cs_cpu = -1;
81336acfc65SAlexander Motin 	low.cs_prefer = prefer;
81462fa74d9SJeff Roberson 	low.cs_mask = mask;
81536acfc65SAlexander Motin 	low.cs_pri = pri;
81636acfc65SAlexander Motin 	low.cs_limit = maxload;
81762fa74d9SJeff Roberson 	cpu_search_lowest(cg, &low);
81862fa74d9SJeff Roberson 	return low.cs_cpu;
81962fa74d9SJeff Roberson }
82062fa74d9SJeff Roberson 
82162fa74d9SJeff Roberson /*
82262fa74d9SJeff Roberson  * Find the cpu with the highest load via the highest loaded path.
82362fa74d9SJeff Roberson  */
82462fa74d9SJeff Roberson static inline int
82536acfc65SAlexander Motin sched_highest(const struct cpu_group *cg, cpuset_t mask, int minload)
82662fa74d9SJeff Roberson {
82762fa74d9SJeff Roberson 	struct cpu_search high;
82862fa74d9SJeff Roberson 
82962fa74d9SJeff Roberson 	high.cs_cpu = -1;
83062fa74d9SJeff Roberson 	high.cs_mask = mask;
83162fa74d9SJeff Roberson 	high.cs_limit = minload;
83262fa74d9SJeff Roberson 	cpu_search_highest(cg, &high);
83362fa74d9SJeff Roberson 	return high.cs_cpu;
83462fa74d9SJeff Roberson }
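
/*
 * A hypothetical caller, in the spirit of sched_pickcpu(): find the
 * least loaded cpu in the thread's cpuset that is running something of
 * lower priority, preferring the thread's last cpu:
 *
 *	cpu = sched_lowest(cpu_top, td->td_cpuset->cs_mask,
 *	    td->td_priority, INT_MAX, ts->ts_cpu);
 *
 * Passing a pri of -1 and a maxload of INT_MAX instead accepts any cpu
 * in the mask.
 */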
83562fa74d9SJeff Roberson 
83662fa74d9SJeff Roberson static void
83762fa74d9SJeff Roberson sched_balance_group(struct cpu_group *cg)
83862fa74d9SJeff Roberson {
839018ff686SJeff Roberson 	struct tdq *tdq;
84036acfc65SAlexander Motin 	cpuset_t hmask, lmask;
84136acfc65SAlexander Motin 	int high, low, anylow;
84262fa74d9SJeff Roberson 
84336acfc65SAlexander Motin 	CPU_FILL(&hmask);
84462fa74d9SJeff Roberson 	for (;;) {
84597e9382dSDon Lewis 		high = sched_highest(cg, hmask, 2);
84636acfc65SAlexander Motin 		/* Stop if there are no more CPUs with transferable threads. */
84736acfc65SAlexander Motin 		if (high == -1)
84862fa74d9SJeff Roberson 			break;
84936acfc65SAlexander Motin 		CPU_CLR(high, &hmask);
85036acfc65SAlexander Motin 		CPU_COPY(&hmask, &lmask);
85136acfc65SAlexander Motin 		/* Stop if there are no CPUs left to search for low. */
85236acfc65SAlexander Motin 		if (CPU_EMPTY(&lmask))
85362fa74d9SJeff Roberson 			break;
85436acfc65SAlexander Motin 		anylow = 1;
855018ff686SJeff Roberson 		tdq = TDQ_CPU(high);
85636acfc65SAlexander Motin nextlow:
857018ff686SJeff Roberson 		low = sched_lowest(cg, lmask, -1, tdq->tdq_load - 1, high);
85836acfc65SAlexander Motin 		/* Stop if we searched thoroughly and found no less loaded CPU. */
85936acfc65SAlexander Motin 		if (anylow && low == -1)
86036acfc65SAlexander Motin 			break;
86136acfc65SAlexander Motin 		/* Go to the next high if we found no less loaded CPU. */
86236acfc65SAlexander Motin 		if (low == -1)
86336acfc65SAlexander Motin 			continue;
86436acfc65SAlexander Motin 		/* Transfer thread from high to low. */
865018ff686SJeff Roberson 		if (sched_balance_pair(tdq, TDQ_CPU(low))) {
86636acfc65SAlexander Motin 			/* CPU that got thread can no longer be a donor. */
86736acfc65SAlexander Motin 			CPU_CLR(low, &hmask);
86836acfc65SAlexander Motin 		} else {
86962fa74d9SJeff Roberson 			/*
87036acfc65SAlexander Motin 			 * If that failed, then there are no threads on
87136acfc65SAlexander Motin 			 * high that can run on this low.  Drop low from
87236acfc65SAlexander Motin 			 * the low mask and look for a different one.
87362fa74d9SJeff Roberson 			 */
87436acfc65SAlexander Motin 			CPU_CLR(low, &lmask);
87536acfc65SAlexander Motin 			anylow = 0;
87636acfc65SAlexander Motin 			goto nextlow;
87762fa74d9SJeff Roberson 		}
87836acfc65SAlexander Motin 	}
87962fa74d9SJeff Roberson }
88062fa74d9SJeff Roberson 
88162fa74d9SJeff Roberson static void
88262375ca8SEd Schouten sched_balance(void)
883356500a3SJeff Roberson {
8847fcf154aSJeff Roberson 	struct tdq *tdq;
885356500a3SJeff Roberson 
8860567b6ccSWarner Losh 	balance_ticks = max(balance_interval / 2, 1) +
887b250ad34SWarner Losh 	    (sched_random() % balance_interval);
8887fcf154aSJeff Roberson 	tdq = TDQ_SELF();
8897fcf154aSJeff Roberson 	TDQ_UNLOCK(tdq);
89062fa74d9SJeff Roberson 	sched_balance_group(cpu_top);
8917fcf154aSJeff Roberson 	TDQ_LOCK(tdq);
892cac77d04SJeff Roberson }
89386f8ae96SJeff Roberson 
894ae7a6b38SJeff Roberson /*
895ae7a6b38SJeff Roberson  * Lock two thread queues using their address to maintain lock order.
896ae7a6b38SJeff Roberson  */
897ae7a6b38SJeff Roberson static void
898ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two)
899ae7a6b38SJeff Roberson {
900ae7a6b38SJeff Roberson 	if (one < two) {
901ae7a6b38SJeff Roberson 		TDQ_LOCK(one);
902ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
903ae7a6b38SJeff Roberson 	} else {
904ae7a6b38SJeff Roberson 		TDQ_LOCK(two);
905ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
906ae7a6b38SJeff Roberson 	}
907ae7a6b38SJeff Roberson }
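
/*
 * For example, if cpu 0 balances toward cpu 1 while cpu 1 balances
 * toward cpu 0, both take the lower-addressed tdq_lock first, so one
 * simply blocks rather than deadlocking.  MTX_DUPOK is required on the
 * second acquisition because all tdq locks share one witness type.
 */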
908ae7a6b38SJeff Roberson 
909ae7a6b38SJeff Roberson /*
9107fcf154aSJeff Roberson  * Unlock two thread queues.  Order is not important here.
9117fcf154aSJeff Roberson  */
9127fcf154aSJeff Roberson static void
9137fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two)
9147fcf154aSJeff Roberson {
9157fcf154aSJeff Roberson 	TDQ_UNLOCK(one);
9167fcf154aSJeff Roberson 	TDQ_UNLOCK(two);
9177fcf154aSJeff Roberson }
9187fcf154aSJeff Roberson 
9197fcf154aSJeff Roberson /*
920ae7a6b38SJeff Roberson  * Transfer load between two imbalanced thread queues.
921ae7a6b38SJeff Roberson  */
92262fa74d9SJeff Roberson static int
923ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low)
924cac77d04SJeff Roberson {
92597e9382dSDon Lewis 	struct thread *td;
926880bf8b9SMarius Strobl 	int cpu;
927cac77d04SJeff Roberson 
928ae7a6b38SJeff Roberson 	tdq_lock_pair(high, low);
92997e9382dSDon Lewis 	td = NULL;
930155b9987SJeff Roberson 	/*
93197e9382dSDon Lewis 	 * Transfer a thread from high to low.
932155b9987SJeff Roberson 	 */
93336acfc65SAlexander Motin 	if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load &&
93497e9382dSDon Lewis 	    (td = tdq_move(high, low)) != NULL) {
935a5423ea3SJeff Roberson 		/*
93697e9382dSDon Lewis 		 * If the target isn't the current cpu, notify it of the
93797e9382dSDon Lewis 		 * new load, possibly sending an IPI to force it to reschedule.
938a5423ea3SJeff Roberson 		 */
939880bf8b9SMarius Strobl 		cpu = TDQ_ID(low);
940880bf8b9SMarius Strobl 		if (cpu != PCPU_GET(cpuid))
94197e9382dSDon Lewis 			tdq_notify(low, td);
942ae7a6b38SJeff Roberson 	}
9437fcf154aSJeff Roberson 	tdq_unlock_pair(high, low);
94497e9382dSDon Lewis 	return (td != NULL);
945356500a3SJeff Roberson }
946356500a3SJeff Roberson 
947ae7a6b38SJeff Roberson /*
948ae7a6b38SJeff Roberson  * Move a thread from one thread queue to another.
949ae7a6b38SJeff Roberson  */
95097e9382dSDon Lewis static struct thread *
951ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to)
952356500a3SJeff Roberson {
953ae7a6b38SJeff Roberson 	struct thread *td;
954ae7a6b38SJeff Roberson 	struct tdq *tdq;
955ae7a6b38SJeff Roberson 	int cpu;
956356500a3SJeff Roberson 
9577fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(from, MA_OWNED);
9587fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(to, MA_OWNED);
9597fcf154aSJeff Roberson 
960ad1e7d28SJulian Elischer 	tdq = from;
961ae7a6b38SJeff Roberson 	cpu = TDQ_ID(to);
9629727e637SJeff Roberson 	td = tdq_steal(tdq, cpu);
9639727e637SJeff Roberson 	if (td == NULL)
96497e9382dSDon Lewis 		return (NULL);
965*61a74c5cSJeff Roberson 
966ae7a6b38SJeff Roberson 	/*
967*61a74c5cSJeff Roberson 	 * Although the run queue is locked, the thread may be
968*61a74c5cSJeff Roberson 	 * blocked.  We cannot set the lock until it is unblocked.
969ae7a6b38SJeff Roberson 	 */
970*61a74c5cSJeff Roberson 	thread_lock_block_wait(td);
971ae7a6b38SJeff Roberson 	sched_rem(td);
972*61a74c5cSJeff Roberson 	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(from));
973ae7a6b38SJeff Roberson 	td->td_lock = TDQ_LOCKPTR(to);
974*61a74c5cSJeff Roberson 	td_get_sched(td)->ts_cpu = cpu;
975ae7a6b38SJeff Roberson 	tdq_add(to, td, SRQ_YIELDING);
976*61a74c5cSJeff Roberson 
97797e9382dSDon Lewis 	return (td);
978356500a3SJeff Roberson }
97922bf7d9aSJeff Roberson 
980ae7a6b38SJeff Roberson /*
981ae7a6b38SJeff Roberson  * This tdq has idled.  Try to steal a thread from another cpu and switch
982ae7a6b38SJeff Roberson  * to it.
983ae7a6b38SJeff Roberson  */
98480f86c9fSJeff Roberson static int
985ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq)
98622bf7d9aSJeff Roberson {
98762fa74d9SJeff Roberson 	struct cpu_group *cg;
988ad1e7d28SJulian Elischer 	struct tdq *steal;
989c76ee827SJeff Roberson 	cpuset_t mask;
99097e9382dSDon Lewis 	int cpu, switchcnt;
99180f86c9fSJeff Roberson 
99297e9382dSDon Lewis 	if (smp_started == 0 || steal_idle == 0 || tdq->tdq_cg == NULL)
99388f530ccSJeff Roberson 		return (1);
994c76ee827SJeff Roberson 	CPU_FILL(&mask);
995c76ee827SJeff Roberson 	CPU_CLR(PCPU_GET(cpuid), &mask);
99697e9382dSDon Lewis     restart:
99797e9382dSDon Lewis 	switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
99897e9382dSDon Lewis 	for (cg = tdq->tdq_cg; ; ) {
99997e9382dSDon Lewis 		cpu = sched_highest(cg, mask, steal_thresh);
100097e9382dSDon Lewis 		/*
100197e9382dSDon Lewis 		 * We were assigned a thread but not preempted.  Returning
100297e9382dSDon Lewis 		 * 0 here will cause our caller to switch to it.
100397e9382dSDon Lewis 		 */
100497e9382dSDon Lewis 		if (tdq->tdq_load)
100597e9382dSDon Lewis 			return (0);
100662fa74d9SJeff Roberson 		if (cpu == -1) {
100762fa74d9SJeff Roberson 			cg = cg->cg_parent;
100897e9382dSDon Lewis 			if (cg == NULL)
100997e9382dSDon Lewis 				return (1);
101080f86c9fSJeff Roberson 			continue;
10117b8bfa0dSJeff Roberson 		}
10127b8bfa0dSJeff Roberson 		steal = TDQ_CPU(cpu);
101397e9382dSDon Lewis 		/*
101497e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
101597e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
101697e9382dSDon Lewis 		 *
101797e9382dSDon Lewis 		 * Testing this ahead of tdq_lock_pair() only catches
101897e9382dSDon Lewis 		 * this situation about 20% of the time on an 8 core
101997e9382dSDon Lewis 		 * 16 thread Ryzen 7, but it still helps performance.
102097e9382dSDon Lewis 		 */
102197e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
102297e9382dSDon Lewis 		    steal->tdq_transferable == 0)
102397e9382dSDon Lewis 			goto restart;
10247fcf154aSJeff Roberson 		tdq_lock_pair(tdq, steal);
102597e9382dSDon Lewis 		/*
102697e9382dSDon Lewis 		 * We were assigned a thread while waiting for the locks.
102797e9382dSDon Lewis 		 * Switch to it now instead of stealing a thread.
102897e9382dSDon Lewis 		 */
102997e9382dSDon Lewis 		if (tdq->tdq_load)
103097e9382dSDon Lewis 			break;
103197e9382dSDon Lewis 		/*
103297e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
103397e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread, or
103497e9382dSDon Lewis 		 * we were preempted and the CPU loading info may be out
103597e9382dSDon Lewis 		 * of date.  The latter is rare.  In either case restart
103697e9382dSDon Lewis 		 * the search.
103797e9382dSDon Lewis 		 */
103897e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
103997e9382dSDon Lewis 		    steal->tdq_transferable == 0 ||
104097e9382dSDon Lewis 		    switchcnt != tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt) {
10417fcf154aSJeff Roberson 			tdq_unlock_pair(tdq, steal);
104297e9382dSDon Lewis 			goto restart;
104362fa74d9SJeff Roberson 		}
104462fa74d9SJeff Roberson 		/*
104597e9382dSDon Lewis 		 * Steal the thread and switch to it.
104662fa74d9SJeff Roberson 		 */
104797e9382dSDon Lewis 		if (tdq_move(steal, tdq) != NULL)
104897e9382dSDon Lewis 			break;
104997e9382dSDon Lewis 		/*
105097e9382dSDon Lewis 		 * We failed to acquire a thread even though it looked
105197e9382dSDon Lewis 		 * like one was available.  This could be due to affinity
105297e9382dSDon Lewis 		 * restrictions or for other reasons.  Loop again after
105397e9382dSDon Lewis 		 * removing this CPU from the set.  The restart logic
105497e9382dSDon Lewis 		 * above does not restore this CPU to the set due to the
105597e9382dSDon Lewis 		 * likelihood of failing here again.
105697e9382dSDon Lewis 		 */
105797e9382dSDon Lewis 		CPU_CLR(cpu, &mask);
105862fa74d9SJeff Roberson 		tdq_unlock_pair(tdq, steal);
105980f86c9fSJeff Roberson 	}
1060ae7a6b38SJeff Roberson 	TDQ_UNLOCK(steal);
10618df78c41SJeff Roberson 	mi_switch(SW_VOL | SWT_IDLE, NULL);
1062ae7a6b38SJeff Roberson 	thread_unlock(curthread);
10637b8bfa0dSJeff Roberson 	return (0);
106422bf7d9aSJeff Roberson }
106522bf7d9aSJeff Roberson 
1066ae7a6b38SJeff Roberson /*
1067ae7a6b38SJeff Roberson  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
1068ae7a6b38SJeff Roberson  */
106922bf7d9aSJeff Roberson static void
107027ee18adSRyan Stone tdq_notify(struct tdq *tdq, struct thread *td)
107122bf7d9aSJeff Roberson {
107202f0ff6dSJohn Baldwin 	struct thread *ctd;
107327ee18adSRyan Stone 	int pri;
10747b8bfa0dSJeff Roberson 	int cpu;
107522bf7d9aSJeff Roberson 
10767789ab32SMark Johnston 	if (tdq->tdq_owepreempt)
1077ff256d9cSJeff Roberson 		return;
107827ee18adSRyan Stone 	cpu = td_get_sched(td)->ts_cpu;
107927ee18adSRyan Stone 	pri = td->td_priority;
108002f0ff6dSJohn Baldwin 	ctd = pcpu_find(cpu)->pc_curthread;
108102f0ff6dSJohn Baldwin 	if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
10826b2f763fSJeff Roberson 		return;
108379654969SAlexander Motin 
108479654969SAlexander Motin 	/*
1085ae9e9b4fSAlexander Motin 	 * Make sure that our caller's earlier update to tdq_load is
1086ae9e9b4fSAlexander Motin 	 * globally visible before we read tdq_cpu_idle.  Idle thread
108779654969SAlexander Motin 	 * accesses both of them without locks, and the order is important.
108879654969SAlexander Motin 	 */
1089e8677f38SKonstantin Belousov 	atomic_thread_fence_seq_cst();
109079654969SAlexander Motin 
109102f0ff6dSJohn Baldwin 	if (TD_IS_IDLETHREAD(ctd)) {
10921690c6c1SJeff Roberson 		/*
10936c47aaaeSJeff Roberson 		 * If the MD code has an idle wakeup routine try that before
10946c47aaaeSJeff Roberson 		 * falling back to IPI.
10956c47aaaeSJeff Roberson 		 */
10969f9ad565SAlexander Motin 		if (!tdq->tdq_cpu_idle || cpu_idle_wakeup(cpu))
10976c47aaaeSJeff Roberson 			return;
10981690c6c1SJeff Roberson 	}
10997789ab32SMark Johnston 
11007789ab32SMark Johnston 	/*
11017789ab32SMark Johnston 	 * The run queues have been updated, so any switch on the remote CPU
11027789ab32SMark Johnston 	 * will satisfy the preemption request.
11037789ab32SMark Johnston 	 */
11047789ab32SMark Johnston 	tdq->tdq_owepreempt = 1;
1105d9d8d144SJohn Baldwin 	ipi_cpu(cpu, IPI_PREEMPT);
110622bf7d9aSJeff Roberson }
110722bf7d9aSJeff Roberson 
1108ae7a6b38SJeff Roberson /*
1109ae7a6b38SJeff Roberson  * Steals load from a timeshare queue.  Honors the rotating queue head
1110ae7a6b38SJeff Roberson  * index.
1111ae7a6b38SJeff Roberson  */
11129727e637SJeff Roberson static struct thread *
111362fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start)
1114ae7a6b38SJeff Roberson {
1115ae7a6b38SJeff Roberson 	struct rqbits *rqb;
1116ae7a6b38SJeff Roberson 	struct rqhead *rqh;
111736acfc65SAlexander Motin 	struct thread *td, *first;
1118ae7a6b38SJeff Roberson 	int bit;
1119ae7a6b38SJeff Roberson 	int i;
1120ae7a6b38SJeff Roberson 
1121ae7a6b38SJeff Roberson 	rqb = &rq->rq_status;
1122ae7a6b38SJeff Roberson 	bit = start & (RQB_BPW - 1);
112336acfc65SAlexander Motin 	first = NULL;
1124ae7a6b38SJeff Roberson again:
1125ae7a6b38SJeff Roberson 	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
1126ae7a6b38SJeff Roberson 		if (rqb->rqb_bits[i] == 0)
1127ae7a6b38SJeff Roberson 			continue;
11288bc713f6SJeff Roberson 		if (bit == 0)
11298bc713f6SJeff Roberson 			bit = RQB_FFS(rqb->rqb_bits[i]);
11308bc713f6SJeff Roberson 		for (; bit < RQB_BPW; bit++) {
11318bc713f6SJeff Roberson 			if ((rqb->rqb_bits[i] & (1ul << bit)) == 0)
1132ae7a6b38SJeff Roberson 				continue;
11338bc713f6SJeff Roberson 			rqh = &rq->rq_queues[bit + (i << RQB_L2BPW)];
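			/*
			 * Skip the first thread seen; presumably it is the
			 * likeliest to run next on its own cpu.  It is only
			 * taken as a last resort by the fallback check after
			 * this scan.
			 */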
11349727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq) {
11359727e637SJeff Roberson 				if (first && THREAD_CAN_MIGRATE(td) &&
11369727e637SJeff Roberson 				    THREAD_CAN_SCHED(td, cpu))
11379727e637SJeff Roberson 					return (td);
113836acfc65SAlexander Motin 				first = td;
1139ae7a6b38SJeff Roberson 			}
1140ae7a6b38SJeff Roberson 		}
11418bc713f6SJeff Roberson 	}
1142ae7a6b38SJeff Roberson 	if (start != 0) {
1143ae7a6b38SJeff Roberson 		start = 0;
1144ae7a6b38SJeff Roberson 		goto again;
1145ae7a6b38SJeff Roberson 	}
1146ae7a6b38SJeff Roberson 
114736acfc65SAlexander Motin 	if (first && THREAD_CAN_MIGRATE(first) &&
114836acfc65SAlexander Motin 	    THREAD_CAN_SCHED(first, cpu))
114936acfc65SAlexander Motin 		return (first);
1150ae7a6b38SJeff Roberson 	return (NULL);
1151ae7a6b38SJeff Roberson }
1152ae7a6b38SJeff Roberson 
1153ae7a6b38SJeff Roberson /*
1154ae7a6b38SJeff Roberson  * Steals load from a standard linear queue.
1155ae7a6b38SJeff Roberson  */
11569727e637SJeff Roberson static struct thread *
115762fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu)
115822bf7d9aSJeff Roberson {
115922bf7d9aSJeff Roberson 	struct rqhead *rqh;
116022bf7d9aSJeff Roberson 	struct rqbits *rqb;
11619727e637SJeff Roberson 	struct thread *td;
116222bf7d9aSJeff Roberson 	int word;
116322bf7d9aSJeff Roberson 	int bit;
116422bf7d9aSJeff Roberson 
116522bf7d9aSJeff Roberson 	rqb = &rq->rq_status;
116622bf7d9aSJeff Roberson 	for (word = 0; word < RQB_LEN; word++) {
116722bf7d9aSJeff Roberson 		if (rqb->rqb_bits[word] == 0)
116822bf7d9aSJeff Roberson 			continue;
116922bf7d9aSJeff Roberson 		for (bit = 0; bit < RQB_BPW; bit++) {
1170a2640c9bSPeter Wemm 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
117122bf7d9aSJeff Roberson 				continue;
117222bf7d9aSJeff Roberson 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
11739727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq)
11749727e637SJeff Roberson 				if (THREAD_CAN_MIGRATE(td) &&
11759727e637SJeff Roberson 				    THREAD_CAN_SCHED(td, cpu))
11769727e637SJeff Roberson 					return (td);
117722bf7d9aSJeff Roberson 		}
117822bf7d9aSJeff Roberson 	}
117922bf7d9aSJeff Roberson 	return (NULL);
118022bf7d9aSJeff Roberson }
118122bf7d9aSJeff Roberson 
1182ae7a6b38SJeff Roberson /*
1183ae7a6b38SJeff Roberson  * Attempt to steal a thread in priority order from a thread queue.
1184ae7a6b38SJeff Roberson  */
11859727e637SJeff Roberson static struct thread *
118662fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu)
118722bf7d9aSJeff Roberson {
11889727e637SJeff Roberson 	struct thread *td;
118922bf7d9aSJeff Roberson 
1190ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
11919727e637SJeff Roberson 	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
11929727e637SJeff Roberson 		return (td);
11939727e637SJeff Roberson 	if ((td = runq_steal_from(&tdq->tdq_timeshare,
11949727e637SJeff Roberson 	    cpu, tdq->tdq_ridx)) != NULL)
11959727e637SJeff Roberson 		return (td);
119662fa74d9SJeff Roberson 	return (runq_steal(&tdq->tdq_idle, cpu));
119722bf7d9aSJeff Roberson }
119880f86c9fSJeff Roberson 
1199ae7a6b38SJeff Roberson /*
1200ae7a6b38SJeff Roberson  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
12017fcf154aSJeff Roberson  * current lock and returns with the assigned queue locked.
1202ae7a6b38SJeff Roberson  */
1203ae7a6b38SJeff Roberson static inline struct tdq *
12049727e637SJeff Roberson sched_setcpu(struct thread *td, int cpu, int flags)
120580f86c9fSJeff Roberson {
1207ae7a6b38SJeff Roberson 	struct tdq *tdq;
1208*61a74c5cSJeff Roberson 	struct mtx *mtx;
120980f86c9fSJeff Roberson 
12109727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1211ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
121293ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_cpu = cpu;
12139727e637SJeff Roberson 	/*
12149727e637SJeff Roberson 	 * If the lock matches just return the queue.
12159727e637SJeff Roberson 	 */
1216*61a74c5cSJeff Roberson 	if (td->td_lock == TDQ_LOCKPTR(tdq)) {
1217*61a74c5cSJeff Roberson 		KASSERT((flags & SRQ_HOLD) == 0,
1218*61a74c5cSJeff Roberson 		    ("sched_setcpu: Invalid lock for SRQ_HOLD"));
1219ae7a6b38SJeff Roberson 		return (tdq);
1220ae7a6b38SJeff Roberson 	}
1221*61a74c5cSJeff Roberson 
122280f86c9fSJeff Roberson 	/*
1223ae7a6b38SJeff Roberson 	 * The hard case, migration: we need to block the thread first to
1224ae7a6b38SJeff Roberson 	 * prevent lock order reversals with other cpus' locks.
12257b8bfa0dSJeff Roberson 	 */
1226b0b9dee5SAttilio Rao 	spinlock_enter();
1227*61a74c5cSJeff Roberson 	mtx = thread_lock_block(td);
1228*61a74c5cSJeff Roberson 	if ((flags & SRQ_HOLD) == 0)
1229*61a74c5cSJeff Roberson 		mtx_unlock_spin(mtx);
1230ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1231ae7a6b38SJeff Roberson 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1232b0b9dee5SAttilio Rao 	spinlock_exit();
1233ae7a6b38SJeff Roberson 	return (tdq);
123480f86c9fSJeff Roberson }
12352454aaf5SJeff Roberson 
12368df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding");
12378df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity");
12388df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity");
12398df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load");
12408df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu");
12418df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration");
12428df78c41SJeff Roberson 
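/*
 * Select a cpu to run a newly runnable thread on.  In rough order of
 * preference: the current cpu for an interrupt thread, the thread's last
 * cpu if its affinity is still warm and that cpu is idle, the least
 * loaded cpu sharing the last level cache, and finally the least loaded
 * cpu in the system.
 */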
1243ae7a6b38SJeff Roberson static int
12449727e637SJeff Roberson sched_pickcpu(struct thread *td, int flags)
1245ae7a6b38SJeff Roberson {
124636acfc65SAlexander Motin 	struct cpu_group *cg, *ccg;
12479727e637SJeff Roberson 	struct td_sched *ts;
1248ae7a6b38SJeff Roberson 	struct tdq *tdq;
1249c76ee827SJeff Roberson 	cpuset_t mask;
1250c9205e35SAlexander Motin 	int cpu, pri, self, intr;
12517b8bfa0dSJeff Roberson 
125262fa74d9SJeff Roberson 	self = PCPU_GET(cpuid);
125393ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1254efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on "
1255efe67753SNathan Whitehorn 	    "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name));
12567b8bfa0dSJeff Roberson 	if (smp_started == 0)
12577b8bfa0dSJeff Roberson 		return (self);
125828994a58SJeff Roberson 	/*
125928994a58SJeff Roberson 	 * Don't migrate a running thread from sched_switch().
126028994a58SJeff Roberson 	 */
126162fa74d9SJeff Roberson 	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
126262fa74d9SJeff Roberson 		return (ts->ts_cpu);
12637b8bfa0dSJeff Roberson 	/*
126462fa74d9SJeff Roberson 	 * Prefer to run interrupt threads on the processors that generate
126562fa74d9SJeff Roberson 	 * the interrupt.
12667b8bfa0dSJeff Roberson 	 */
126762fa74d9SJeff Roberson 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
1268c9205e35SAlexander Motin 	    curthread->td_intr_nesting_level) {
1269c55dc51cSAlexander Motin 		tdq = TDQ_SELF();
1270c55dc51cSAlexander Motin 		if (tdq->tdq_lowpri >= PRI_MIN_IDLE) {
1271c55dc51cSAlexander Motin 			SCHED_STAT_INC(pickcpu_idle_affinity);
1272c55dc51cSAlexander Motin 			return (self);
1273c55dc51cSAlexander Motin 		}
127462fa74d9SJeff Roberson 		ts->ts_cpu = self;
1275c9205e35SAlexander Motin 		intr = 1;
1276c55dc51cSAlexander Motin 		cg = tdq->tdq_cg;
1277c55dc51cSAlexander Motin 		goto llc;
1278c55dc51cSAlexander Motin 	} else {
1279c9205e35SAlexander Motin 		intr = 0;
1280c55dc51cSAlexander Motin 		tdq = TDQ_CPU(ts->ts_cpu);
1281c55dc51cSAlexander Motin 		cg = tdq->tdq_cg;
1282c55dc51cSAlexander Motin 	}
12837b8bfa0dSJeff Roberson 	/*
128436acfc65SAlexander Motin 	 * If the thread can run on the last cpu, the affinity has not
12850127914cSEric van Gyzen 	 * expired, and that cpu is idle, run it there.
12867b8bfa0dSJeff Roberson 	 */
128736acfc65SAlexander Motin 	if (THREAD_CAN_SCHED(td, ts->ts_cpu) &&
128836acfc65SAlexander Motin 	    tdq->tdq_lowpri >= PRI_MIN_IDLE &&
128936acfc65SAlexander Motin 	    SCHED_AFFINITY(ts, CG_SHARE_L2)) {
1290c55dc51cSAlexander Motin 		if (cg->cg_flags & CG_FLAG_THREAD) {
1291176dd236SAlexander Motin 			/* Check all SMT threads for being idle. */
1292176dd236SAlexander Motin 			for (cpu = CPU_FFS(&cg->cg_mask) - 1; ; cpu++) {
1293176dd236SAlexander Motin 				if (CPU_ISSET(cpu, &cg->cg_mask) &&
1294176dd236SAlexander Motin 				    TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
129562fa74d9SJeff Roberson 					break;
1296176dd236SAlexander Motin 				if (cpu >= mp_maxid) {
1297176dd236SAlexander Motin 					SCHED_STAT_INC(pickcpu_idle_affinity);
1298176dd236SAlexander Motin 					return (ts->ts_cpu);
129936acfc65SAlexander Motin 				}
1300176dd236SAlexander Motin 			}
1301176dd236SAlexander Motin 		} else {
130236acfc65SAlexander Motin 			SCHED_STAT_INC(pickcpu_idle_affinity);
130336acfc65SAlexander Motin 			return (ts->ts_cpu);
130436acfc65SAlexander Motin 		}
130536acfc65SAlexander Motin 	}
1306c55dc51cSAlexander Motin llc:
130736acfc65SAlexander Motin 	/*
130836acfc65SAlexander Motin 	 * Search for the last level cache CPU group in the tree.
1309c9205e35SAlexander Motin 	 * Skip SMT, identical groups and caches with expired affinity.
1310c9205e35SAlexander Motin 	 * Interrupt threads affinity is explicit and never expires.
131136acfc65SAlexander Motin 	 */
131236acfc65SAlexander Motin 	for (ccg = NULL; cg != NULL; cg = cg->cg_parent) {
131336acfc65SAlexander Motin 		if (cg->cg_flags & CG_FLAG_THREAD)
131436acfc65SAlexander Motin 			continue;
1315c9205e35SAlexander Motin 		if (cg->cg_children == 1 || cg->cg_count == 1)
1316c9205e35SAlexander Motin 			continue;
1317c9205e35SAlexander Motin 		if (cg->cg_level == CG_SHARE_NONE ||
1318c9205e35SAlexander Motin 		    (!intr && !SCHED_AFFINITY(ts, cg->cg_level)))
131936acfc65SAlexander Motin 			continue;
132036acfc65SAlexander Motin 		ccg = cg;
132136acfc65SAlexander Motin 	}
1322c9205e35SAlexander Motin 	/* Found LLC shared by all CPUs, so do a global search. */
1323c9205e35SAlexander Motin 	if (ccg == cpu_top)
1324c9205e35SAlexander Motin 		ccg = NULL;
132562fa74d9SJeff Roberson 	cpu = -1;
1326c76ee827SJeff Roberson 	mask = td->td_cpuset->cs_mask;
1327c9205e35SAlexander Motin 	pri = td->td_priority;
1328c9205e35SAlexander Motin 	/*
1329c9205e35SAlexander Motin 	 * Try hard to keep interrupts within found LLC.  Search the LLC for
1330c9205e35SAlexander Motin 	 * the least loaded CPU we can run now.  For NUMA systems it should
1331c9205e35SAlexander Motin 	 * be within target domain, and it also reduces scheduling overhead.
1332c9205e35SAlexander Motin 	 */
1333c9205e35SAlexander Motin 	if (ccg != NULL && intr) {
1334c9205e35SAlexander Motin 		cpu = sched_lowest(ccg, mask, pri, INT_MAX, ts->ts_cpu);
1335c9205e35SAlexander Motin 		if (cpu >= 0)
1336c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_intrbind);
1337c9205e35SAlexander Motin 	} else
1338c9205e35SAlexander Motin 	/* Search the LLC for the least loaded idle CPU we can run now. */
1339c9205e35SAlexander Motin 	if (ccg != NULL) {
1340c9205e35SAlexander Motin 		cpu = sched_lowest(ccg, mask, max(pri, PRI_MAX_TIMESHARE),
134136acfc65SAlexander Motin 		    INT_MAX, ts->ts_cpu);
1342c9205e35SAlexander Motin 		if (cpu >= 0)
1343c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_affinity);
1344c9205e35SAlexander Motin 	}
1345c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU we can run now. */
1346c9205e35SAlexander Motin 	if (cpu < 0) {
134736acfc65SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, pri, INT_MAX, ts->ts_cpu);
1348c9205e35SAlexander Motin 		if (cpu >= 0)
1349c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1350c9205e35SAlexander Motin 	}
1351c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU. */
1352c9205e35SAlexander Motin 	if (cpu < 0) {
135336acfc65SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu);
1354c9205e35SAlexander Motin 		if (cpu >= 0)
1355c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1356c9205e35SAlexander Motin 	}
1357bb3dfc6aSAlexander Motin 	KASSERT(cpu >= 0, ("sched_pickcpu: Failed to find a cpu."));
1358efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu));
135962fa74d9SJeff Roberson 	/*
136062fa74d9SJeff Roberson 	 * Compare the lowest loaded cpu to current cpu.
136162fa74d9SJeff Roberson 	 */
1362018ff686SJeff Roberson 	tdq = TDQ_CPU(cpu);
1363018ff686SJeff Roberson 	if (THREAD_CAN_SCHED(td, self) && TDQ_SELF()->tdq_lowpri > pri &&
1364018ff686SJeff Roberson 	    tdq->tdq_lowpri < PRI_MIN_IDLE &&
1365018ff686SJeff Roberson 	    TDQ_SELF()->tdq_load <= tdq->tdq_load + 1) {
13668df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_local);
136762fa74d9SJeff Roberson 		cpu = self;
1368c9205e35SAlexander Motin 	}
13698df78c41SJeff Roberson 	if (cpu != ts->ts_cpu)
13708df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_migration);
1371ae7a6b38SJeff Roberson 	return (cpu);
137280f86c9fSJeff Roberson }
137362fa74d9SJeff Roberson #endif
137422bf7d9aSJeff Roberson 
137522bf7d9aSJeff Roberson /*
137622bf7d9aSJeff Roberson  * Pick the highest priority task we have and return it.
13770c0a98b2SJeff Roberson  */
13789727e637SJeff Roberson static struct thread *
1379ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq)
13805d7ef00cSJeff Roberson {
13819727e637SJeff Roberson 	struct thread *td;
13825d7ef00cSJeff Roberson 
1383ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
13849727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_realtime);
13859727e637SJeff Roberson 	if (td != NULL)
13869727e637SJeff Roberson 		return (td);
13879727e637SJeff Roberson 	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
13889727e637SJeff Roberson 	if (td != NULL) {
138912d56c0fSJohn Baldwin 		KASSERT(td->td_priority >= PRI_MIN_BATCH,
1390e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on timeshare queue %d",
13919727e637SJeff Roberson 		    td->td_priority));
13929727e637SJeff Roberson 		return (td);
139315dc847eSJeff Roberson 	}
13949727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_idle);
13959727e637SJeff Roberson 	if (td != NULL) {
13969727e637SJeff Roberson 		KASSERT(td->td_priority >= PRI_MIN_IDLE,
1397e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on idle queue %d",
13989727e637SJeff Roberson 		    td->td_priority));
13999727e637SJeff Roberson 		return (td);
1400e7d50326SJeff Roberson 	}
1401e7d50326SJeff Roberson 
1402e7d50326SJeff Roberson 	return (NULL);
1403245f3abfSJeff Roberson }
14040a016a05SJeff Roberson 
1405ae7a6b38SJeff Roberson /*
1406ae7a6b38SJeff Roberson  * Initialize a thread queue.
1407ae7a6b38SJeff Roberson  */
14080a016a05SJeff Roberson static void
1409018ff686SJeff Roberson tdq_setup(struct tdq *tdq, int id)
14100a016a05SJeff Roberson {
1411ae7a6b38SJeff Roberson 
1412c47f202bSJeff Roberson 	if (bootverbose)
1413018ff686SJeff Roberson 		printf("ULE: setup cpu %d\n", id);
1414e7d50326SJeff Roberson 	runq_init(&tdq->tdq_realtime);
1415e7d50326SJeff Roberson 	runq_init(&tdq->tdq_timeshare);
1416d2ad694cSJeff Roberson 	runq_init(&tdq->tdq_idle);
1417018ff686SJeff Roberson 	tdq->tdq_id = id;
141862fa74d9SJeff Roberson 	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
141962fa74d9SJeff Roberson 	    "sched lock %d", (int)TDQ_ID(tdq));
1420*61a74c5cSJeff Roberson 	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", MTX_SPIN);
14218f51ad55SJeff Roberson #ifdef KTR
14228f51ad55SJeff Roberson 	snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
14238f51ad55SJeff Roberson 	    "CPU %d load", (int)TDQ_ID(tdq));
14248f51ad55SJeff Roberson #endif
14250a016a05SJeff Roberson }
14260a016a05SJeff Roberson 
1427c47f202bSJeff Roberson #ifdef SMP
1428c47f202bSJeff Roberson static void
1429c47f202bSJeff Roberson sched_setup_smp(void)
1430c47f202bSJeff Roberson {
1431c47f202bSJeff Roberson 	struct tdq *tdq;
1432c47f202bSJeff Roberson 	int i;
1433c47f202bSJeff Roberson 
143462fa74d9SJeff Roberson 	cpu_top = smp_topo();
14353aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
1436018ff686SJeff Roberson 		tdq = DPCPU_ID_PTR(i, tdq);
1437018ff686SJeff Roberson 		tdq_setup(tdq, i);
143862fa74d9SJeff Roberson 		tdq->tdq_cg = smp_topo_find(cpu_top, i);
143962fa74d9SJeff Roberson 		if (tdq->tdq_cg == NULL)
144062fa74d9SJeff Roberson 			panic("Can't find cpu group for %d\n", i);
1441c47f202bSJeff Roberson 	}
1442018ff686SJeff Roberson 	PCPU_SET(sched, DPCPU_PTR(tdq));
144362fa74d9SJeff Roberson 	balance_tdq = TDQ_SELF();
1444c47f202bSJeff Roberson }
1445c47f202bSJeff Roberson #endif
1446c47f202bSJeff Roberson 
1447ae7a6b38SJeff Roberson /*
1448ae7a6b38SJeff Roberson  * Setup the thread queues and initialize the topology based on MD
1449ae7a6b38SJeff Roberson  * information.
1450ae7a6b38SJeff Roberson  */
145135e6168fSJeff Roberson static void
145235e6168fSJeff Roberson sched_setup(void *dummy)
145335e6168fSJeff Roberson {
1454ae7a6b38SJeff Roberson 	struct tdq *tdq;
1455c47f202bSJeff Roberson 
14560ec896fdSJeff Roberson #ifdef SMP
1457c47f202bSJeff Roberson 	sched_setup_smp();
1458749d01b0SJeff Roberson #else
1459018ff686SJeff Roberson 	tdq_setup(TDQ_SELF(), 0);
1460356500a3SJeff Roberson #endif
1461018ff686SJeff Roberson 	tdq = TDQ_SELF();
1462ae7a6b38SJeff Roberson 
1463ae7a6b38SJeff Roberson 	/* Add thread0's load since it's running. */
1464ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1465e1504695SJeff Roberson 	thread0.td_lock = TDQ_LOCKPTR(tdq);
14669727e637SJeff Roberson 	tdq_load_add(tdq, &thread0);
146762fa74d9SJeff Roberson 	tdq->tdq_lowpri = thread0.td_priority;
1468ae7a6b38SJeff Roberson 	TDQ_UNLOCK(tdq);
146935e6168fSJeff Roberson }
147035e6168fSJeff Roberson 
1471ae7a6b38SJeff Roberson /*
1472579895dfSAlexander Motin  * This routine determines time constants after stathz and hz are set up.
1473ae7a6b38SJeff Roberson  */
1474a1d4fe69SDavid Xu /* ARGSUSED */
1475a1d4fe69SDavid Xu static void
1476a1d4fe69SDavid Xu sched_initticks(void *dummy)
1477a1d4fe69SDavid Xu {
1478ae7a6b38SJeff Roberson 	int incr;
1479ae7a6b38SJeff Roberson 
1480a1d4fe69SDavid Xu 	realstathz = stathz ? stathz : hz;
14815e5c3873SJeff Roberson 	sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;
14825e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
148337f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
148437f4e025SAlexander Motin 	    realstathz);
1485a1d4fe69SDavid Xu 
1486a1d4fe69SDavid Xu 	/*
1487e7d50326SJeff Roberson 	 * tickincr is shifted out by 10 to avoid rounding errors due to
14883f872f85SJeff Roberson 	 * hz not being evenly divisible by stathz on all platforms.
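	 * For example, assuming hz = 1000 and stathz = 127, this gives
	 * incr = (1000 << 10) / 127 = 8062, or roughly 7.87 hz ticks per
	 * stathz tick in fixed point.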
1489e7d50326SJeff Roberson 	 */
1490ae7a6b38SJeff Roberson 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1491e7d50326SJeff Roberson 	/*
1492e7d50326SJeff Roberson 	 * This does not work for values of stathz that are more than
1493e7d50326SJeff Roberson 	 * (1 << SCHED_TICK_SHIFT) * hz.  In practice this does not happen.
1494a1d4fe69SDavid Xu 	 */
1495ae7a6b38SJeff Roberson 	if (incr == 0)
1496ae7a6b38SJeff Roberson 		incr = 1;
1497ae7a6b38SJeff Roberson 	tickincr = incr;
14987b8bfa0dSJeff Roberson #ifdef SMP
14999862717aSJeff Roberson 	/*
15007fcf154aSJeff Roberson 	 * Set the default balance interval now that we know
15017fcf154aSJeff Roberson 	 * what realstathz is.
15027fcf154aSJeff Roberson 	 */
15037fcf154aSJeff Roberson 	balance_interval = realstathz;
1504290d9060SDon Lewis 	balance_ticks = balance_interval;
15057b8bfa0dSJeff Roberson 	affinity = SCHED_AFFINITY_DEFAULT;
15067b8bfa0dSJeff Roberson #endif
1507b3f40a41SAlexander Motin 	if (sched_idlespinthresh < 0)
15082c27cb3aSAlexander Motin 		sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz;
1509a1d4fe69SDavid Xu }
1510a1d4fe69SDavid Xu 
151235e6168fSJeff Roberson /*
1513ae7a6b38SJeff Roberson  * This is the core of the interactivity algorithm.  Determines a score based
1514ae7a6b38SJeff Roberson  * on past behavior.  It is the ratio of sleep time to run time scaled to
1515ae7a6b38SJeff Roberson  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1516ae7a6b38SJeff Roberson  * differs from the cpu usage because it does not account for time spent
1517ae7a6b38SJeff Roberson  * waiting on a run-queue.  Would be prettier if we had floating point.
151857031f79SGeorge V. Neville-Neil  *
151957031f79SGeorge V. Neville-Neil  * When a thread's sleep time is greater than its run time the
152057031f79SGeorge V. Neville-Neil  * calculation is:
152157031f79SGeorge V. Neville-Neil  *
152257031f79SGeorge V. Neville-Neil  *                           scaling factor
152357031f79SGeorge V. Neville-Neil  * interactivity score =  ---------------------
152457031f79SGeorge V. Neville-Neil  *                        sleep time / run time
152557031f79SGeorge V. Neville-Neil  *
152657031f79SGeorge V. Neville-Neil  *
152757031f79SGeorge V. Neville-Neil  * When a thread's run time is greater than its sleep time the
152857031f79SGeorge V. Neville-Neil  * calculation is:
152957031f79SGeorge V. Neville-Neil  *
153057031f79SGeorge V. Neville-Neil  *                                                 scaling factor
153157031f79SGeorge V. Neville-Neil  * interactivity score =  2 * scaling factor  -  ---------------------
153257031f79SGeorge V. Neville-Neil  *                                               run time / sleep time
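 *
 * For example, taking the scaling factor to be half of the maximum
 * score (50): a thread that has slept three times as long as it has
 * run scores 50 / 3 = 16 and looks interactive, while a thread that
 * has run three times as long as it has slept scores
 * 100 - 50 / 3 = 84 and is treated as batch work.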
1533ae7a6b38SJeff Roberson  */
1534ae7a6b38SJeff Roberson static int
1535ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1536ae7a6b38SJeff Roberson {
1537ae7a6b38SJeff Roberson 	struct td_sched *ts;
1538ae7a6b38SJeff Roberson 	int div;
1539ae7a6b38SJeff Roberson 
154093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1541ae7a6b38SJeff Roberson 	/*
1542ae7a6b38SJeff Roberson 	 * The score is only needed if this is likely to be an interactive
1543ae7a6b38SJeff Roberson 	 * task.  Don't go through the expense of computing it if there's
1544ae7a6b38SJeff Roberson 	 * no chance.
1545ae7a6b38SJeff Roberson 	 */
1546ae7a6b38SJeff Roberson 	if (sched_interact <= SCHED_INTERACT_HALF &&
1547ae7a6b38SJeff Roberson 		ts->ts_runtime >= ts->ts_slptime)
1548ae7a6b38SJeff Roberson 			return (SCHED_INTERACT_HALF);
1549ae7a6b38SJeff Roberson 
1550ae7a6b38SJeff Roberson 	if (ts->ts_runtime > ts->ts_slptime) {
1551ae7a6b38SJeff Roberson 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1552ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF +
1553ae7a6b38SJeff Roberson 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1554ae7a6b38SJeff Roberson 	}
1555ae7a6b38SJeff Roberson 	if (ts->ts_slptime > ts->ts_runtime) {
1556ae7a6b38SJeff Roberson 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1557ae7a6b38SJeff Roberson 		return (ts->ts_runtime / div);
1558ae7a6b38SJeff Roberson 	}
1559ae7a6b38SJeff Roberson 	/* runtime == slptime */
1560ae7a6b38SJeff Roberson 	if (ts->ts_runtime)
1561ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF);
1562ae7a6b38SJeff Roberson 
1563ae7a6b38SJeff Roberson 	/*
1564ae7a6b38SJeff Roberson 	 * This can happen if slptime and runtime are 0.
1565ae7a6b38SJeff Roberson 	 */
1566ae7a6b38SJeff Roberson 	return (0);
1567ae7a6b38SJeff Roberson 
1568ae7a6b38SJeff Roberson }
1569ae7a6b38SJeff Roberson 
1570ae7a6b38SJeff Roberson /*
157135e6168fSJeff Roberson  * Scale the scheduling priority according to the "interactivity" of this
157235e6168fSJeff Roberson  * process.
157335e6168fSJeff Roberson  */
157415dc847eSJeff Roberson static void
15758460a577SJohn Birrell sched_priority(struct thread *td)
157635e6168fSJeff Roberson {
1577e7d50326SJeff Roberson 	int score;
157835e6168fSJeff Roberson 	int pri;
157935e6168fSJeff Roberson 
1580c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
158115dc847eSJeff Roberson 		return;
1582e7d50326SJeff Roberson 	/*
1583e7d50326SJeff Roberson 	 * If the score is interactive we place the thread in the realtime
1584e7d50326SJeff Roberson 	 * queue with a priority that is less than kernel and interrupt
1585e7d50326SJeff Roberson 	 * priorities.  These threads are not subject to nice restrictions.
1586e7d50326SJeff Roberson 	 *
1587ae7a6b38SJeff Roberson 	 * Scores greater than this are placed on the normal timeshare queue
1588e7d50326SJeff Roberson 	 * where the priority is partially decided by the most recent cpu
1589e7d50326SJeff Roberson 	 * utilization and the rest is decided by nice value.
1590a5423ea3SJeff Roberson 	 *
1591a5423ea3SJeff Roberson 	 * The nice value of the process has a linear effect on the calculated
1592a5423ea3SJeff Roberson 	 * score.  Negative nice values make it easier for a thread to be
1593a5423ea3SJeff Roberson 	 * considered interactive.
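	 * For example, assuming the default interactivity threshold of 30,
	 * a score of 15 lands in the middle of the interactive priority
	 * range.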
1594e7d50326SJeff Roberson 	 */
1595a0f15352SJohn Baldwin 	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1596e7d50326SJeff Roberson 	if (score < sched_interact) {
159712d56c0fSJohn Baldwin 		pri = PRI_MIN_INTERACT;
159812d56c0fSJohn Baldwin 		pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) /
159978920008SJohn Baldwin 		    sched_interact) * score;
160012d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
16019a93305aSJeff Roberson 		    ("sched_priority: invalid interactive priority %d score %d",
16029a93305aSJeff Roberson 		    pri, score));
1603e7d50326SJeff Roberson 	} else {
1604e7d50326SJeff Roberson 		pri = SCHED_PRI_MIN;
160593ccd6bfSKonstantin Belousov 		if (td_get_sched(td)->ts_ticks)
160693ccd6bfSKonstantin Belousov 			pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
16075457fa23SJohn Baldwin 			    SCHED_PRI_RANGE - 1);
1608e7d50326SJeff Roberson 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
160912d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
1610ae7a6b38SJeff Roberson 		    ("sched_priority: invalid priority %d: nice %d, "
1611ae7a6b38SJeff Roberson 		    "ticks %d ftick %d ltick %d tick pri %d",
161293ccd6bfSKonstantin Belousov 		    pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks,
161393ccd6bfSKonstantin Belousov 		    td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick,
161493ccd6bfSKonstantin Belousov 		    SCHED_PRI_TICKS(td_get_sched(td))));
1615e7d50326SJeff Roberson 	}
16168460a577SJohn Birrell 	sched_user_prio(td, pri);
161735e6168fSJeff Roberson 
161815dc847eSJeff Roberson 	return;
161935e6168fSJeff Roberson }
162035e6168fSJeff Roberson 
162135e6168fSJeff Roberson /*
1622d322132cSJeff Roberson  * This routine enforces a maximum limit on the amount of scheduling history
1623ae7a6b38SJeff Roberson  * kept.  It is called after either the slptime or runtime is adjusted.  This
1624ae7a6b38SJeff Roberson  * function is ugly due to integer math.
1625d322132cSJeff Roberson  */
16264b60e324SJeff Roberson static void
16278460a577SJohn Birrell sched_interact_update(struct thread *td)
16284b60e324SJeff Roberson {
1629155b6ca1SJeff Roberson 	struct td_sched *ts;
16309a93305aSJeff Roberson 	u_int sum;
16313f741ca1SJeff Roberson 
163293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1633ae7a6b38SJeff Roberson 	sum = ts->ts_runtime + ts->ts_slptime;
1634d322132cSJeff Roberson 	if (sum < SCHED_SLP_RUN_MAX)
1635d322132cSJeff Roberson 		return;
1636d322132cSJeff Roberson 	/*
1637155b6ca1SJeff Roberson 	 * This only happens from two places:
1638155b6ca1SJeff Roberson 	 * 1) We have added an unusual amount of run time from fork_exit.
1639155b6ca1SJeff Roberson 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1640155b6ca1SJeff Roberson 	 */
1641155b6ca1SJeff Roberson 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1642ae7a6b38SJeff Roberson 		if (ts->ts_runtime > ts->ts_slptime) {
1643ae7a6b38SJeff Roberson 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1644ae7a6b38SJeff Roberson 			ts->ts_slptime = 1;
1645155b6ca1SJeff Roberson 		} else {
1646ae7a6b38SJeff Roberson 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1647ae7a6b38SJeff Roberson 			ts->ts_runtime = 1;
1648155b6ca1SJeff Roberson 		}
1649155b6ca1SJeff Roberson 		return;
1650155b6ca1SJeff Roberson 	}
1651155b6ca1SJeff Roberson 	/*
1652d322132cSJeff Roberson 	 * If we have exceeded by more than 1/5th then the algorithm below
1653d322132cSJeff Roberson 	 * will not bring us back into range.  Dividing by two here forces
16542454aaf5SJeff Roberson 	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
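	 * For example, a sum of 1.1 * SCHED_SLP_RUN_MAX skips the division
	 * by two and is instead scaled by 4/5 below, decaying to
	 * 0.88 * SCHED_SLP_RUN_MAX.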
1655d322132cSJeff Roberson 	 */
165637a35e4aSJeff Roberson 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1657ae7a6b38SJeff Roberson 		ts->ts_runtime /= 2;
1658ae7a6b38SJeff Roberson 		ts->ts_slptime /= 2;
1659d322132cSJeff Roberson 		return;
1660d322132cSJeff Roberson 	}
1661ae7a6b38SJeff Roberson 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1662ae7a6b38SJeff Roberson 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1663d322132cSJeff Roberson }
1664d322132cSJeff Roberson 
1665ae7a6b38SJeff Roberson /*
1666ae7a6b38SJeff Roberson  * Scale back the interactivity history when a child thread is created.  The
1667ae7a6b38SJeff Roberson  * history is inherited from the parent but the thread may behave totally
1668ae7a6b38SJeff Roberson  * differently.  For example, a shell spawning a compiler process.  We want
1669ae7a6b38SJeff Roberson  * to learn that the compiler is behaving badly very quickly.
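 * For instance, if the inherited sum is three times SCHED_SLP_RUN_FORK,
 * both the run and sleep history are divided by three.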
1670ae7a6b38SJeff Roberson  */
1671d322132cSJeff Roberson static void
16728460a577SJohn Birrell sched_interact_fork(struct thread *td)
1673d322132cSJeff Roberson {
167493ccd6bfSKonstantin Belousov 	struct td_sched *ts;
1675d322132cSJeff Roberson 	int ratio;
1676d322132cSJeff Roberson 	int sum;
1677d322132cSJeff Roberson 
167893ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
167993ccd6bfSKonstantin Belousov 	sum = ts->ts_runtime + ts->ts_slptime;
1680d322132cSJeff Roberson 	if (sum > SCHED_SLP_RUN_FORK) {
1681d322132cSJeff Roberson 		ratio = sum / SCHED_SLP_RUN_FORK;
168293ccd6bfSKonstantin Belousov 		ts->ts_runtime /= ratio;
168393ccd6bfSKonstantin Belousov 		ts->ts_slptime /= ratio;
16844b60e324SJeff Roberson 	}
16854b60e324SJeff Roberson }
16864b60e324SJeff Roberson 
168715dc847eSJeff Roberson /*
1688ae7a6b38SJeff Roberson  * Called from proc0_init() to setup the scheduler fields.
1689ed062c8dSJulian Elischer  */
1690ed062c8dSJulian Elischer void
1691ed062c8dSJulian Elischer schedinit(void)
1692ed062c8dSJulian Elischer {
169393ccd6bfSKonstantin Belousov 	struct td_sched *ts0;
1694e7d50326SJeff Roberson 
1695ed062c8dSJulian Elischer 	/*
169693ccd6bfSKonstantin Belousov 	 * Set up the scheduler specific parts of thread0.
1697ed062c8dSJulian Elischer 	 */
169893ccd6bfSKonstantin Belousov 	ts0 = td_get_sched(&thread0);
169993ccd6bfSKonstantin Belousov 	ts0->ts_ltick = ticks;
170093ccd6bfSKonstantin Belousov 	ts0->ts_ftick = ticks;
170193ccd6bfSKonstantin Belousov 	ts0->ts_slice = 0;
17021408b84aSHans Petter Selasky 	ts0->ts_cpu = curcpu;	/* set valid CPU number */
1703ed062c8dSJulian Elischer }
1704ed062c8dSJulian Elischer 
1705ed062c8dSJulian Elischer /*
170615dc847eSJeff Roberson  * This is only somewhat accurate since, given many processes of the same
170715dc847eSJeff Roberson  * priority, they will switch when their slices run out, which will be
1708e7d50326SJeff Roberson  * at most sched_slice stathz ticks.
170915dc847eSJeff Roberson  */
171035e6168fSJeff Roberson int
171135e6168fSJeff Roberson sched_rr_interval(void)
171235e6168fSJeff Roberson {
1713e7d50326SJeff Roberson 
1714579895dfSAlexander Motin 	/* Convert sched_slice from stathz to hz. */
171537f4e025SAlexander Motin 	return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
171635e6168fSJeff Roberson }
171735e6168fSJeff Roberson 
1718ae7a6b38SJeff Roberson /*
1719ae7a6b38SJeff Roberson  * Update the percent cpu tracking information when it is requested or
1720ae7a6b38SJeff Roberson  * the total history exceeds the maximum.  We keep a sliding history of
1721ae7a6b38SJeff Roberson  * tick counts that slowly decays.  This is less precise than the 4BSD
1722ae7a6b38SJeff Roberson  * mechanism since it happens with less regular and frequent events.
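 *
 * If the thread has not run for SCHED_TICK_TARG or more ticks, its
 * history is reset outright; otherwise, once the window spans at least
 * SCHED_TICK_MAX ticks, the accumulated count is scaled down to cover
 * roughly the last SCHED_TICK_TARG ticks.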
1723ae7a6b38SJeff Roberson  */
172422bf7d9aSJeff Roberson static void
17257295465eSAlexander Motin sched_pctcpu_update(struct td_sched *ts, int run)
172635e6168fSJeff Roberson {
17277295465eSAlexander Motin 	int t = ticks;
1728e7d50326SJeff Roberson 
172978133024SMark Johnston 	/*
173078133024SMark Johnston 	 * The signed difference may be negative if the thread hasn't run for
173178133024SMark Johnston 	 * over half of the ticks rollover period.
173278133024SMark Johnston 	 */
173378133024SMark Johnston 	if ((u_int)(t - ts->ts_ltick) >= SCHED_TICK_TARG) {
1734ad1e7d28SJulian Elischer 		ts->ts_ticks = 0;
17357295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
17367295465eSAlexander Motin 	} else if (t - ts->ts_ftick >= SCHED_TICK_MAX) {
17377295465eSAlexander Motin 		ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) *
17387295465eSAlexander Motin 		    (ts->ts_ltick - (t - SCHED_TICK_TARG));
17397295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
17407295465eSAlexander Motin 	}
17417295465eSAlexander Motin 	if (run)
17427295465eSAlexander Motin 		ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT;
17437295465eSAlexander Motin 	ts->ts_ltick = t;
174435e6168fSJeff Roberson }
174535e6168fSJeff Roberson 
1746ae7a6b38SJeff Roberson /*
1747ae7a6b38SJeff Roberson  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1748ae7a6b38SJeff Roberson  * if necessary.  This is the back-end for several priority related
1749ae7a6b38SJeff Roberson  * functions.
1750ae7a6b38SJeff Roberson  */
1751e7d50326SJeff Roberson static void
1752f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio)
175335e6168fSJeff Roberson {
1754ad1e7d28SJulian Elischer 	struct td_sched *ts;
175573daf66fSJeff Roberson 	struct tdq *tdq;
175673daf66fSJeff Roberson 	int oldpri;
175735e6168fSJeff Roberson 
17588f51ad55SJeff Roberson 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
17598f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, "new prio:%d", prio,
17608f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(curthread));
1761d9fae5abSAndriy Gapon 	SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
1762e87fc7cfSAndriy Gapon 	if (td != curthread && prio < td->td_priority) {
17638f51ad55SJeff Roberson 		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
17648f51ad55SJeff Roberson 		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
17658f51ad55SJeff Roberson 		    prio, KTR_ATTR_LINKED, sched_tdname(td));
1766d9fae5abSAndriy Gapon 		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
1767b3e9e682SRyan Stone 		    curthread);
17688f51ad55SJeff Roberson 	}
176993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
17707b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1771f5c157d9SJohn Baldwin 	if (td->td_priority == prio)
1772f5c157d9SJohn Baldwin 		return;
17733f741ca1SJeff Roberson 	/*
17743f741ca1SJeff Roberson 	 * If the priority has been elevated due to priority
17753f741ca1SJeff Roberson 	 * propagation, we may have to move ourselves to a new
1776e7d50326SJeff Roberson 	 * queue.  This could be optimized to not re-add in some
1777e7d50326SJeff Roberson 	 * cases.
1778f2b74cbfSJeff Roberson 	 */
17796d55b3ecSJeff Roberson 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1780e7d50326SJeff Roberson 		sched_rem(td);
1781e7d50326SJeff Roberson 		td->td_priority = prio;
1782*61a74c5cSJeff Roberson 		sched_add(td, SRQ_BORROWING | SRQ_HOLDTD);
178373daf66fSJeff Roberson 		return;
178473daf66fSJeff Roberson 	}
17856d55b3ecSJeff Roberson 	/*
17866d55b3ecSJeff Roberson 	 * If the thread is currently running we may have to adjust the lowpri
17876d55b3ecSJeff Roberson 	 * information so other cpus are aware of our current priority.
17886d55b3ecSJeff Roberson 	 */
17896d55b3ecSJeff Roberson 	if (TD_IS_RUNNING(td)) {
1790ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(ts->ts_cpu);
179162fa74d9SJeff Roberson 		oldpri = td->td_priority;
17923f741ca1SJeff Roberson 		td->td_priority = prio;
179362fa74d9SJeff Roberson 		if (prio < tdq->tdq_lowpri)
179462fa74d9SJeff Roberson 			tdq->tdq_lowpri = prio;
179562fa74d9SJeff Roberson 		else if (tdq->tdq_lowpri == oldpri)
179662fa74d9SJeff Roberson 			tdq_setlowpri(tdq, td);
17976d55b3ecSJeff Roberson 		return;
179873daf66fSJeff Roberson 	}
17996d55b3ecSJeff Roberson 	td->td_priority = prio;
1800ae7a6b38SJeff Roberson }
180135e6168fSJeff Roberson 
1802f5c157d9SJohn Baldwin /*
1803f5c157d9SJohn Baldwin  * Update a thread's priority when it is lent another thread's
1804f5c157d9SJohn Baldwin  * priority.
1805f5c157d9SJohn Baldwin  */
1806f5c157d9SJohn Baldwin void
1807f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1808f5c157d9SJohn Baldwin {
1809f5c157d9SJohn Baldwin 
1810f5c157d9SJohn Baldwin 	td->td_flags |= TDF_BORROWING;
1811f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1812f5c157d9SJohn Baldwin }
1813f5c157d9SJohn Baldwin 
1814f5c157d9SJohn Baldwin /*
1815f5c157d9SJohn Baldwin  * Restore a thread's priority when priority propagation is
1816f5c157d9SJohn Baldwin  * over.  The prio argument is the minimum priority the thread
1817f5c157d9SJohn Baldwin  * needs to have to satisfy other possible priority lending
1818f5c157d9SJohn Baldwin  * requests.  If the thread's regular priority is less
1819f5c157d9SJohn Baldwin  * important than prio, the thread will keep a priority boost
1820f5c157d9SJohn Baldwin  * of prio.
1821f5c157d9SJohn Baldwin  */
1822f5c157d9SJohn Baldwin void
1823f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1824f5c157d9SJohn Baldwin {
1825f5c157d9SJohn Baldwin 	u_char base_pri;
1826f5c157d9SJohn Baldwin 
1827f5c157d9SJohn Baldwin 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1828f5c157d9SJohn Baldwin 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
18298460a577SJohn Birrell 		base_pri = td->td_user_pri;
1830f5c157d9SJohn Baldwin 	else
1831f5c157d9SJohn Baldwin 		base_pri = td->td_base_pri;
1832f5c157d9SJohn Baldwin 	if (prio >= base_pri) {
1833f5c157d9SJohn Baldwin 		td->td_flags &= ~TDF_BORROWING;
1834f5c157d9SJohn Baldwin 		sched_thread_priority(td, base_pri);
1835f5c157d9SJohn Baldwin 	} else
1836f5c157d9SJohn Baldwin 		sched_lend_prio(td, prio);
1837f5c157d9SJohn Baldwin }
1838f5c157d9SJohn Baldwin 
1839ae7a6b38SJeff Roberson /*
1840ae7a6b38SJeff Roberson  * Standard entry for setting the priority to an absolute value.
1841ae7a6b38SJeff Roberson  */
1842f5c157d9SJohn Baldwin void
1843f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1844f5c157d9SJohn Baldwin {
1845f5c157d9SJohn Baldwin 	u_char oldprio;
1846f5c157d9SJohn Baldwin 
1847f5c157d9SJohn Baldwin 	/* First, update the base priority. */
1848f5c157d9SJohn Baldwin 	td->td_base_pri = prio;
1849f5c157d9SJohn Baldwin 
1850f5c157d9SJohn Baldwin 	/*
185150aaa791SJohn Baldwin 	 * If the thread is borrowing another thread's priority, don't
1852f5c157d9SJohn Baldwin 	 * ever lower the priority.
1853f5c157d9SJohn Baldwin 	 */
1854f5c157d9SJohn Baldwin 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1855f5c157d9SJohn Baldwin 		return;
1856f5c157d9SJohn Baldwin 
1857f5c157d9SJohn Baldwin 	/* Change the real priority. */
1858f5c157d9SJohn Baldwin 	oldprio = td->td_priority;
1859f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1860f5c157d9SJohn Baldwin 
1861f5c157d9SJohn Baldwin 	/*
1862f5c157d9SJohn Baldwin 	 * If the thread is on a turnstile, then let the turnstile update
1863f5c157d9SJohn Baldwin 	 * its state.
1864f5c157d9SJohn Baldwin 	 */
1865f5c157d9SJohn Baldwin 	if (TD_ON_LOCK(td) && oldprio != prio)
1866f5c157d9SJohn Baldwin 		turnstile_adjust(td, oldprio);
1867f5c157d9SJohn Baldwin }
1868f5c157d9SJohn Baldwin 
1869ae7a6b38SJeff Roberson /*
1870ae7a6b38SJeff Roberson  * Set the base user priority; does not affect the current running priority.
1871ae7a6b38SJeff Roberson  */
187235e6168fSJeff Roberson void
18738460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
18743db720fdSDavid Xu {
18753db720fdSDavid Xu 
18768460a577SJohn Birrell 	td->td_base_user_pri = prio;
1877acbe332aSDavid Xu 	if (td->td_lend_user_pri <= prio)
1878fc6c30f6SJulian Elischer 		return;
18798460a577SJohn Birrell 	td->td_user_pri = prio;
18803db720fdSDavid Xu }
18813db720fdSDavid Xu 
18823db720fdSDavid Xu void
18833db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
18843db720fdSDavid Xu {
18853db720fdSDavid Xu 
1886435806d3SDavid Xu 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1887acbe332aSDavid Xu 	td->td_lend_user_pri = prio;
1888c8e368a9SDavid Xu 	td->td_user_pri = min(prio, td->td_base_user_pri);
1889c8e368a9SDavid Xu 	if (td->td_priority > td->td_user_pri)
1890c8e368a9SDavid Xu 		sched_prio(td, td->td_user_pri);
1891c8e368a9SDavid Xu 	else if (td->td_priority != td->td_user_pri)
1892c8e368a9SDavid Xu 		td->td_flags |= TDF_NEEDRESCHED;
1893435806d3SDavid Xu }
18943db720fdSDavid Xu 
1895ac97da9aSMateusz Guzik /*
1896ac97da9aSMateusz Guzik  * Like the above but first check if there is anything to do.
1897ac97da9aSMateusz Guzik  */
1898ac97da9aSMateusz Guzik void
1899ac97da9aSMateusz Guzik sched_lend_user_prio_cond(struct thread *td, u_char prio)
1900ac97da9aSMateusz Guzik {
1901ac97da9aSMateusz Guzik 
1902ac97da9aSMateusz Guzik 	if (td->td_lend_user_pri != prio)
1903ac97da9aSMateusz Guzik 		goto lend;
1904ac97da9aSMateusz Guzik 	if (td->td_user_pri != min(prio, td->td_base_user_pri))
1905ac97da9aSMateusz Guzik 		goto lend;
1906ac97da9aSMateusz Guzik 	if (td->td_priority >= td->td_user_pri)
1907ac97da9aSMateusz Guzik 		goto lend;
1908ac97da9aSMateusz Guzik 	return;
1909ac97da9aSMateusz Guzik 
1910ac97da9aSMateusz Guzik lend:
1911ac97da9aSMateusz Guzik 	thread_lock(td);
1912ac97da9aSMateusz Guzik 	sched_lend_user_prio(td, prio);
1913ac97da9aSMateusz Guzik 	thread_unlock(td);
1914ac97da9aSMateusz Guzik }
1915ac97da9aSMateusz Guzik 
19164c8a8cfcSKonstantin Belousov #ifdef SMP
1917ae7a6b38SJeff Roberson /*
191897e9382dSDon Lewis  * This tdq is about to idle.  Try to steal a thread from another CPU before
191997e9382dSDon Lewis  * choosing the idle thread.
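 * This is called from sched_switch() and bounds its search to
 * trysteal_limit levels of the topology in order to limit the time
 * spent here while preemption is disabled.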
192097e9382dSDon Lewis  */
192197e9382dSDon Lewis static void
192297e9382dSDon Lewis tdq_trysteal(struct tdq *tdq)
192397e9382dSDon Lewis {
192497e9382dSDon Lewis 	struct cpu_group *cg;
192597e9382dSDon Lewis 	struct tdq *steal;
192697e9382dSDon Lewis 	cpuset_t mask;
192797e9382dSDon Lewis 	int cpu, i;
192897e9382dSDon Lewis 
192997e9382dSDon Lewis 	if (smp_started == 0 || trysteal_limit == 0 || tdq->tdq_cg == NULL)
193097e9382dSDon Lewis 		return;
193197e9382dSDon Lewis 	CPU_FILL(&mask);
193297e9382dSDon Lewis 	CPU_CLR(PCPU_GET(cpuid), &mask);
193397e9382dSDon Lewis 	/* We don't want to be preempted while we're iterating. */
193497e9382dSDon Lewis 	spinlock_enter();
193597e9382dSDon Lewis 	TDQ_UNLOCK(tdq);
193697e9382dSDon Lewis 	for (i = 1, cg = tdq->tdq_cg; ; ) {
193797e9382dSDon Lewis 		cpu = sched_highest(cg, mask, steal_thresh);
193897e9382dSDon Lewis 		/*
193997e9382dSDon Lewis 		 * If a thread was added while interrupts were disabled don't
194097e9382dSDon Lewis 		 * steal one here.
194197e9382dSDon Lewis 		 */
194297e9382dSDon Lewis 		if (tdq->tdq_load > 0) {
194397e9382dSDon Lewis 			TDQ_LOCK(tdq);
194497e9382dSDon Lewis 			break;
194597e9382dSDon Lewis 		}
194697e9382dSDon Lewis 		if (cpu == -1) {
194797e9382dSDon Lewis 			i++;
194897e9382dSDon Lewis 			cg = cg->cg_parent;
194997e9382dSDon Lewis 			if (cg == NULL || i > trysteal_limit) {
195097e9382dSDon Lewis 				TDQ_LOCK(tdq);
195197e9382dSDon Lewis 				break;
195297e9382dSDon Lewis 			}
195397e9382dSDon Lewis 			continue;
195497e9382dSDon Lewis 		}
195597e9382dSDon Lewis 		steal = TDQ_CPU(cpu);
195697e9382dSDon Lewis 		/*
195797e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
195897e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
195997e9382dSDon Lewis 		 */
196097e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
196197e9382dSDon Lewis 		    steal->tdq_transferable == 0)
196297e9382dSDon Lewis 			continue;
196397e9382dSDon Lewis 		tdq_lock_pair(tdq, steal);
196497e9382dSDon Lewis 		/*
196597e9382dSDon Lewis 		 * If we get to this point, unconditionally exit the loop
196697e9382dSDon Lewis 		 * to bound the time spent in the critical section.
196797e9382dSDon Lewis 		 *
196897e9382dSDon Lewis 		 * If a thread was added while interrupts were disabled don't
196997e9382dSDon Lewis 		 * steal one here.
197097e9382dSDon Lewis 		 */
197197e9382dSDon Lewis 		if (tdq->tdq_load > 0) {
197297e9382dSDon Lewis 			TDQ_UNLOCK(steal);
197397e9382dSDon Lewis 			break;
197497e9382dSDon Lewis 		}
197597e9382dSDon Lewis 		/*
197697e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
197797e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
197897e9382dSDon Lewis 		 */
197997e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
198097e9382dSDon Lewis 		    steal->tdq_transferable == 0) {
198197e9382dSDon Lewis 			TDQ_UNLOCK(steal);
198297e9382dSDon Lewis 			break;
198397e9382dSDon Lewis 		}
198497e9382dSDon Lewis 		/*
198597e9382dSDon Lewis 		 * If we fail to acquire one due to affinity restrictions,
198697e9382dSDon Lewis 		 * bail out and let the idle thread do a more complete search
198797e9382dSDon Lewis 		 * outside of a critical section.
198897e9382dSDon Lewis 		 */
198997e9382dSDon Lewis 		if (tdq_move(steal, tdq) == NULL) {
199097e9382dSDon Lewis 			TDQ_UNLOCK(steal);
199197e9382dSDon Lewis 			break;
199297e9382dSDon Lewis 		}
199397e9382dSDon Lewis 		TDQ_UNLOCK(steal);
199497e9382dSDon Lewis 		break;
199597e9382dSDon Lewis 	}
199697e9382dSDon Lewis 	spinlock_exit();
199797e9382dSDon Lewis }
19984c8a8cfcSKonstantin Belousov #endif
199997e9382dSDon Lewis 
200097e9382dSDon Lewis /*
2001c47f202bSJeff Roberson  * Handle migration from sched_switch().  This happens only for
2002c47f202bSJeff Roberson  * cpu binding.
2003c47f202bSJeff Roberson  */
2004c47f202bSJeff Roberson static struct mtx *
2005c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
2006c47f202bSJeff Roberson {
2007c47f202bSJeff Roberson 	struct tdq *tdn;
2008*61a74c5cSJeff Roberson 	struct mtx *mtx;
2009c47f202bSJeff Roberson 
2010efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: "
2011efe67753SNathan Whitehorn 	    "thread %s queued on absent CPU %d.", td->td_name,
2012efe67753SNathan Whitehorn 	    td_get_sched(td)->ts_cpu));
201393ccd6bfSKonstantin Belousov 	tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
2014c47f202bSJeff Roberson #ifdef SMP
20159727e637SJeff Roberson 	tdq_load_rem(tdq, td);
2016c47f202bSJeff Roberson 	/*
2017c47f202bSJeff Roberson 	 * Do the lock dance required to avoid LOR.  We grab an extra
2018c47f202bSJeff Roberson 	 * spinlock nesting to prevent preemption while we're
2019c47f202bSJeff Roberson 	 * not holding either run-queue lock.
2020c47f202bSJeff Roberson 	 */
2021c47f202bSJeff Roberson 	spinlock_enter();
2022*61a74c5cSJeff Roberson 	mtx = thread_lock_block(td);
2023*61a74c5cSJeff Roberson 	mtx_unlock_spin(mtx);
2024435068aaSAttilio Rao 
2025435068aaSAttilio Rao 	/*
2026435068aaSAttilio Rao 	 * Acquire both run-queue locks before placing the thread on the new
2027435068aaSAttilio Rao 	 * run-queue to avoid deadlocks created by placing a thread with a
2028435068aaSAttilio Rao 	 * blocked lock on the run-queue of a remote processor.  The deadlock
2029435068aaSAttilio Rao 	 * occurs when a third processor attempts to lock the two queues in
2030435068aaSAttilio Rao 	 * question while the target processor is spinning with its own
2031435068aaSAttilio Rao 	 * run-queue lock held while waiting for the blocked lock to clear.
2032435068aaSAttilio Rao 	 */
2033435068aaSAttilio Rao 	tdq_lock_pair(tdn, tdq);
2034c47f202bSJeff Roberson 	tdq_add(tdn, td, flags);
203527ee18adSRyan Stone 	tdq_notify(tdn, td);
2036c47f202bSJeff Roberson 	TDQ_UNLOCK(tdn);
2037c47f202bSJeff Roberson 	spinlock_exit();
2038c47f202bSJeff Roberson #endif
2039c47f202bSJeff Roberson 	return (TDQ_LOCKPTR(tdn));
2040c47f202bSJeff Roberson }
2041c47f202bSJeff Roberson 
2042c47f202bSJeff Roberson /*
2043*61a74c5cSJeff Roberson  * thread_lock_unblock() that does not assume td_lock is blocked.
2044ae7a6b38SJeff Roberson  */
2045ae7a6b38SJeff Roberson static inline void
2046ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx)
2047ae7a6b38SJeff Roberson {
2048ae7a6b38SJeff Roberson 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
2049ae7a6b38SJeff Roberson 	    (uintptr_t)mtx);
2050ae7a6b38SJeff Roberson }
2051ae7a6b38SJeff Roberson 
2052ae7a6b38SJeff Roberson /*
2053ae7a6b38SJeff Roberson  * Switch threads.  This function has to handle threads coming in while
2054ae7a6b38SJeff Roberson  * blocked for some reason, running, or idle.  It also must deal with
2055ae7a6b38SJeff Roberson  * migrating a thread from one queue to another as running threads may
2056ae7a6b38SJeff Roberson  * be assigned elsewhere via binding.
2057ae7a6b38SJeff Roberson  */
20583db720fdSDavid Xu void
20593389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags)
206035e6168fSJeff Roberson {
2061c02bbb43SJeff Roberson 	struct tdq *tdq;
2062ad1e7d28SJulian Elischer 	struct td_sched *ts;
2063ae7a6b38SJeff Roberson 	struct mtx *mtx;
2064c47f202bSJeff Roberson 	int srqflag;
20653d7f4117SAlexander Motin 	int cpuid, preempted;
206635e6168fSJeff Roberson 
20677b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
20686d55b3ecSJeff Roberson 	KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument"));
206935e6168fSJeff Roberson 
2070ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2071018ff686SJeff Roberson 	tdq = TDQ_SELF();
207293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
2073c47f202bSJeff Roberson 	mtx = td->td_lock;
20747295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
2075ae7a6b38SJeff Roberson 	ts->ts_rltick = ticks;
2076060563ecSJulian Elischer 	td->td_lastcpu = td->td_oncpu;
2077060563ecSJulian Elischer 	td->td_oncpu = NOCPU;
2078ad9dadc4SAndriy Gapon 	preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
2079ad9dadc4SAndriy Gapon 	    (flags & SW_PREEMPT) != 0;
20803d7f4117SAlexander Motin 	td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
208177918643SStephan Uphoff 	td->td_owepreempt = 0;
20827789ab32SMark Johnston 	tdq->tdq_owepreempt = 0;
20832c27cb3aSAlexander Motin 	if (!TD_IS_IDLETHREAD(td))
20841690c6c1SJeff Roberson 		tdq->tdq_switchcnt++;
20857789ab32SMark Johnston 
2086b11fdad0SJeff Roberson 	/*
2087ae7a6b38SJeff Roberson 	 * The lock pointer in an idle thread should never change.  Reset the
2088ae7a6b38SJeff Roberson 	 * thread's state to CAN_RUN as well.
2089b11fdad0SJeff Roberson 	 */
2090486a9414SJulian Elischer 	if (TD_IS_IDLETHREAD(td)) {
2091ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2092bf0acc27SJohn Baldwin 		TD_SET_CAN_RUN(td);
20937b20fb19SJeff Roberson 	} else if (TD_IS_RUNNING(td)) {
2094ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
20953d7f4117SAlexander Motin 		srqflag = preempted ?
2096598b368dSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
2097c47f202bSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING;
2098ba4932b5SMatthew D Fleming #ifdef SMP
20990f7a0ebdSMatthew D Fleming 		if (THREAD_CAN_MIGRATE(td) && !THREAD_CAN_SCHED(td, ts->ts_cpu))
21000f7a0ebdSMatthew D Fleming 			ts->ts_cpu = sched_pickcpu(td, 0);
2101ba4932b5SMatthew D Fleming #endif
2102c47f202bSJeff Roberson 		if (ts->ts_cpu == cpuid)
21039727e637SJeff Roberson 			tdq_runq_add(tdq, td, srqflag);
21040f7a0ebdSMatthew D Fleming 		else {
21050f7a0ebdSMatthew D Fleming 			KASSERT(THREAD_CAN_MIGRATE(td) ||
21060f7a0ebdSMatthew D Fleming 			    (ts->ts_flags & TSF_BOUND) != 0,
21070f7a0ebdSMatthew D Fleming 			    ("Thread %p shouldn't migrate", td));
2108c47f202bSJeff Roberson 			mtx = sched_switch_migrate(tdq, td, srqflag);
21090f7a0ebdSMatthew D Fleming 		}
2110ae7a6b38SJeff Roberson 	} else {
2111ae7a6b38SJeff Roberson 		/* This thread must be going to sleep. */
2112b0b9dee5SAttilio Rao 		mtx = thread_lock_block(td);
2113*61a74c5cSJeff Roberson 		if (mtx != TDQ_LOCKPTR(tdq)) {
2114*61a74c5cSJeff Roberson 			spinlock_enter();
2115*61a74c5cSJeff Roberson 			mtx_unlock_spin(mtx);
2116*61a74c5cSJeff Roberson 			TDQ_LOCK(tdq);
2117*61a74c5cSJeff Roberson 			spinlock_exit();
2118*61a74c5cSJeff Roberson 		}
21199727e637SJeff Roberson 		tdq_load_rem(tdq, td);
21204c8a8cfcSKonstantin Belousov #ifdef SMP
212197e9382dSDon Lewis 		if (tdq->tdq_load == 0)
212297e9382dSDon Lewis 			tdq_trysteal(tdq);
21234c8a8cfcSKonstantin Belousov #endif
2124ae7a6b38SJeff Roberson 	}
2125afa0a46cSAndriy Gapon 
2126afa0a46cSAndriy Gapon #if (KTR_COMPILE & KTR_SCHED) != 0
2127afa0a46cSAndriy Gapon 	if (TD_IS_IDLETHREAD(td))
2128afa0a46cSAndriy Gapon 		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
2129afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority);
2130afa0a46cSAndriy Gapon 	else
2131afa0a46cSAndriy Gapon 		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
2132afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
2133afa0a46cSAndriy Gapon 		    "lockname:\"%s\"", td->td_lockname);
2134afa0a46cSAndriy Gapon #endif
2135afa0a46cSAndriy Gapon 
2136ae7a6b38SJeff Roberson 	/*
2137ae7a6b38SJeff Roberson 	 * We enter here with the thread blocked and assigned to the
2138ae7a6b38SJeff Roberson 	 * appropriate cpu run-queue or sleep-queue and with the current
2139ae7a6b38SJeff Roberson 	 * thread-queue locked.
2140ae7a6b38SJeff Roberson 	 */
2141ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
21422454aaf5SJeff Roberson 	newtd = choosethread();
2143ae7a6b38SJeff Roberson 	/*
2144ae7a6b38SJeff Roberson 	 * Call the MD code to switch contexts if necessary.
2145ae7a6b38SJeff Roberson 	 */
2146ebccf1e3SJoseph Koshy 	if (td != newtd) {
2147ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2148ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2149ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
2150ebccf1e3SJoseph Koshy #endif
2151d9fae5abSAndriy Gapon 		SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
2152eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
215359c68134SJeff Roberson 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
215493ccd6bfSKonstantin Belousov 		sched_pctcpu_update(td_get_sched(newtd), 0);
21556f5f25e5SJohn Birrell 
21566f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
21576f5f25e5SJohn Birrell 		/*
21586f5f25e5SJohn Birrell 		 * If DTrace has set the active vtime enum to anything
21596f5f25e5SJohn Birrell 		 * other than INACTIVE (0), then it should have set the
21606f5f25e5SJohn Birrell 		 * function to call.
21616f5f25e5SJohn Birrell 		 */
21626f5f25e5SJohn Birrell 		if (dtrace_vtime_active)
21636f5f25e5SJohn Birrell 			(*dtrace_vtime_switch_func)(newtd);
21646f5f25e5SJohn Birrell #endif
21656f5f25e5SJohn Birrell 
2166ae7a6b38SJeff Roberson 		cpu_switch(td, newtd, mtx);
2167ae7a6b38SJeff Roberson 		/*
2168ae7a6b38SJeff Roberson 		 * We may return from cpu_switch on a different cpu.  However,
2169ae7a6b38SJeff Roberson 		 * we always return with td_lock pointing to the current cpu's
2170ae7a6b38SJeff Roberson 		 * run queue lock.
2171ae7a6b38SJeff Roberson 		 */
2172ae7a6b38SJeff Roberson 		cpuid = PCPU_GET(cpuid);
2173018ff686SJeff Roberson 		tdq = TDQ_SELF();
2174eea4f254SJeff Roberson 		lock_profile_obtain_lock_success(
2175eea4f254SJeff Roberson 		    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2176b3e9e682SRyan Stone 
2177d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , on__cpu);
2178ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2179ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2180ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
2181ebccf1e3SJoseph Koshy #endif
2182b3e9e682SRyan Stone 	} else {
2183ae7a6b38SJeff Roberson 		thread_unblock_switch(td, mtx);
2184d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , remain__cpu);
2185b3e9e682SRyan Stone 	}
2186afa0a46cSAndriy Gapon 
2187afa0a46cSAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
2188afa0a46cSAndriy Gapon 	    "prio:%d", td->td_priority);
2189afa0a46cSAndriy Gapon 
2190ae7a6b38SJeff Roberson 	/*
2191ae7a6b38SJeff Roberson 	 * Assert that all went well and return.
2192ae7a6b38SJeff Roberson 	 */
2193ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
2194ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2195ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
219635e6168fSJeff Roberson }
219735e6168fSJeff Roberson 
2198ae7a6b38SJeff Roberson /*
2199ae7a6b38SJeff Roberson  * Adjust thread priorities as a result of a nice request.
2200ae7a6b38SJeff Roberson  */
220135e6168fSJeff Roberson void
2202fa885116SJulian Elischer sched_nice(struct proc *p, int nice)
220335e6168fSJeff Roberson {
220435e6168fSJeff Roberson 	struct thread *td;
220535e6168fSJeff Roberson 
2206fa885116SJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
2207e7d50326SJeff Roberson 
2208fa885116SJulian Elischer 	p->p_nice = nice;
22098460a577SJohn Birrell 	FOREACH_THREAD_IN_PROC(p, td) {
22107b20fb19SJeff Roberson 		thread_lock(td);
22118460a577SJohn Birrell 		sched_priority(td);
2212e7d50326SJeff Roberson 		sched_prio(td, td->td_base_user_pri);
22137b20fb19SJeff Roberson 		thread_unlock(td);
221435e6168fSJeff Roberson 	}
2215fa885116SJulian Elischer }
221635e6168fSJeff Roberson 
2217ae7a6b38SJeff Roberson /*
2218ae7a6b38SJeff Roberson  * Record the sleep time for the interactivity scorer.
2219ae7a6b38SJeff Roberson  */
222035e6168fSJeff Roberson void
2221c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int prio)
222235e6168fSJeff Roberson {
2223e7d50326SJeff Roberson 
22247b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
222535e6168fSJeff Roberson 
222654b0e65fSJeff Roberson 	td->td_slptick = ticks;
222717c4c356SKonstantin Belousov 	if (TD_IS_SUSPENDED(td) || prio >= PSOCK)
2228c5aa6b58SJeff Roberson 		td->td_flags |= TDF_CANSWAP;
22292dc29adbSJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
22302dc29adbSJohn Baldwin 		return;
22310502fe2eSJeff Roberson 	if (static_boost == 1 && prio)
2232c5aa6b58SJeff Roberson 		sched_prio(td, prio);
22330502fe2eSJeff Roberson 	else if (static_boost && td->td_priority > static_boost)
22340502fe2eSJeff Roberson 		sched_prio(td, static_boost);
223535e6168fSJeff Roberson }
223635e6168fSJeff Roberson 
2237ae7a6b38SJeff Roberson /*
2238ae7a6b38SJeff Roberson  * Schedule a thread to resume execution and record how long it voluntarily
2239ae7a6b38SJeff Roberson  * slept.  We also update the pctcpu, interactivity, and priority.
2240*61a74c5cSJeff Roberson  *
2241*61a74c5cSJeff Roberson  * Requires the thread lock on entry, drops on exit.
2242ae7a6b38SJeff Roberson  */
224335e6168fSJeff Roberson void
2244*61a74c5cSJeff Roberson sched_wakeup(struct thread *td, int srqflags)
224535e6168fSJeff Roberson {
224614618990SJeff Roberson 	struct td_sched *ts;
2247ae7a6b38SJeff Roberson 	int slptick;
2248e7d50326SJeff Roberson 
22497b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
225093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
2251c5aa6b58SJeff Roberson 	td->td_flags &= ~TDF_CANSWAP;
2252*61a74c5cSJeff Roberson 
225335e6168fSJeff Roberson 	/*
2254e7d50326SJeff Roberson 	 * If we slept for more than a tick update our interactivity and
2255e7d50326SJeff Roberson 	 * priority.
225635e6168fSJeff Roberson 	 */
225754b0e65fSJeff Roberson 	slptick = td->td_slptick;
225854b0e65fSJeff Roberson 	td->td_slptick = 0;
2259ae7a6b38SJeff Roberson 	if (slptick && slptick != ticks) {
22607295465eSAlexander Motin 		ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT;
22618460a577SJohn Birrell 		sched_interact_update(td);
22627295465eSAlexander Motin 		sched_pctcpu_update(ts, 0);
2263f1e8dc4aSJeff Roberson 	}
22645e5c3873SJeff Roberson 	/*
22655e5c3873SJeff Roberson 	 * Reset the slice value since we slept and advanced the round-robin.
22665e5c3873SJeff Roberson 	 */
22675e5c3873SJeff Roberson 	ts->ts_slice = 0;
2268*61a74c5cSJeff Roberson 	sched_add(td, SRQ_BORING | srqflags);
226935e6168fSJeff Roberson }
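/*
 * The ts_slptime accumulated above feeds the interactivity score, which
 * compares voluntary sleep time against run time on a 0..SCHED_INTERACT_MAX
 * scale.  A hedged sketch of the scoring logic; the authoritative
 * sched_interact_score() appears earlier in this file and may differ in
 * detail:
 */
static inline int
sched_interact_score_sketch(struct td_sched *ts)
{
	u_int div;

	if (ts->ts_runtime > ts->ts_slptime) {
		/* Mostly running: land in the upper, "batch" half. */
		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
	}
	if (ts->ts_slptime > ts->ts_runtime) {
		/* Mostly sleeping: land in the lower, "interactive" half. */
		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
		return (ts->ts_runtime / div);
	}
	/* Equal, non-zero usage earns exactly the midpoint. */
	return (ts->ts_runtime ? SCHED_INTERACT_HALF : 0);
}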
227035e6168fSJeff Roberson 
227135e6168fSJeff Roberson /*
227235e6168fSJeff Roberson  * Penalize the parent for creating a new child and initialize the child's
227335e6168fSJeff Roberson  * priority.
227435e6168fSJeff Roberson  */
227535e6168fSJeff Roberson void
22768460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
227715dc847eSJeff Roberson {
22787b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
227993ccd6bfSKonstantin Belousov 	sched_pctcpu_update(td_get_sched(td), 1);
2280ad1e7d28SJulian Elischer 	sched_fork_thread(td, child);
2281e7d50326SJeff Roberson 	/*
2282e7d50326SJeff Roberson 	 * Penalize the parent and child for forking.
2283e7d50326SJeff Roberson 	 */
2284e7d50326SJeff Roberson 	sched_interact_fork(child);
2285e7d50326SJeff Roberson 	sched_priority(child);
228693ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += tickincr;
2287e7d50326SJeff Roberson 	sched_interact_update(td);
2288e7d50326SJeff Roberson 	sched_priority(td);
2289ad1e7d28SJulian Elischer }
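/*
 * sched_interact_fork(), called above and defined earlier in this file,
 * scales the child's inherited sleep/run history down so the child must
 * re-earn its interactivity.  A hedged sketch of the idea:
 */
static inline void
sched_interact_fork_sketch(struct thread *td)
{
	struct td_sched *ts;
	int ratio, sum;

	ts = td_get_sched(td);
	sum = ts->ts_runtime + ts->ts_slptime;
	/* Only histories longer than the fork threshold are compressed. */
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		ts->ts_runtime /= ratio;
		ts->ts_slptime /= ratio;
	}
}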
2290ad1e7d28SJulian Elischer 
2291ae7a6b38SJeff Roberson /*
2292ae7a6b38SJeff Roberson  * Fork a new thread, may be within the same process.
2293ae7a6b38SJeff Roberson  */
2294ad1e7d28SJulian Elischer void
2295ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
2296ad1e7d28SJulian Elischer {
2297ad1e7d28SJulian Elischer 	struct td_sched *ts;
2298ad1e7d28SJulian Elischer 	struct td_sched *ts2;
22995e5c3873SJeff Roberson 	struct tdq *tdq;
23008460a577SJohn Birrell 
23015e5c3873SJeff Roberson 	tdq = TDQ_SELF();
23028b16c208SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2303e7d50326SJeff Roberson 	/*
2304e7d50326SJeff Roberson 	 * Initialize child.
2305e7d50326SJeff Roberson 	 */
230693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
230793ccd6bfSKonstantin Belousov 	ts2 = td_get_sched(child);
230892de34dfSJohn Baldwin 	child->td_oncpu = NOCPU;
230992de34dfSJohn Baldwin 	child->td_lastcpu = NOCPU;
23105e5c3873SJeff Roberson 	child->td_lock = TDQ_LOCKPTR(tdq);
23118b16c208SJeff Roberson 	child->td_cpuset = cpuset_ref(td->td_cpuset);
23123f289c3fSJeff Roberson 	child->td_domain.dr_policy = td->td_cpuset->cs_domain;
2313ad1e7d28SJulian Elischer 	ts2->ts_cpu = ts->ts_cpu;
23148b16c208SJeff Roberson 	ts2->ts_flags = 0;
2315e7d50326SJeff Roberson 	/*
231622d19207SJohn Baldwin 	 * Grab our parent's CPU estimation information.
2317e7d50326SJeff Roberson 	 */
2318ad1e7d28SJulian Elischer 	ts2->ts_ticks = ts->ts_ticks;
2319ad1e7d28SJulian Elischer 	ts2->ts_ltick = ts->ts_ltick;
2320ad1e7d28SJulian Elischer 	ts2->ts_ftick = ts->ts_ftick;
232122d19207SJohn Baldwin 	/*
232222d19207SJohn Baldwin 	 * Do not inherit any borrowed priority from the parent.
232322d19207SJohn Baldwin 	 */
232422d19207SJohn Baldwin 	child->td_priority = child->td_base_pri;
2325e7d50326SJeff Roberson 	/*
2326e7d50326SJeff Roberson 	 * And update interactivity score.
2327e7d50326SJeff Roberson 	 */
2328ae7a6b38SJeff Roberson 	ts2->ts_slptime = ts->ts_slptime;
2329ae7a6b38SJeff Roberson 	ts2->ts_runtime = ts->ts_runtime;
23305e5c3873SJeff Roberson 	/* Attempt to quickly learn interactivity. */
23315e5c3873SJeff Roberson 	ts2->ts_slice = tdq_slice(tdq) - sched_slice_min;
23328f51ad55SJeff Roberson #ifdef KTR
23338f51ad55SJeff Roberson 	bzero(ts2->ts_name, sizeof(ts2->ts_name));
23348f51ad55SJeff Roberson #endif
233515dc847eSJeff Roberson }
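/*
 * The slice adjustment above starts the child with most of a slice already
 * consumed.  With illustrative boot-time values of sched_slice == 12 stathz
 * ticks and sched_slice_min == 12 / SCHED_SLICE_MIN_DIVISOR == 2, ts_slice
 * begins at 10, so the child trips the end-of-slice check in sched_clock()
 * after only ~2 ticks and has its priority re-evaluated early.
 */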
233615dc847eSJeff Roberson 
2337ae7a6b38SJeff Roberson /*
2338ae7a6b38SJeff Roberson  * Adjust the priority class of a thread.
2339ae7a6b38SJeff Roberson  */
234015dc847eSJeff Roberson void
23418460a577SJohn Birrell sched_class(struct thread *td, int class)
234215dc847eSJeff Roberson {
234315dc847eSJeff Roberson 
23447b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
23458460a577SJohn Birrell 	if (td->td_pri_class == class)
234615dc847eSJeff Roberson 		return;
23478460a577SJohn Birrell 	td->td_pri_class = class;
234835e6168fSJeff Roberson }
234935e6168fSJeff Roberson 
235035e6168fSJeff Roberson /*
235135e6168fSJeff Roberson  * Return some of the child's priority and interactivity to the parent.
235235e6168fSJeff Roberson  */
235335e6168fSJeff Roberson void
2354fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
235535e6168fSJeff Roberson {
2356e7d50326SJeff Roberson 	struct thread *td;
2357141ad61cSJeff Roberson 
23588f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
2359cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2360374ae2a3SJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
2361e7d50326SJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
2362e7d50326SJeff Roberson 	sched_exit_thread(td, child);
2363ad1e7d28SJulian Elischer }
2364ad1e7d28SJulian Elischer 
2365ae7a6b38SJeff Roberson /*
2366ae7a6b38SJeff Roberson  * Penalize another thread for the time spent on this one.  This helps to
2367ae7a6b38SJeff Roberson  * worsen the priority and interactivity of processes which schedule batch
2368ae7a6b38SJeff Roberson  * jobs such as make.  This has little effect on the make process itself but
2369ae7a6b38SJeff Roberson  * causes new processes spawned by it to receive worse scores immediately.
2370ae7a6b38SJeff Roberson  */
2371ad1e7d28SJulian Elischer void
2372fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child)
2373ad1e7d28SJulian Elischer {
2374fc6c30f6SJulian Elischer 
23758f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
2376cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2377e7d50326SJeff Roberson 	/*
2378e7d50326SJeff Roberson 	 * Give the child's runtime to the parent without returning the
2379e7d50326SJeff Roberson 	 * sleep time as a penalty to the parent.  This causes shells that
2380e7d50326SJeff Roberson 	 * launch expensive things to mark their children as expensive.
2381e7d50326SJeff Roberson 	 */
23827b20fb19SJeff Roberson 	thread_lock(td);
238393ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime;
2384fc6c30f6SJulian Elischer 	sched_interact_update(td);
2385e7d50326SJeff Roberson 	sched_priority(td);
23867b20fb19SJeff Roberson 	thread_unlock(td);
2387ad1e7d28SJulian Elischer }
2388ad1e7d28SJulian Elischer 
2389ff256d9cSJeff Roberson void
2390ff256d9cSJeff Roberson sched_preempt(struct thread *td)
2391ff256d9cSJeff Roberson {
2392ff256d9cSJeff Roberson 	struct tdq *tdq;
2393ff256d9cSJeff Roberson 
2394b3e9e682SRyan Stone 	SDT_PROBE2(sched, , , surrender, td, td->td_proc);
2395b3e9e682SRyan Stone 
2396ff256d9cSJeff Roberson 	thread_lock(td);
2397ff256d9cSJeff Roberson 	tdq = TDQ_SELF();
2398ff256d9cSJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2399ff256d9cSJeff Roberson 	if (td->td_priority > tdq->tdq_lowpri) {
24008df78c41SJeff Roberson 		int flags;
24018df78c41SJeff Roberson 
24028df78c41SJeff Roberson 		flags = SW_INVOL | SW_PREEMPT;
2403ff256d9cSJeff Roberson 		if (td->td_critnest > 1)
2404ff256d9cSJeff Roberson 			td->td_owepreempt = 1;
24058df78c41SJeff Roberson 		else if (TD_IS_IDLETHREAD(td))
24068df78c41SJeff Roberson 			mi_switch(flags | SWT_REMOTEWAKEIDLE, NULL);
2407ff256d9cSJeff Roberson 		else
24088df78c41SJeff Roberson 			mi_switch(flags | SWT_REMOTEPREEMPT, NULL);
24097789ab32SMark Johnston 	} else {
24107789ab32SMark Johnston 		tdq->tdq_owepreempt = 0;
2411ff256d9cSJeff Roberson 	}
2412ff256d9cSJeff Roberson 	thread_unlock(td);
2413ff256d9cSJeff Roberson }
2414ff256d9cSJeff Roberson 
2415ae7a6b38SJeff Roberson /*
2416ae7a6b38SJeff Roberson  * Fix priorities on return to user-space.  Priorities may be elevated due
2417ae7a6b38SJeff Roberson  * to static priorities in msleep() or similar.
2418ae7a6b38SJeff Roberson  */
2419ad1e7d28SJulian Elischer void
242028240885SMateusz Guzik sched_userret_slowpath(struct thread *td)
2421ad1e7d28SJulian Elischer {
242228240885SMateusz Guzik 
24237b20fb19SJeff Roberson 	thread_lock(td);
2424ad1e7d28SJulian Elischer 	td->td_priority = td->td_user_pri;
2425ad1e7d28SJulian Elischer 	td->td_base_pri = td->td_user_pri;
242662fa74d9SJeff Roberson 	tdq_setlowpri(TDQ_SELF(), td);
24277b20fb19SJeff Roberson 	thread_unlock(td);
2428ad1e7d28SJulian Elischer }
242935e6168fSJeff Roberson 
2430ae7a6b38SJeff Roberson /*
2431ae7a6b38SJeff Roberson  * Handle a stathz tick.  This is really only relevant for timeshare
2432ae7a6b38SJeff Roberson  * threads.
2433ae7a6b38SJeff Roberson  */
243435e6168fSJeff Roberson void
2435c3cccf95SJeff Roberson sched_clock(struct thread *td, int cnt)
243635e6168fSJeff Roberson {
2437ad1e7d28SJulian Elischer 	struct tdq *tdq;
2438ad1e7d28SJulian Elischer 	struct td_sched *ts;
243935e6168fSJeff Roberson 
2440ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
24413f872f85SJeff Roberson 	tdq = TDQ_SELF();
24427fcf154aSJeff Roberson #ifdef SMP
24437fcf154aSJeff Roberson 	/*
24447fcf154aSJeff Roberson 	 * We run the long term load balancer infrequently on the first cpu.
24457fcf154aSJeff Roberson 	 */
2446c3cccf95SJeff Roberson 	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 &&
2447c3cccf95SJeff Roberson 	    balance_ticks != 0) {
2448c3cccf95SJeff Roberson 		balance_ticks -= cnt;
2449c3cccf95SJeff Roberson 		if (balance_ticks <= 0)
24507fcf154aSJeff Roberson 			sched_balance();
24517fcf154aSJeff Roberson 	}
24527fcf154aSJeff Roberson #endif
24533f872f85SJeff Roberson 	/*
24541690c6c1SJeff Roberson 	 * Save the old switch count so we have a record of the last tick's
24551690c6c1SJeff Roberson 	 * activity.  Initialize the new switch count based on our load.
24561690c6c1SJeff Roberson 	 * If there is some activity, seed it to reflect that.
24571690c6c1SJeff Roberson 	 */
24581690c6c1SJeff Roberson 	tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
24596c47aaaeSJeff Roberson 	tdq->tdq_switchcnt = tdq->tdq_load;
24601690c6c1SJeff Roberson 	/*
24613f872f85SJeff Roberson 	 * Advance the insert index once for each tick to ensure that all
24623f872f85SJeff Roberson 	 * threads get a chance to run.
24633f872f85SJeff Roberson 	 */
24643f872f85SJeff Roberson 	if (tdq->tdq_idx == tdq->tdq_ridx) {
24653f872f85SJeff Roberson 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
24663f872f85SJeff Roberson 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
24673f872f85SJeff Roberson 			tdq->tdq_ridx = tdq->tdq_idx;
24683f872f85SJeff Roberson 	}
246993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
24707295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
2471c3cccf95SJeff Roberson 	if ((td->td_pri_class & PRI_FIFO_BIT) || TD_IS_IDLETHREAD(td))
2472a8949de2SJeff Roberson 		return;
2473c3cccf95SJeff Roberson 
2474c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
2475a8949de2SJeff Roberson 		/*
2476fd0b8c78SJeff Roberson 		 * We used a tick; charge it to the thread so
2477fd0b8c78SJeff Roberson 		 * that we can compute our interactivity.
247815dc847eSJeff Roberson 		 */
2479c3cccf95SJeff Roberson 		td_get_sched(td)->ts_runtime += tickincr * cnt;
24808460a577SJohn Birrell 		sched_interact_update(td);
248173daf66fSJeff Roberson 		sched_priority(td);
2482fd0b8c78SJeff Roberson 	}
2483579895dfSAlexander Motin 
248435e6168fSJeff Roberson 	/*
2485579895dfSAlexander Motin 	 * Force a context switch if the current thread has used up a full
2486579895dfSAlexander Motin 	 * time slice (default is 100ms).
248735e6168fSJeff Roberson 	 */
2488c3cccf95SJeff Roberson 	ts->ts_slice += cnt;
2489c3cccf95SJeff Roberson 	if (ts->ts_slice >= tdq_slice(tdq)) {
24905e5c3873SJeff Roberson 		ts->ts_slice = 0;
24913d7f4117SAlexander Motin 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
249235e6168fSJeff Roberson 	}
2493579895dfSAlexander Motin }
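/*
 * The tdq_idx/tdq_ridx manipulation above implements a calendar queue for
 * the timeshare class: batch threads are enqueued at an offset from the
 * insert index (tdq_idx) that grows with their priority value, threads are
 * dequeued at the removal index (tdq_ridx), and the insert index advances
 * at most one queue per tick while the removal index follows as queues
 * drain.  A worked example with the stock RQ_NQS of 64: a thread whose
 * priority maps to an offset of 5 while tdq_idx == 10 lands in queue
 * (10 + 5) % 64 == 15, so it runs only after the intervening queues are
 * serviced, which bounds its wait even under heavy load.
 */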
249435e6168fSJeff Roberson 
2495ccd0ec40SKonstantin Belousov u_int
2496ccd0ec40SKonstantin Belousov sched_estcpu(struct thread *td __unused)
2497ae7a6b38SJeff Roberson {
2498ae7a6b38SJeff Roberson 
2499ccd0ec40SKonstantin Belousov 	return (0);
2500ae7a6b38SJeff Roberson }
2501ae7a6b38SJeff Roberson 
2502ae7a6b38SJeff Roberson /*
2503ae7a6b38SJeff Roberson  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2504ae7a6b38SJeff Roberson  * cooperative idle threads.
2505ae7a6b38SJeff Roberson  */
250635e6168fSJeff Roberson int
250735e6168fSJeff Roberson sched_runnable(void)
250835e6168fSJeff Roberson {
2509ad1e7d28SJulian Elischer 	struct tdq *tdq;
2510b90816f1SJeff Roberson 	int load;
251135e6168fSJeff Roberson 
2512b90816f1SJeff Roberson 	load = 1;
2513b90816f1SJeff Roberson 
2514ad1e7d28SJulian Elischer 	tdq = TDQ_SELF();
25153f741ca1SJeff Roberson 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2516d2ad694cSJeff Roberson 		if (tdq->tdq_load > 0)
25173f741ca1SJeff Roberson 			goto out;
25183f741ca1SJeff Roberson 	} else
2519d2ad694cSJeff Roberson 		if (tdq->tdq_load - 1 > 0)
2520b90816f1SJeff Roberson 			goto out;
2521b90816f1SJeff Roberson 	load = 0;
2522b90816f1SJeff Roberson out:
2523b90816f1SJeff Roberson 	return (load);
252435e6168fSJeff Roberson }
252535e6168fSJeff Roberson 
2526ae7a6b38SJeff Roberson /*
2527ae7a6b38SJeff Roberson  * Choose the highest priority thread to run.  The thread is removed from
2528ae7a6b38SJeff Roberson  * the run-queue while running; however, the load remains.  For SMP we set
2529ae7a6b38SJeff Roberson  * the tdq in the global idle bitmask if it idles here.
2530ae7a6b38SJeff Roberson  */
25317a5e5e2aSJeff Roberson struct thread *
2532c9f25d8fSJeff Roberson sched_choose(void)
2533c9f25d8fSJeff Roberson {
25349727e637SJeff Roberson 	struct thread *td;
2535ae7a6b38SJeff Roberson 	struct tdq *tdq;
2536ae7a6b38SJeff Roberson 
2537ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2538ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
25399727e637SJeff Roberson 	td = tdq_choose(tdq);
25409727e637SJeff Roberson 	if (td) {
25419727e637SJeff Roberson 		tdq_runq_rem(tdq, td);
25420502fe2eSJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
25439727e637SJeff Roberson 		return (td);
254435e6168fSJeff Roberson 	}
25450502fe2eSJeff Roberson 	tdq->tdq_lowpri = PRI_MAX_IDLE;
254662fa74d9SJeff Roberson 	return (PCPU_GET(idlethread));
25477a5e5e2aSJeff Roberson }
25487a5e5e2aSJeff Roberson 
2549ae7a6b38SJeff Roberson /*
2550ae7a6b38SJeff Roberson  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2551ae7a6b38SJeff Roberson  * we request it here and it takes effect when the critical section exits.
2552ae7a6b38SJeff Roberson  */
2553ae7a6b38SJeff Roberson static inline void
2554ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
25557a5e5e2aSJeff Roberson {
25567a5e5e2aSJeff Roberson 	struct thread *ctd;
25577a5e5e2aSJeff Roberson 	int cpri;
25587a5e5e2aSJeff Roberson 	int pri;
25597a5e5e2aSJeff Roberson 
2560ff256d9cSJeff Roberson 	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2561ff256d9cSJeff Roberson 
25627a5e5e2aSJeff Roberson 	ctd = curthread;
25637a5e5e2aSJeff Roberson 	pri = td->td_priority;
25647a5e5e2aSJeff Roberson 	cpri = ctd->td_priority;
2565ff256d9cSJeff Roberson 	if (pri < cpri)
2566ff256d9cSJeff Roberson 		ctd->td_flags |= TDF_NEEDRESCHED;
25677a5e5e2aSJeff Roberson 	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2568ae7a6b38SJeff Roberson 		return;
2569ff256d9cSJeff Roberson 	if (!sched_shouldpreempt(pri, cpri, 0))
2570ae7a6b38SJeff Roberson 		return;
25717a5e5e2aSJeff Roberson 	ctd->td_owepreempt = 1;
257235e6168fSJeff Roberson }
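/*
 * sched_shouldpreempt(), used above and defined earlier in this file, gates
 * preemption on the preempt_thresh tunable.  A hedged sketch of the
 * decision; remember that numerically lower priorities are better:
 */
static inline int
sched_shouldpreempt_sketch(int pri, int cpri, int remote)
{

	/* The new thread must be strictly better to matter at all. */
	if (pri >= cpri)
		return (0);
	/* Always preempt idle. */
	if (cpri >= PRI_MIN_IDLE)
		return (1);
	/* preempt_thresh == 0 disables preemption entirely. */
	if (preempt_thresh == 0)
		return (0);
	/* Preempt when the new priority beats the threshold. */
	if (pri <= preempt_thresh)
		return (1);
	/* Optionally let interactive threads displace remote batch ones. */
	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
		return (1);
	return (0);
}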
257335e6168fSJeff Roberson 
2574ae7a6b38SJeff Roberson /*
257573daf66fSJeff Roberson  * Add a thread to a thread queue.  Select the appropriate runq and add the
257673daf66fSJeff Roberson  * thread to it.  This is the internal function called when the tdq is
257773daf66fSJeff Roberson  * predetermined.
2578ae7a6b38SJeff Roberson  */
257935e6168fSJeff Roberson void
2580ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
258135e6168fSJeff Roberson {
2582c9f25d8fSJeff Roberson 
2583ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2584*61a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
25857a5e5e2aSJeff Roberson 	KASSERT((td->td_inhibitors == 0),
25867a5e5e2aSJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
25877a5e5e2aSJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
25887a5e5e2aSJeff Roberson 	    ("sched_add: bad thread state"));
2589b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
2590b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
2591ae7a6b38SJeff Roberson 
2592ae7a6b38SJeff Roberson 	if (td->td_priority < tdq->tdq_lowpri)
2593ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
25949727e637SJeff Roberson 	tdq_runq_add(tdq, td, flags);
25959727e637SJeff Roberson 	tdq_load_add(tdq, td);
2596ae7a6b38SJeff Roberson }
2597ae7a6b38SJeff Roberson 
2598ae7a6b38SJeff Roberson /*
2599ae7a6b38SJeff Roberson  * Select the target thread queue and add a thread to it.  Request
2600ae7a6b38SJeff Roberson  * preemption or IPI a remote processor if required.
2601*61a74c5cSJeff Roberson  *
2602*61a74c5cSJeff Roberson  * Requires the thread lock on entry, drops on exit.
2603ae7a6b38SJeff Roberson  */
2604ae7a6b38SJeff Roberson void
2605ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags)
2606ae7a6b38SJeff Roberson {
2607ae7a6b38SJeff Roberson 	struct tdq *tdq;
26087b8bfa0dSJeff Roberson #ifdef SMP
2609ae7a6b38SJeff Roberson 	int cpu;
2610ae7a6b38SJeff Roberson #endif
26118f51ad55SJeff Roberson 
26128f51ad55SJeff Roberson 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
26138f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
26148f51ad55SJeff Roberson 	    sched_tdname(curthread));
26158f51ad55SJeff Roberson 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
26168f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(td));
2617b3e9e682SRyan Stone 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
2618b3e9e682SRyan Stone 	    flags & SRQ_PREEMPTED);
2619ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2620ae7a6b38SJeff Roberson 	/*
2621ae7a6b38SJeff Roberson 	 * Recalculate the priority before we select the target cpu or
2622ae7a6b38SJeff Roberson 	 * run-queue.
2623ae7a6b38SJeff Roberson 	 */
2624ae7a6b38SJeff Roberson 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2625ae7a6b38SJeff Roberson 		sched_priority(td);
2626ae7a6b38SJeff Roberson #ifdef SMP
2627ae7a6b38SJeff Roberson 	/*
2628ae7a6b38SJeff Roberson 	 * Pick the destination cpu and if it isn't ours transfer to the
2629ae7a6b38SJeff Roberson 	 * target cpu.
2630ae7a6b38SJeff Roberson 	 */
26319727e637SJeff Roberson 	cpu = sched_pickcpu(td, flags);
26329727e637SJeff Roberson 	tdq = sched_setcpu(td, cpu, flags);
2633ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
2634*61a74c5cSJeff Roberson 	if (cpu != PCPU_GET(cpuid))
263527ee18adSRyan Stone 		tdq_notify(tdq, td);
2636*61a74c5cSJeff Roberson 	else if (!(flags & SRQ_YIELDING))
2637*61a74c5cSJeff Roberson 		sched_setpreempt(td);
2638ae7a6b38SJeff Roberson #else
2639ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2640ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
2641ae7a6b38SJeff Roberson 	/*
2642ae7a6b38SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
2643ae7a6b38SJeff Roberson 	 * to the scheduler's lock.
2644ae7a6b38SJeff Roberson 	 */
2645*61a74c5cSJeff Roberson 	if ((flags & SRQ_HOLD) != 0)
2646*61a74c5cSJeff Roberson 		td->td_lock = TDQ_LOCKPTR(tdq);
2647*61a74c5cSJeff Roberson 	else
2648ae7a6b38SJeff Roberson 		thread_lock_set(td, TDQ_LOCKPTR(tdq));
2649ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
2650ae7a6b38SJeff Roberson 	if (!(flags & SRQ_YIELDING))
2651ae7a6b38SJeff Roberson 		sched_setpreempt(td);
2652*61a74c5cSJeff Roberson #endif
2653*61a74c5cSJeff Roberson 	if (!(flags & SRQ_HOLDTD))
2654*61a74c5cSJeff Roberson 		thread_unlock(td);
265535e6168fSJeff Roberson }
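/*
 * Locking summary for the tail of sched_add(), as visible above: with
 * SRQ_HOLD the caller's original lock remains held and td_lock is simply
 * repointed at the run-queue lock; with SRQ_HOLDTD the thread lock itself
 * is returned still held; otherwise the thread is unlocked before
 * returning, matching the "drops on exit" contract noted earlier.
 */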
265635e6168fSJeff Roberson 
2657ae7a6b38SJeff Roberson /*
2658ae7a6b38SJeff Roberson  * Remove a thread from a run-queue without running it.  This is used
2659ae7a6b38SJeff Roberson  * when we're stealing a thread from a remote queue.  Otherwise all threads
2660ae7a6b38SJeff Roberson  * exit by calling sched_exit_thread() and sched_throw() themselves.
2661ae7a6b38SJeff Roberson  */
266235e6168fSJeff Roberson void
26637cf90fb3SJeff Roberson sched_rem(struct thread *td)
266435e6168fSJeff Roberson {
2665ad1e7d28SJulian Elischer 	struct tdq *tdq;
26667cf90fb3SJeff Roberson 
26678f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
26688f51ad55SJeff Roberson 	    "prio:%d", td->td_priority);
2669b3e9e682SRyan Stone 	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
267093ccd6bfSKonstantin Belousov 	tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
2671ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2672ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
26737a5e5e2aSJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
2674ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
26759727e637SJeff Roberson 	tdq_runq_rem(tdq, td);
26769727e637SJeff Roberson 	tdq_load_rem(tdq, td);
26777a5e5e2aSJeff Roberson 	TD_SET_CAN_RUN(td);
267862fa74d9SJeff Roberson 	if (td->td_priority == tdq->tdq_lowpri)
267962fa74d9SJeff Roberson 		tdq_setlowpri(tdq, NULL);
268035e6168fSJeff Roberson }
268135e6168fSJeff Roberson 
2682ae7a6b38SJeff Roberson /*
2683ae7a6b38SJeff Roberson  * Fetch cpu utilization information.  Updates on demand.
2684ae7a6b38SJeff Roberson  */
268535e6168fSJeff Roberson fixpt_t
26867cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
268735e6168fSJeff Roberson {
268835e6168fSJeff Roberson 	fixpt_t pctcpu;
2689ad1e7d28SJulian Elischer 	struct td_sched *ts;
269035e6168fSJeff Roberson 
269135e6168fSJeff Roberson 	pctcpu = 0;
269293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
269335e6168fSJeff Roberson 
26943da35a0aSJohn Baldwin 	THREAD_LOCK_ASSERT(td, MA_OWNED);
26957295465eSAlexander Motin 	sched_pctcpu_update(ts, TD_IS_RUNNING(td));
2696ad1e7d28SJulian Elischer 	if (ts->ts_ticks) {
269735e6168fSJeff Roberson 		int rtick;
269835e6168fSJeff Roberson 
269935e6168fSJeff Roberson 		/* How many rtick per second ? */
2700e7d50326SJeff Roberson 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2701e7d50326SJeff Roberson 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
270235e6168fSJeff Roberson 	}
270335e6168fSJeff Roberson 
270435e6168fSJeff Roberson 	return (pctcpu);
270535e6168fSJeff Roberson }
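/*
 * Worked example of the fixed-point math above, assuming the common
 * FSHIFT of 11 (so FSCALE == 2048 represents 100%) and hz == 1000: a
 * thread with rtick == 500 run-ticks in the last second yields
 * (2048 * ((2048 * 500) / 1000)) >> 11 == 1024, i.e. a weighted 50%.
 */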
270635e6168fSJeff Roberson 
270762fa74d9SJeff Roberson /*
270862fa74d9SJeff Roberson  * Enforce affinity settings for a thread.  Called after adjustments to
270962fa74d9SJeff Roberson  * cpumask.
271062fa74d9SJeff Roberson  */
2711885d51a3SJeff Roberson void
2712885d51a3SJeff Roberson sched_affinity(struct thread *td)
2713885d51a3SJeff Roberson {
271462fa74d9SJeff Roberson #ifdef SMP
271562fa74d9SJeff Roberson 	struct td_sched *ts;
271662fa74d9SJeff Roberson 
271762fa74d9SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
271893ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
271962fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
272062fa74d9SJeff Roberson 		return;
272153a6c8b3SJeff Roberson 	if (TD_ON_RUNQ(td)) {
272253a6c8b3SJeff Roberson 		sched_rem(td);
272353a6c8b3SJeff Roberson 		sched_add(td, SRQ_BORING);
272453a6c8b3SJeff Roberson 		return;
272553a6c8b3SJeff Roberson 	}
272662fa74d9SJeff Roberson 	if (!TD_IS_RUNNING(td))
272762fa74d9SJeff Roberson 		return;
272862fa74d9SJeff Roberson 	/*
27290f7a0ebdSMatthew D Fleming 	 * Force a switch before returning to userspace.  If the
27300f7a0ebdSMatthew D Fleming 	 * target thread is not running locally, send an IPI to force
27310f7a0ebdSMatthew D Fleming 	 * the issue.
273262fa74d9SJeff Roberson 	 */
2733a8103ae8SJohn Baldwin 	td->td_flags |= TDF_NEEDRESCHED;
27340f7a0ebdSMatthew D Fleming 	if (td != curthread)
27350f7a0ebdSMatthew D Fleming 		ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
273662fa74d9SJeff Roberson #endif
2737885d51a3SJeff Roberson }
2738885d51a3SJeff Roberson 
2739ae7a6b38SJeff Roberson /*
2740ae7a6b38SJeff Roberson  * Bind a thread to a target cpu.
2741ae7a6b38SJeff Roberson  */
27429bacd788SJeff Roberson void
27439bacd788SJeff Roberson sched_bind(struct thread *td, int cpu)
27449bacd788SJeff Roberson {
2745ad1e7d28SJulian Elischer 	struct td_sched *ts;
27469bacd788SJeff Roberson 
2747c47f202bSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
27481d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
274993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
27506b2f763fSJeff Roberson 	if (ts->ts_flags & TSF_BOUND)
2751c95d2db2SJeff Roberson 		sched_unbind(td);
27520f7a0ebdSMatthew D Fleming 	KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
2753ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
27546b2f763fSJeff Roberson 	sched_pin();
275580f86c9fSJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
27569bacd788SJeff Roberson 		return;
27576b2f763fSJeff Roberson 	ts->ts_cpu = cpu;
27589bacd788SJeff Roberson 	/* When we return from mi_switch we'll be on the correct cpu. */
2759279f949eSPoul-Henning Kamp 	mi_switch(SW_VOL, NULL);
27609bacd788SJeff Roberson }
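/*
 * A hedged usage sketch for the binding interface: callers take the thread
 * lock around sched_bind()/sched_unbind() and must be prepared for the
 * bind to context-switch.  The function name and the 'work' step are
 * illustrative:
 */
static inline void
sched_bind_example(int cpu)
{

	thread_lock(curthread);
	sched_bind(curthread, cpu);	/* May switch us onto 'cpu'. */
	thread_unlock(curthread);
	/* ... per-CPU work runs here, pinned by sched_pin() ... */
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}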
27619bacd788SJeff Roberson 
2762ae7a6b38SJeff Roberson /*
2763ae7a6b38SJeff Roberson  * Release a bound thread.
2764ae7a6b38SJeff Roberson  */
27659bacd788SJeff Roberson void
27669bacd788SJeff Roberson sched_unbind(struct thread *td)
27679bacd788SJeff Roberson {
2768e7d50326SJeff Roberson 	struct td_sched *ts;
2769e7d50326SJeff Roberson 
27707b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
27711d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
277293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
27736b2f763fSJeff Roberson 	if ((ts->ts_flags & TSF_BOUND) == 0)
27746b2f763fSJeff Roberson 		return;
2775e7d50326SJeff Roberson 	ts->ts_flags &= ~TSF_BOUND;
2776e7d50326SJeff Roberson 	sched_unpin();
27779bacd788SJeff Roberson }
27789bacd788SJeff Roberson 
277935e6168fSJeff Roberson int
2780ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
2781ebccf1e3SJoseph Koshy {
27827b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
278393ccd6bfSKonstantin Belousov 	return (td_get_sched(td)->ts_flags & TSF_BOUND);
2784ebccf1e3SJoseph Koshy }
2785ebccf1e3SJoseph Koshy 
2786ae7a6b38SJeff Roberson /*
2787ae7a6b38SJeff Roberson  * Basic yield call.
2788ae7a6b38SJeff Roberson  */
278936ec198bSDavid Xu void
279036ec198bSDavid Xu sched_relinquish(struct thread *td)
279136ec198bSDavid Xu {
27927b20fb19SJeff Roberson 	thread_lock(td);
27938df78c41SJeff Roberson 	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
27947b20fb19SJeff Roberson 	thread_unlock(td);
279536ec198bSDavid Xu }
279636ec198bSDavid Xu 
2797ae7a6b38SJeff Roberson /*
2798ae7a6b38SJeff Roberson  * Return the total system load.
2799ae7a6b38SJeff Roberson  */
2800ebccf1e3SJoseph Koshy int
280133916c36SJeff Roberson sched_load(void)
280233916c36SJeff Roberson {
280333916c36SJeff Roberson #ifdef SMP
280433916c36SJeff Roberson 	int total;
280533916c36SJeff Roberson 	int i;
280633916c36SJeff Roberson 
280733916c36SJeff Roberson 	total = 0;
28083aa6d94eSJohn Baldwin 	CPU_FOREACH(i)
280962fa74d9SJeff Roberson 		total += TDQ_CPU(i)->tdq_sysload;
281033916c36SJeff Roberson 	return (total);
281133916c36SJeff Roberson #else
2812d2ad694cSJeff Roberson 	return (TDQ_SELF()->tdq_sysload);
281333916c36SJeff Roberson #endif
281433916c36SJeff Roberson }
281533916c36SJeff Roberson 
281633916c36SJeff Roberson int
281735e6168fSJeff Roberson sched_sizeof_proc(void)
281835e6168fSJeff Roberson {
281935e6168fSJeff Roberson 	return (sizeof(struct proc));
282035e6168fSJeff Roberson }
282135e6168fSJeff Roberson 
282235e6168fSJeff Roberson int
282335e6168fSJeff Roberson sched_sizeof_thread(void)
282435e6168fSJeff Roberson {
282535e6168fSJeff Roberson 	return (sizeof(struct thread) + sizeof(struct td_sched));
282635e6168fSJeff Roberson }
2827b41f1452SDavid Xu 
282809c8a4ccSJeff Roberson #ifdef SMP
282909c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)						\
283009c8a4ccSJeff Roberson     ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0)
283109c8a4ccSJeff Roberson #else
283209c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)	1
283309c8a4ccSJeff Roberson #endif
283409c8a4ccSJeff Roberson 
28357a5e5e2aSJeff Roberson /*
28367a5e5e2aSJeff Roberson  * The actual idle process.
28377a5e5e2aSJeff Roberson  */
28387a5e5e2aSJeff Roberson void
28397a5e5e2aSJeff Roberson sched_idletd(void *dummy)
28407a5e5e2aSJeff Roberson {
28417a5e5e2aSJeff Roberson 	struct thread *td;
2842ae7a6b38SJeff Roberson 	struct tdq *tdq;
28432c27cb3aSAlexander Motin 	int oldswitchcnt, switchcnt;
28441690c6c1SJeff Roberson 	int i;
28457a5e5e2aSJeff Roberson 
28467b55ab05SJeff Roberson 	mtx_assert(&Giant, MA_NOTOWNED);
28477a5e5e2aSJeff Roberson 	td = curthread;
2848ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2849ba96d2d8SJohn Baldwin 	THREAD_NO_SLEEPING();
28502c27cb3aSAlexander Motin 	oldswitchcnt = -1;
2851ae7a6b38SJeff Roberson 	for (;;) {
28522c27cb3aSAlexander Motin 		if (tdq->tdq_load) {
28532c27cb3aSAlexander Motin 			thread_lock(td);
28542c27cb3aSAlexander Motin 			mi_switch(SW_VOL | SWT_IDLE, NULL);
28552c27cb3aSAlexander Motin 			thread_unlock(td);
28562c27cb3aSAlexander Motin 		}
28572c27cb3aSAlexander Motin 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
2858ae7a6b38SJeff Roberson #ifdef SMP
285997e9382dSDon Lewis 		if (always_steal || switchcnt != oldswitchcnt) {
28602c27cb3aSAlexander Motin 			oldswitchcnt = switchcnt;
28611690c6c1SJeff Roberson 			if (tdq_idled(tdq) == 0)
28621690c6c1SJeff Roberson 				continue;
28632c27cb3aSAlexander Motin 		}
28641690c6c1SJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
28652fd4047fSAlexander Motin #else
28662fd4047fSAlexander Motin 		oldswitchcnt = switchcnt;
28672fd4047fSAlexander Motin #endif
28681690c6c1SJeff Roberson 		/*
28691690c6c1SJeff Roberson 		 * If we're switching very frequently, spin while checking
28701690c6c1SJeff Roberson 		 * for load rather than entering a low power state that
28717b55ab05SJeff Roberson 		 * may require an IPI.  However, avoid busy loops on SMT
28727b55ab05SJeff Roberson 		 * machines, as spinning there simply steals cycles from
28737b55ab05SJeff Roberson 		 * the sibling hardware threads doing useful work.
28741690c6c1SJeff Roberson 		 */
287509c8a4ccSJeff Roberson 		if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) {
28761690c6c1SJeff Roberson 			for (i = 0; i < sched_idlespins; i++) {
28771690c6c1SJeff Roberson 				if (tdq->tdq_load)
28781690c6c1SJeff Roberson 					break;
28791690c6c1SJeff Roberson 				cpu_spinwait();
28801690c6c1SJeff Roberson 			}
28811690c6c1SJeff Roberson 		}
28822c27cb3aSAlexander Motin 
28832c27cb3aSAlexander Motin 		/* If there was a context switch during the spin, restart it. */
28846c47aaaeSJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
28852c27cb3aSAlexander Motin 		if (tdq->tdq_load != 0 || switchcnt != oldswitchcnt)
28862c27cb3aSAlexander Motin 			continue;
28872c27cb3aSAlexander Motin 
28882c27cb3aSAlexander Motin 		/* Run main MD idle handler. */
28899f9ad565SAlexander Motin 		tdq->tdq_cpu_idle = 1;
289079654969SAlexander Motin 		/*
289179654969SAlexander Motin 		 * Make sure that the tdq_cpu_idle update is globally visible
289279654969SAlexander Motin 		 * before cpu_idle() reads tdq_load.  The ordering is important
289379654969SAlexander Motin 		 * to avoid a race with tdq_notify().
289479654969SAlexander Motin 		 */
2895e8677f38SKonstantin Belousov 		atomic_thread_fence_seq_cst();
289697e9382dSDon Lewis 		/*
289797e9382dSDon Lewis 		 * Checking for load again after the fence picks up newly
289897e9382dSDon Lewis 		 * assigned threads often enough to make it worthwhile to
289997e9382dSDon Lewis 		 * do so in order to avoid calling cpu_idle().
290097e9382dSDon Lewis 		 */
290197e9382dSDon Lewis 		if (tdq->tdq_load != 0) {
290297e9382dSDon Lewis 			tdq->tdq_cpu_idle = 0;
290397e9382dSDon Lewis 			continue;
290497e9382dSDon Lewis 		}
29052c27cb3aSAlexander Motin 		cpu_idle(switchcnt * 4 > sched_idlespinthresh);
29069f9ad565SAlexander Motin 		tdq->tdq_cpu_idle = 0;
29072c27cb3aSAlexander Motin 
29082c27cb3aSAlexander Motin 		/*
29092c27cb3aSAlexander Motin 		 * Account thread-less hardware interrupts and
29102c27cb3aSAlexander Motin 		 * other wakeup reasons equal to context switches.
29112c27cb3aSAlexander Motin 		 */
29122c27cb3aSAlexander Motin 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
29132c27cb3aSAlexander Motin 		if (switchcnt != oldswitchcnt)
29142c27cb3aSAlexander Motin 			continue;
29152c27cb3aSAlexander Motin 		tdq->tdq_switchcnt++;
29162c27cb3aSAlexander Motin 		oldswitchcnt++;
2917ae7a6b38SJeff Roberson 	}
2918b41f1452SDavid Xu }
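/*
 * The fence in the idle loop above pairs with the producer side in
 * tdq_notify(): each CPU publishes one fact, fences, then checks the
 * other's.  Schematically (a hedged summary, not literal code):
 *
 *	CPU adding a thread		CPU going idle
 *	-------------------		--------------
 *	store:	tdq_load++		store:	tdq_cpu_idle = 1
 *	fence				fence (seq_cst above)
 *	load:	tdq_cpu_idle		load:	tdq_load
 *	  -> if idle, send wakeup	  -> if loaded, skip cpu_idle()
 *
 * Either the notifier observes tdq_cpu_idle and sends the wakeup, or the
 * idling CPU observes the new load and never sleeps; the ordering makes
 * missing both impossible.
 */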
2919e7d50326SJeff Roberson 
29207b20fb19SJeff Roberson /*
29217b20fb19SJeff Roberson  * A CPU is entering for the first time or a thread is exiting.
29227b20fb19SJeff Roberson  */
29237b20fb19SJeff Roberson void
29247b20fb19SJeff Roberson sched_throw(struct thread *td)
29257b20fb19SJeff Roberson {
292659c68134SJeff Roberson 	struct thread *newtd;
2927ae7a6b38SJeff Roberson 	struct tdq *tdq;
2928ae7a6b38SJeff Roberson 
29297b20fb19SJeff Roberson 	if (td == NULL) {
2930018ff686SJeff Roberson #ifdef SMP
2931018ff686SJeff Roberson 		PCPU_SET(sched, DPCPU_PTR(tdq));
2932018ff686SJeff Roberson #endif
2933ae7a6b38SJeff Roberson 		/* Correct spinlock nesting and acquire the correct lock. */
2934018ff686SJeff Roberson 		tdq = TDQ_SELF();
2935ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
29367b20fb19SJeff Roberson 		spinlock_exit();
29377e3a96eaSJohn Baldwin 		PCPU_SET(switchtime, cpu_ticks());
29387e3a96eaSJohn Baldwin 		PCPU_SET(switchticks, ticks);
2939e1504695SJeff Roberson 		PCPU_GET(idlethread)->td_lock = TDQ_LOCKPTR(tdq);
29407b20fb19SJeff Roberson 	} else {
2941*61a74c5cSJeff Roberson 		THREAD_LOCK_ASSERT(td, MA_OWNED);
2942018ff686SJeff Roberson 		tdq = TDQ_SELF();
2943ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
29449727e637SJeff Roberson 		tdq_load_rem(tdq, td);
2945eea4f254SJeff Roberson 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
294692de34dfSJohn Baldwin 		td->td_lastcpu = td->td_oncpu;
294792de34dfSJohn Baldwin 		td->td_oncpu = NOCPU;
29487b20fb19SJeff Roberson 	}
29497b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
295059c68134SJeff Roberson 	newtd = choosethread();
295159c68134SJeff Roberson 	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
295259c68134SJeff Roberson 	cpu_throw(td, newtd);		/* doesn't return */
29537b20fb19SJeff Roberson }
29547b20fb19SJeff Roberson 
2955ae7a6b38SJeff Roberson /*
2956ae7a6b38SJeff Roberson  * This is called from fork_exit().  Just acquire the correct locks and
2957ae7a6b38SJeff Roberson  * let fork do the rest of the work.
2958ae7a6b38SJeff Roberson  */
29597b20fb19SJeff Roberson void
2960fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
29617b20fb19SJeff Roberson {
2962ae7a6b38SJeff Roberson 	struct tdq *tdq;
2963ae7a6b38SJeff Roberson 	int cpuid;
29647b20fb19SJeff Roberson 
29657b20fb19SJeff Roberson 	/*
29667b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
2967ae7a6b38SJeff Roberson 	 * non-nested critical section with the scheduler lock held.
29687b20fb19SJeff Roberson 	 */
2969ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2970018ff686SJeff Roberson 	tdq = TDQ_SELF();
2971ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2972ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
297359c68134SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2974eea4f254SJeff Roberson 	lock_profile_obtain_lock_success(
2975eea4f254SJeff Roberson 	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
297628ef18b8SAndriy Gapon 
297728ef18b8SAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
297828ef18b8SAndriy Gapon 	    "prio:%d", td->td_priority);
297928ef18b8SAndriy Gapon 	SDT_PROBE0(sched, , , on__cpu);
29807b20fb19SJeff Roberson }
29817b20fb19SJeff Roberson 
29828f51ad55SJeff Roberson /*
29838f51ad55SJeff Roberson  * Create on first use to catch odd startup conditions.
29848f51ad55SJeff Roberson  */
29858f51ad55SJeff Roberson char *
29868f51ad55SJeff Roberson sched_tdname(struct thread *td)
29878f51ad55SJeff Roberson {
29888f51ad55SJeff Roberson #ifdef KTR
29898f51ad55SJeff Roberson 	struct td_sched *ts;
29908f51ad55SJeff Roberson 
299193ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
29928f51ad55SJeff Roberson 	if (ts->ts_name[0] == '\0')
29938f51ad55SJeff Roberson 		snprintf(ts->ts_name, sizeof(ts->ts_name),
29948f51ad55SJeff Roberson 		    "%s tid %d", td->td_name, td->td_tid);
29958f51ad55SJeff Roberson 	return (ts->ts_name);
29968f51ad55SJeff Roberson #else
29978f51ad55SJeff Roberson 	return (td->td_name);
29988f51ad55SJeff Roberson #endif
29998f51ad55SJeff Roberson }
30008f51ad55SJeff Roberson 
300144ad5475SJohn Baldwin #ifdef KTR
300244ad5475SJohn Baldwin void
300344ad5475SJohn Baldwin sched_clear_tdname(struct thread *td)
300444ad5475SJohn Baldwin {
300544ad5475SJohn Baldwin 	struct td_sched *ts;
300644ad5475SJohn Baldwin 
300793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
300844ad5475SJohn Baldwin 	ts->ts_name[0] = '\0';
300944ad5475SJohn Baldwin }
301044ad5475SJohn Baldwin #endif
301144ad5475SJohn Baldwin 
301207095abfSIvan Voras #ifdef SMP
301307095abfSIvan Voras 
301407095abfSIvan Voras /*
301507095abfSIvan Voras  * Build the CPU topology dump string.  It is called recursively to collect
301607095abfSIvan Voras  * the topology tree.
301707095abfSIvan Voras  */
301807095abfSIvan Voras static int
301907095abfSIvan Voras sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
302007095abfSIvan Voras     int indent)
302107095abfSIvan Voras {
302271a19bdcSAttilio Rao 	char cpusetbuf[CPUSETBUFSIZ];
302307095abfSIvan Voras 	int i, first;
302407095abfSIvan Voras 
302507095abfSIvan Voras 	sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
302619b8a6dbSAndriy Gapon 	    "", 1 + indent / 2, cg->cg_level);
302771a19bdcSAttilio Rao 	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
302871a19bdcSAttilio Rao 	    cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
302907095abfSIvan Voras 	first = TRUE;
303007095abfSIvan Voras 	for (i = 0; i < MAXCPU; i++) {
303171a19bdcSAttilio Rao 		if (CPU_ISSET(i, &cg->cg_mask)) {
303207095abfSIvan Voras 			if (!first)
303307095abfSIvan Voras 				sbuf_printf(sb, ", ");
303407095abfSIvan Voras 			else
303507095abfSIvan Voras 				first = FALSE;
303607095abfSIvan Voras 			sbuf_printf(sb, "%d", i);
303707095abfSIvan Voras 		}
303807095abfSIvan Voras 	}
303907095abfSIvan Voras 	sbuf_printf(sb, "</cpu>\n");
304007095abfSIvan Voras 
304107095abfSIvan Voras 	if (cg->cg_flags != 0) {
3042611daf7eSIvan Voras 		sbuf_printf(sb, "%*s <flags>", indent, "");
304307095abfSIvan Voras 		if ((cg->cg_flags & CG_FLAG_HTT) != 0)
30445368befbSIvan Voras 			sbuf_printf(sb, "<flag name=\"HTT\">HTT group</flag>");
3045a401f2d0SIvan Voras 		if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
3046a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"THREAD\">THREAD group</flag>");
30477b55ab05SJeff Roberson 		if ((cg->cg_flags & CG_FLAG_SMT) != 0)
3048a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"SMT\">SMT group</flag>");
304907095abfSIvan Voras 		sbuf_printf(sb, "</flags>\n");
3050611daf7eSIvan Voras 	}
305107095abfSIvan Voras 
305207095abfSIvan Voras 	if (cg->cg_children > 0) {
305307095abfSIvan Voras 		sbuf_printf(sb, "%*s <children>\n", indent, "");
305407095abfSIvan Voras 		for (i = 0; i < cg->cg_children; i++)
305507095abfSIvan Voras 			sysctl_kern_sched_topology_spec_internal(sb,
305607095abfSIvan Voras 			    &cg->cg_child[i], indent+2);
305707095abfSIvan Voras 		sbuf_printf(sb, "%*s </children>\n", indent, "");
305807095abfSIvan Voras 	}
305907095abfSIvan Voras 	sbuf_printf(sb, "%*s</group>\n", indent, "");
306007095abfSIvan Voras 	return (0);
306107095abfSIvan Voras }
306207095abfSIvan Voras 
306307095abfSIvan Voras /*
306407095abfSIvan Voras  * Sysctl handler for retrieving the topology dump.  It is a wrapper for
306507095abfSIvan Voras  * the recursive sysctl_kern_sched_topology_spec_internal().
306607095abfSIvan Voras  */
306707095abfSIvan Voras static int
306807095abfSIvan Voras sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
306907095abfSIvan Voras {
307007095abfSIvan Voras 	struct sbuf *topo;
307107095abfSIvan Voras 	int err;
307207095abfSIvan Voras 
307307095abfSIvan Voras 	KASSERT(cpu_top != NULL, ("cpu_top isn't initialized"));
307407095abfSIvan Voras 
3075b97fa22cSIan Lepore 	topo = sbuf_new_for_sysctl(NULL, NULL, 512, req);
307607095abfSIvan Voras 	if (topo == NULL)
307707095abfSIvan Voras 		return (ENOMEM);
307807095abfSIvan Voras 
307907095abfSIvan Voras 	sbuf_printf(topo, "<groups>\n");
308007095abfSIvan Voras 	err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1);
308107095abfSIvan Voras 	sbuf_printf(topo, "</groups>\n");
308207095abfSIvan Voras 
308307095abfSIvan Voras 	if (err == 0) {
3084b97fa22cSIan Lepore 		err = sbuf_finish(topo);
308507095abfSIvan Voras 	}
308607095abfSIvan Voras 	sbuf_delete(topo);
308707095abfSIvan Voras 	return (err);
308807095abfSIvan Voras }
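/*
 * Illustrative output shape for a single SMT-capable package (group
 * levels, counts, and masks are hypothetical; real values depend on the
 * hardware and on cpusetobj_strprint()):
 *
 * <groups>
 *  <group level="1" cache-level="0">
 *   <cpu count="4" mask="f">0, 1, 2, 3</cpu>
 *   <children>
 *    <group level="2" cache-level="2">
 *     <cpu count="2" mask="3">0, 1</cpu>
 *     <flags><flag name="THREAD">THREAD group</flag></flags>
 *    </group>
 *   </children>
 *  </group>
 * </groups>
 */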
3089b67cc292SDavid Xu 
309007095abfSIvan Voras #endif
309107095abfSIvan Voras 
3092579895dfSAlexander Motin static int
3093579895dfSAlexander Motin sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
3094579895dfSAlexander Motin {
3095579895dfSAlexander Motin 	int error, new_val, period;
3096579895dfSAlexander Motin 
3097579895dfSAlexander Motin 	period = 1000000 / realstathz;
3098579895dfSAlexander Motin 	new_val = period * sched_slice;
3099579895dfSAlexander Motin 	error = sysctl_handle_int(oidp, &new_val, 0, req);
3100579895dfSAlexander Motin 	if (error != 0 || req->newptr == NULL)
3101579895dfSAlexander Motin 		return (error);
3102579895dfSAlexander Motin 	if (new_val <= 0)
3103579895dfSAlexander Motin 		return (EINVAL);
310437f4e025SAlexander Motin 	sched_slice = imax(1, (new_val + period / 2) / period);
31055e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
310637f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
310737f4e025SAlexander Motin 	    realstathz);
3108579895dfSAlexander Motin 	return (0);
3109579895dfSAlexander Motin }
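/*
 * Worked example of the conversion above, assuming realstathz == 127 and
 * sched_slice == 12: period = 1000000 / 127 = 7874us, so the sysctl
 * reports 7874 * 12 = 94488us.  Writing 94488 back computes
 * imax(1, (94488 + 3937) / 7874) == 12, recovering the same tick count;
 * the "+ period / 2" term rounds to the nearest tick rather than down.
 */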
3110579895dfSAlexander Motin 
31119727e637SJeff Roberson SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
3112ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
3113e7d50326SJeff Roberson     "Scheduler name");
3114579895dfSAlexander Motin SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
3115579895dfSAlexander Motin     NULL, 0, sysctl_kern_quantum, "I",
311637f4e025SAlexander Motin     "Quantum for timeshare threads in microseconds");
3117ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
311837f4e025SAlexander Motin     "Quantum for timeshare threads in stathz ticks");
3119ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
3120ae7a6b38SJeff Roberson     "Interactivity score threshold");
312137f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW,
312237f4e025SAlexander Motin     &preempt_thresh, 0,
312337f4e025SAlexander Motin     "Maximal (lowest) priority for preemption");
312437f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0,
312537f4e025SAlexander Motin     "Assign static kernel priorities to sleeping threads");
312637f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0,
312737f4e025SAlexander Motin     "Number of times idle thread will spin waiting for new work");
312837f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW,
312937f4e025SAlexander Motin     &sched_idlespinthresh, 0,
313037f4e025SAlexander Motin     "Threshold before we will permit idle thread spinning");
31317b8bfa0dSJeff Roberson #ifdef SMP
3132ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
3133ae7a6b38SJeff Roberson     "Number of hz ticks to keep thread affinity for");
3134ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
3135ae7a6b38SJeff Roberson     "Enables the long-term load balancer");
31367fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
31377fcf154aSJeff Roberson     &balance_interval, 0,
3138579895dfSAlexander Motin     "Average period in stathz ticks to run the long-term balancer");
3139ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
3140ae7a6b38SJeff Roberson     "Attempts to steal work from other cores before idling");
314128994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
314237f4e025SAlexander Motin     "Minimum load on remote CPU before we'll steal");
314397e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, trysteal_limit, CTLFLAG_RW, &trysteal_limit,
314497e9382dSDon Lewis     0, "Topological distance limit for stealing threads in sched_switch()");
314597e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, always_steal, CTLFLAG_RW, &always_steal, 0,
314697e9382dSDon Lewis     "Always run the stealer from the idle thread");
314707095abfSIvan Voras SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
3148c69a1a50SMateusz Guzik     CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
314907095abfSIvan Voras     "XML dump of detected CPU topology");
31507b8bfa0dSJeff Roberson #endif
3151e7d50326SJeff Roberson 
315254b0e65fSJeff Roberson /* ps compat.  All cpu percentages from ULE are weighted. */
3153a5423ea3SJeff Roberson static int ccpu = 0;
3154e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
3155