xref: /freebsd/sys/kern/sched_ule.c (revision 6d3f74a14a83b867c273c6be2599da182a9b9ec7)
135e6168fSJeff Roberson /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4e7d50326SJeff Roberson  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
535e6168fSJeff Roberson  * All rights reserved.
635e6168fSJeff Roberson  *
735e6168fSJeff Roberson  * Redistribution and use in source and binary forms, with or without
835e6168fSJeff Roberson  * modification, are permitted provided that the following conditions
935e6168fSJeff Roberson  * are met:
1035e6168fSJeff Roberson  * 1. Redistributions of source code must retain the above copyright
1135e6168fSJeff Roberson  *    notice unmodified, this list of conditions, and the following
1235e6168fSJeff Roberson  *    disclaimer.
1335e6168fSJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
1435e6168fSJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
1535e6168fSJeff Roberson  *    documentation and/or other materials provided with the distribution.
1635e6168fSJeff Roberson  *
1735e6168fSJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1835e6168fSJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1935e6168fSJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2035e6168fSJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2135e6168fSJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2235e6168fSJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2335e6168fSJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2435e6168fSJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2535e6168fSJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2635e6168fSJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2735e6168fSJeff Roberson  */
2835e6168fSJeff Roberson 
29ae7a6b38SJeff Roberson /*
30ae7a6b38SJeff Roberson  * This file implements the ULE scheduler.  ULE supports independent CPU
31ae7a6b38SJeff Roberson  * run queues and fine-grained locking.  It has superior interactive
32ae7a6b38SJeff Roberson  * performance under load even on uni-processor systems.
33ae7a6b38SJeff Roberson  *
34ae7a6b38SJeff Roberson  * etymology:
35a5423ea3SJeff Roberson  *   ULE is the last three letters in schedule.  It owes its name to a
36ae7a6b38SJeff Roberson  * generic user created for a scheduling system by Paul Mikesell at
37ae7a6b38SJeff Roberson  * Isilon Systems and a general lack of creativity on the part of the author.
38ae7a6b38SJeff Roberson  */
39ae7a6b38SJeff Roberson 
40677b542eSDavid E. O'Brien #include <sys/cdefs.h>
41113dda8aSJeff Roberson __FBSDID("$FreeBSD$");
42677b542eSDavid E. O'Brien 
434da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
444da0d332SPeter Wemm #include "opt_sched.h"
459923b511SScott Long 
4635e6168fSJeff Roberson #include <sys/param.h>
4735e6168fSJeff Roberson #include <sys/systm.h>
482c3490b1SMarcel Moolenaar #include <sys/kdb.h>
4935e6168fSJeff Roberson #include <sys/kernel.h>
5035e6168fSJeff Roberson #include <sys/ktr.h>
51c149e542SAttilio Rao #include <sys/limits.h>
5235e6168fSJeff Roberson #include <sys/lock.h>
5335e6168fSJeff Roberson #include <sys/mutex.h>
5435e6168fSJeff Roberson #include <sys/proc.h>
55245f3abfSJeff Roberson #include <sys/resource.h>
569bacd788SJeff Roberson #include <sys/resourcevar.h>
5735e6168fSJeff Roberson #include <sys/sched.h>
58b3e9e682SRyan Stone #include <sys/sdt.h>
5935e6168fSJeff Roberson #include <sys/smp.h>
6035e6168fSJeff Roberson #include <sys/sx.h>
6135e6168fSJeff Roberson #include <sys/sysctl.h>
6235e6168fSJeff Roberson #include <sys/sysproto.h>
63f5c157d9SJohn Baldwin #include <sys/turnstile.h>
64af29f399SDmitry Chagin #include <sys/umtxvar.h>
6535e6168fSJeff Roberson #include <sys/vmmeter.h>
6662fa74d9SJeff Roberson #include <sys/cpuset.h>
6707095abfSIvan Voras #include <sys/sbuf.h>
6835e6168fSJeff Roberson 
69ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS
70ebccf1e3SJoseph Koshy #include <sys/pmckern.h>
71ebccf1e3SJoseph Koshy #endif
72ebccf1e3SJoseph Koshy 
736f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
746f5f25e5SJohn Birrell #include <sys/dtrace_bsd.h>
7561322a0aSAlexander Motin int __read_mostly		dtrace_vtime_active;
766f5f25e5SJohn Birrell dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
776f5f25e5SJohn Birrell #endif
786f5f25e5SJohn Birrell 
7935e6168fSJeff Roberson #include <machine/cpu.h>
8022bf7d9aSJeff Roberson #include <machine/smp.h>
8135e6168fSJeff Roberson 
82ae7a6b38SJeff Roberson #define	KTR_ULE	0
8314618990SJeff Roberson 
840d2cf837SJeff Roberson #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
850d2cf837SJeff Roberson #define	TDQ_NAME_LEN	(sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU)))
866338c579SAttilio Rao #define	TDQ_LOADNAME_LEN	(sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load"))
878f51ad55SJeff Roberson 
886b2f763fSJeff Roberson /*
89ae7a6b38SJeff Roberson  * Thread scheduler specific section.  All fields are protected
90ae7a6b38SJeff Roberson  * by the thread lock.
91ed062c8dSJulian Elischer  */
92ad1e7d28SJulian Elischer struct td_sched {
93ae7a6b38SJeff Roberson 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
94ae7a6b38SJeff Roberson 	short		ts_flags;	/* TSF_* flags. */
95e77f9fedSAdrian Chadd 	int		ts_cpu;		/* CPU that we have affinity for. */
9673daf66fSJeff Roberson 	int		ts_rltick;	/* Real last tick, for affinity. */
97ae7a6b38SJeff Roberson 	int		ts_slice;	/* Ticks of slice remaining. */
98ae7a6b38SJeff Roberson 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
99ae7a6b38SJeff Roberson 	u_int		ts_runtime;	/* Number of ticks we were running */
100ad1e7d28SJulian Elischer 	int		ts_ltick;	/* Last tick that we were running on */
101ad1e7d28SJulian Elischer 	int		ts_ftick;	/* First tick that we were running on */
102ad1e7d28SJulian Elischer 	int		ts_ticks;	/* Tick count */
1038f51ad55SJeff Roberson #ifdef KTR
1048f51ad55SJeff Roberson 	char		ts_name[TS_NAME_LEN];
1058f51ad55SJeff Roberson #endif
106ed062c8dSJulian Elischer };
107ad1e7d28SJulian Elischer /* flags kept in ts_flags */
1087b8bfa0dSJeff Roberson #define	TSF_BOUND	0x0001		/* Thread can not migrate. */
1097b8bfa0dSJeff Roberson #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
11035e6168fSJeff Roberson 
11162fa74d9SJeff Roberson #define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
11262fa74d9SJeff Roberson #define	THREAD_CAN_SCHED(td, cpu)	\
11362fa74d9SJeff Roberson     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
11462fa74d9SJeff Roberson 
11593ccd6bfSKonstantin Belousov _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
11693ccd6bfSKonstantin Belousov     sizeof(struct thread0_storage),
11793ccd6bfSKonstantin Belousov     "increase struct thread0_storage.t0st_sched size");
11893ccd6bfSKonstantin Belousov 
11935e6168fSJeff Roberson /*
12012d56c0fSJohn Baldwin  * Priority ranges used for interactive and non-interactive timeshare
1212dc29adbSJohn Baldwin  * threads.  The timeshare priorities are split up into four ranges.
1222dc29adbSJohn Baldwin  * The first range handles interactive threads.  The last three ranges
1232dc29adbSJohn Baldwin  * (NHALF, x, and NHALF) handle non-interactive threads with the outer
1242dc29adbSJohn Baldwin  * ranges supporting nice values.
12512d56c0fSJohn Baldwin  */
1262dc29adbSJohn Baldwin #define	PRI_TIMESHARE_RANGE	(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
1272dc29adbSJohn Baldwin #define	PRI_INTERACT_RANGE	((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2)
12816705791SAndriy Gapon #define	PRI_BATCH_RANGE		(PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE)
1292dc29adbSJohn Baldwin 
1302dc29adbSJohn Baldwin #define	PRI_MIN_INTERACT	PRI_MIN_TIMESHARE
1312dc29adbSJohn Baldwin #define	PRI_MAX_INTERACT	(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1)
1322dc29adbSJohn Baldwin #define	PRI_MIN_BATCH		(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE)
13312d56c0fSJohn Baldwin #define	PRI_MAX_BATCH		PRI_MAX_TIMESHARE
13412d56c0fSJohn Baldwin 
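/*
 * Illustrative arithmetic (editorial note, not part of the original source):
 * assuming the stock values PRI_MIN_TIMESHARE = 120 and PRI_MAX_TIMESHARE =
 * 223 from <sys/priority.h> (these have changed over time, so check your
 * tree), the macros above work out to:
 *
 *	PRI_TIMESHARE_RANGE = 223 - 120 + 1 = 104
 *	PRI_INTERACT_RANGE  = (104 - 40) / 2 = 32	(40 = number of nice levels)
 *	PRI_BATCH_RANGE     = 104 - 32 = 72
 *	interactive threads use priorities 120 .. 151,
 *	batch (non-interactive) threads use 152 .. 223.
 */
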
13512d56c0fSJohn Baldwin /*
136e7d50326SJeff Roberson  * Cpu percentage computation macros and defines.
137e1f89c22SJeff Roberson  *
138e7d50326SJeff Roberson  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
139e7d50326SJeff Roberson  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
1408ab80cf0SJeff Roberson  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
141e7d50326SJeff Roberson  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
142e7d50326SJeff Roberson  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
143e7d50326SJeff Roberson  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
14435e6168fSJeff Roberson  */
145e7d50326SJeff Roberson #define	SCHED_TICK_SECS		10
146e7d50326SJeff Roberson #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
1478ab80cf0SJeff Roberson #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
148e7d50326SJeff Roberson #define	SCHED_TICK_SHIFT	10
149e7d50326SJeff Roberson #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
150eddb4efaSJeff Roberson #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
15135e6168fSJeff Roberson 
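/*
 * Illustrative arithmetic (editorial note): with the common hz = 1000,
 * SCHED_TICK_TARG is 10000 ticks (10 seconds of history) and SCHED_TICK_MAX
 * is 11000 ticks.  ts_ticks is kept scaled by 2^SCHED_TICK_SHIFT, so a thread
 * that ran on every tick of a full window accumulates roughly 10000 << 10
 * there, and SCHED_TICK_HZ() recovers ~10000 from it.
 */
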
15235e6168fSJeff Roberson /*
153e7d50326SJeff Roberson  * These macros determine priorities for non-interactive threads.  They are
154e7d50326SJeff Roberson  * assigned a priority based on their recent cpu utilization as expressed
155e7d50326SJeff Roberson  * by the ratio of ticks to the tick total.  NHALF priorities at the start
156e7d50326SJeff Roberson  * and end of the MIN to MAX timeshare range are only reachable with negative
157e7d50326SJeff Roberson  * or positive nice respectively.
158e7d50326SJeff Roberson  *
159e7d50326SJeff Roberson  * PRI_RANGE:	Priority range for utilization dependent priorities.
160e7d50326SJeff Roberson  * PRI_NRESV:	Number of nice values.
161e7d50326SJeff Roberson  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
162e7d50326SJeff Roberson  * PRI_NICE:	Determines the part of the priority inherited from nice.
163e7d50326SJeff Roberson  */
164e7d50326SJeff Roberson #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
165e7d50326SJeff Roberson #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
16612d56c0fSJohn Baldwin #define	SCHED_PRI_MIN		(PRI_MIN_BATCH + SCHED_PRI_NHALF)
16712d56c0fSJohn Baldwin #define	SCHED_PRI_MAX		(PRI_MAX_BATCH - SCHED_PRI_NHALF)
16878920008SJohn Baldwin #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
169e7d50326SJeff Roberson #define	SCHED_PRI_TICKS(ts)						\
170e7d50326SJeff Roberson     (SCHED_TICK_HZ((ts)) /						\
1711e516cf5SJeff Roberson     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
172e7d50326SJeff Roberson #define	SCHED_PRI_NICE(nice)	(nice)
173e7d50326SJeff Roberson 
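/*
 * Illustrative arithmetic (editorial note): SCHED_PRI_TICKS() maps recent CPU
 * use onto 0 .. SCHED_PRI_RANGE.  A thread that ran on every tick of its
 * window has SCHED_TICK_HZ() ~= SCHED_TICK_TOTAL() and so draws nearly the
 * full SCHED_PRI_RANGE penalty, while a thread that barely ran draws ~0.
 * Under the priority values assumed in the note above, SCHED_PRI_MIN ..
 * SCHED_PRI_MAX is 172 .. 203 and SCHED_PRI_RANGE is 32.
 */
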
174e7d50326SJeff Roberson /*
175e7d50326SJeff Roberson  * These determine the interactivity of a process.  Interactivity differs from
176e7d50326SJeff Roberson  * cpu utilization in that it expresses the voluntary time slept vs time ran
177e7d50326SJeff Roberson  * while cpu utilization includes all time not running.  This more accurately
178e7d50326SJeff Roberson  * models the intent of the thread.
17935e6168fSJeff Roberson  *
180407b0157SJeff Roberson  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
181407b0157SJeff Roberson  *		before throttling back.
182d322132cSJeff Roberson  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
183210491d3SJeff Roberson  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
1849f518f20SAttilio Rao  * INTERACT_THRESH:	Threshold for placement on the current runq.
18535e6168fSJeff Roberson  */
186e7d50326SJeff Roberson #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
187e7d50326SJeff Roberson #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
188210491d3SJeff Roberson #define	SCHED_INTERACT_MAX	(100)
189210491d3SJeff Roberson #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
1904c9612c6SJeff Roberson #define	SCHED_INTERACT_THRESH	(30)
191e1f89c22SJeff Roberson 
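/*
 * Illustrative arithmetic (editorial note): the sleep and run times compared
 * against these limits are kept in hz ticks scaled by 2^SCHED_TICK_SHIFT, so
 * with hz = 1000 SCHED_SLP_RUN_MAX corresponds to 5 seconds of combined
 * sleep + run history and SCHED_SLP_RUN_FORK to half a second.  Lower scores
 * mean more voluntary sleep relative to run time; threads scoring at or below
 * SCHED_INTERACT_THRESH are treated as interactive and are queued on the
 * current (tdq_realtime) run queue.
 */
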
1925e5c3873SJeff Roberson /*
1935e5c3873SJeff Roberson  * These parameters determine the slice behavior for batch work.
1945e5c3873SJeff Roberson  */
1955e5c3873SJeff Roberson #define	SCHED_SLICE_DEFAULT_DIVISOR	10	/* ~94 ms, 12 stathz ticks. */
1965e5c3873SJeff Roberson #define	SCHED_SLICE_MIN_DIVISOR		6	/* DEFAULT/MIN = ~16 ms. */
1975e5c3873SJeff Roberson 
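/*
 * Illustrative arithmetic (editorial note): with the usual stathz of 127 the
 * default slice is 127 / 10 = 12 stathz ticks (~94 ms) and the minimum slice
 * is 12 / 6 = 2 ticks (~16 ms), matching the comments above.
 */
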
1983d7f4117SAlexander Motin /* Flags kept in td_flags. */
199e745d729SAlexander Motin #define	TDF_PICKCPU	TDF_SCHED0	/* Thread should pick new CPU. */
2003d7f4117SAlexander Motin #define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
2013d7f4117SAlexander Motin 
20235e6168fSJeff Roberson /*
203e7d50326SJeff Roberson  * tickincr:		Converts a stathz tick into a hz domain scaled by
204e7d50326SJeff Roberson  *			the shift factor.  Without the shift the error rate
205e7d50326SJeff Roberson  *			due to rounding would be unacceptably high.
206e7d50326SJeff Roberson  * realstathz:		stathz is sometimes 0, in which case we run off of hz.
207e7d50326SJeff Roberson  * sched_slice:		Runtime of each thread before rescheduling.
208ae7a6b38SJeff Roberson  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
20935e6168fSJeff Roberson  */
2101c119e17SAlexander Motin static u_int __read_mostly sched_interact = SCHED_INTERACT_THRESH;
21161322a0aSAlexander Motin static int __read_mostly tickincr = 8 << SCHED_TICK_SHIFT;
21261322a0aSAlexander Motin static int __read_mostly realstathz = 127;	/* reset during boot. */
21361322a0aSAlexander Motin static int __read_mostly sched_slice = 10;	/* reset during boot. */
21461322a0aSAlexander Motin static int __read_mostly sched_slice_min = 1;	/* reset during boot. */
21502e2d6b4SJeff Roberson #ifdef PREEMPTION
21602e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION
21761322a0aSAlexander Motin static int __read_mostly preempt_thresh = PRI_MAX_IDLE;
21802e2d6b4SJeff Roberson #else
21961322a0aSAlexander Motin static int __read_mostly preempt_thresh = PRI_MIN_KERN;
22002e2d6b4SJeff Roberson #endif
22102e2d6b4SJeff Roberson #else
22261322a0aSAlexander Motin static int __read_mostly preempt_thresh = 0;
22302e2d6b4SJeff Roberson #endif
22461322a0aSAlexander Motin static int __read_mostly static_boost = PRI_MIN_BATCH;
22561322a0aSAlexander Motin static int __read_mostly sched_idlespins = 10000;
22661322a0aSAlexander Motin static int __read_mostly sched_idlespinthresh = -1;
227ae7a6b38SJeff Roberson 
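/*
 * Editorial note: most of these knobs are exported further down in this file
 * as kern.sched.* sysctls (e.g. kern.sched.interact, kern.sched.slice,
 * kern.sched.preempt_thresh; names assumed from the SYSCTL definitions, so
 * verify on your system), which allows run-time tuning without a rebuild.
 */
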
22835e6168fSJeff Roberson /*
229ae7a6b38SJeff Roberson  * tdq - per processor runqs and statistics.  All fields are protected by the
230ae7a6b38SJeff Roberson  * tdq_lock.  The load and lowpri may be accessed without it to avoid excess
231ae7a6b38SJeff Roberson  * locking in sched_pickcpu().
23235e6168fSJeff Roberson  */
233ad1e7d28SJulian Elischer struct tdq {
23439f819e2SJim Harris 	/*
23539f819e2SJim Harris 	 * Ordered to improve efficiency of cpu_search() and switch().
23639f819e2SJim Harris 	 * tdq_lock is padded to avoid false sharing with tdq_load and
23739f819e2SJim Harris 	 * tdq_cpu_idle.
23839f819e2SJim Harris 	 */
2394ceaf45dSAttilio Rao 	struct mtx_padalign tdq_lock;		/* run queue lock. */
24073daf66fSJeff Roberson 	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
241*6d3f74a1SMark Johnston 	struct thread	*tdq_curthread;		/* Current executing thread. */
2421690c6c1SJeff Roberson 	volatile int	tdq_load;		/* Aggregate load. */
2439f9ad565SAlexander Motin 	volatile int	tdq_cpu_idle;		/* cpu_idle() is active. */
24473daf66fSJeff Roberson 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
24597e9382dSDon Lewis 	volatile int	tdq_transferable;	/* Transferable thread count. */
24697e9382dSDon Lewis 	volatile short	tdq_switchcnt;		/* Switches this tick. */
24797e9382dSDon Lewis 	volatile short	tdq_oldswitchcnt;	/* Switches last tick. */
24873daf66fSJeff Roberson 	u_char		tdq_lowpri;		/* Lowest priority thread. */
2497789ab32SMark Johnston 	u_char		tdq_owepreempt;		/* Remote preemption pending. */
25073daf66fSJeff Roberson 	u_char		tdq_idx;		/* Current insert index. */
25173daf66fSJeff Roberson 	u_char		tdq_ridx;		/* Current removal index. */
252018ff686SJeff Roberson 	int		tdq_id;			/* cpuid. */
253e7d50326SJeff Roberson 	struct runq	tdq_realtime;		/* real-time run queue. */
254ae7a6b38SJeff Roberson 	struct runq	tdq_timeshare;		/* timeshare run queue. */
255ae7a6b38SJeff Roberson 	struct runq	tdq_idle;		/* Queue of IDLE threads. */
2568f51ad55SJeff Roberson 	char		tdq_name[TDQ_NAME_LEN];
2578f51ad55SJeff Roberson #ifdef KTR
2588f51ad55SJeff Roberson 	char		tdq_loadname[TDQ_LOADNAME_LEN];
2598f51ad55SJeff Roberson #endif
260ae7a6b38SJeff Roberson } __aligned(64);
26135e6168fSJeff Roberson 
2621690c6c1SJeff Roberson /* Idle thread states and config. */
2631690c6c1SJeff Roberson #define	TDQ_RUNNING	1
2641690c6c1SJeff Roberson #define	TDQ_IDLE	2
2657b8bfa0dSJeff Roberson 
26680f86c9fSJeff Roberson #ifdef SMP
26761322a0aSAlexander Motin struct cpu_group __read_mostly *cpu_top;		/* CPU topology */
2687b8bfa0dSJeff Roberson 
26962fa74d9SJeff Roberson #define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
27062fa74d9SJeff Roberson #define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))
2717b8bfa0dSJeff Roberson 
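/*
 * Illustrative arithmetic (editorial note): affinity is set to
 * SCHED_AFFINITY_DEFAULT later during boot, so with hz = 1000 it is 1 and
 * SCHED_AFFINITY(ts, t) asks whether the thread last ran on its preferred
 * CPU within the past t hz ticks, i.e. roughly t milliseconds.
 */
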
2727b8bfa0dSJeff Roberson /*
2737b8bfa0dSJeff Roberson  * Run-time tunables.
2747b8bfa0dSJeff Roberson  */
27528994a58SJeff Roberson static int rebalance = 1;
2767fcf154aSJeff Roberson static int balance_interval = 128;	/* Default set in sched_initticks(). */
27761322a0aSAlexander Motin static int __read_mostly affinity;
27861322a0aSAlexander Motin static int __read_mostly steal_idle = 1;
27961322a0aSAlexander Motin static int __read_mostly steal_thresh = 2;
28061322a0aSAlexander Motin static int __read_mostly always_steal = 0;
28161322a0aSAlexander Motin static int __read_mostly trysteal_limit = 2;
28280f86c9fSJeff Roberson 
28335e6168fSJeff Roberson /*
284d2ad694cSJeff Roberson  * One thread queue per processor.
28535e6168fSJeff Roberson  */
28661322a0aSAlexander Motin static struct tdq __read_mostly *balance_tdq;
2877fcf154aSJeff Roberson static int balance_ticks;
288018ff686SJeff Roberson DPCPU_DEFINE_STATIC(struct tdq, tdq);
2892bf95012SAndrew Turner DPCPU_DEFINE_STATIC(uint32_t, randomval);
290dc03363dSJeff Roberson 
291018ff686SJeff Roberson #define	TDQ_SELF()	((struct tdq *)PCPU_GET(sched))
292018ff686SJeff Roberson #define	TDQ_CPU(x)	(DPCPU_ID_PTR((x), tdq))
293018ff686SJeff Roberson #define	TDQ_ID(x)	((x)->tdq_id)
29480f86c9fSJeff Roberson #else	/* !SMP */
295ad1e7d28SJulian Elischer static struct tdq	tdq_cpu;
296dc03363dSJeff Roberson 
29736b36916SJeff Roberson #define	TDQ_ID(x)	(0)
298ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu)
299ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu)
3000a016a05SJeff Roberson #endif
30135e6168fSJeff Roberson 
302ae7a6b38SJeff Roberson #define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
303ae7a6b38SJeff Roberson #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
304ae7a6b38SJeff Roberson #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
3058bb173fbSAlexander Motin #define	TDQ_TRYLOCK(t)		mtx_trylock_spin(TDQ_LOCKPTR((t)))
3068bb173fbSAlexander Motin #define	TDQ_TRYLOCK_FLAGS(t, f)	mtx_trylock_spin_flags(TDQ_LOCKPTR((t)), (f))
307ae7a6b38SJeff Roberson #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
3084ceaf45dSAttilio Rao #define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
309ae7a6b38SJeff Roberson 
3108460a577SJohn Birrell static void sched_priority(struct thread *);
31121381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char);
3128460a577SJohn Birrell static int sched_interact_score(struct thread *);
3138460a577SJohn Birrell static void sched_interact_update(struct thread *);
3148460a577SJohn Birrell static void sched_interact_fork(struct thread *);
3157295465eSAlexander Motin static void sched_pctcpu_update(struct td_sched *, int);
31635e6168fSJeff Roberson 
3175d7ef00cSJeff Roberson /* Operations on per processor queues */
3189727e637SJeff Roberson static struct thread *tdq_choose(struct tdq *);
319018ff686SJeff Roberson static void tdq_setup(struct tdq *, int i);
3209727e637SJeff Roberson static void tdq_load_add(struct tdq *, struct thread *);
3219727e637SJeff Roberson static void tdq_load_rem(struct tdq *, struct thread *);
3229727e637SJeff Roberson static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
3239727e637SJeff Roberson static __inline void tdq_runq_rem(struct tdq *, struct thread *);
324ff256d9cSJeff Roberson static inline int sched_shouldpreempt(int, int, int);
325ad1e7d28SJulian Elischer void tdq_print(int cpu);
326e7d50326SJeff Roberson static void runq_print(struct runq *rq);
327*6d3f74a1SMark Johnston static int tdq_add(struct tdq *, struct thread *, int);
3285d7ef00cSJeff Roberson #ifdef SMP
329*6d3f74a1SMark Johnston static int tdq_move(struct tdq *, struct tdq *);
330ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *);
331*6d3f74a1SMark Johnston static void tdq_notify(struct tdq *, int lowpri);
3329727e637SJeff Roberson static struct thread *tdq_steal(struct tdq *, int);
3339727e637SJeff Roberson static struct thread *runq_steal(struct runq *, int);
3349727e637SJeff Roberson static int sched_pickcpu(struct thread *, int);
3357fcf154aSJeff Roberson static void sched_balance(void);
336*6d3f74a1SMark Johnston static bool sched_balance_pair(struct tdq *, struct tdq *);
3379727e637SJeff Roberson static inline struct tdq *sched_setcpu(struct thread *, int, int);
338ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *);
33907095abfSIvan Voras static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
34007095abfSIvan Voras static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
34107095abfSIvan Voras     struct cpu_group *cg, int indent);
3425d7ef00cSJeff Roberson #endif
3435d7ef00cSJeff Roberson 
344e7d50326SJeff Roberson static void sched_setup(void *dummy);
345237fdd78SRobert Watson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
346e7d50326SJeff Roberson 
347e7d50326SJeff Roberson static void sched_initticks(void *dummy);
348237fdd78SRobert Watson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
349237fdd78SRobert Watson     NULL);
350e7d50326SJeff Roberson 
351b3e9e682SRyan Stone SDT_PROVIDER_DEFINE(sched);
352b3e9e682SRyan Stone 
353d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
354b3e9e682SRyan Stone     "struct proc *", "uint8_t");
355d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
356b3e9e682SRyan Stone     "struct proc *", "void *");
357d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
358b3e9e682SRyan Stone     "struct proc *", "void *", "int");
359d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
360b3e9e682SRyan Stone     "struct proc *", "uint8_t", "struct thread *");
361d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
362d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
363b3e9e682SRyan Stone     "struct proc *");
364d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , on__cpu);
365d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , remain__cpu);
366d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
367b3e9e682SRyan Stone     "struct proc *");
368b3e9e682SRyan Stone 
3690567b6ccSWarner Losh /*
370ae7a6b38SJeff Roberson  * Print the threads waiting on a run-queue.
371ae7a6b38SJeff Roberson  */
372e7d50326SJeff Roberson static void
373e7d50326SJeff Roberson runq_print(struct runq *rq)
374e7d50326SJeff Roberson {
375e7d50326SJeff Roberson 	struct rqhead *rqh;
3769727e637SJeff Roberson 	struct thread *td;
377e7d50326SJeff Roberson 	int pri;
378e7d50326SJeff Roberson 	int j;
379e7d50326SJeff Roberson 	int i;
380e7d50326SJeff Roberson 
381e7d50326SJeff Roberson 	for (i = 0; i < RQB_LEN; i++) {
382e7d50326SJeff Roberson 		printf("\t\trunq bits %d 0x%zx\n",
383e7d50326SJeff Roberson 		    i, rq->rq_status.rqb_bits[i]);
384e7d50326SJeff Roberson 		for (j = 0; j < RQB_BPW; j++)
385e7d50326SJeff Roberson 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
386e7d50326SJeff Roberson 				pri = j + (i << RQB_L2BPW);
387e7d50326SJeff Roberson 				rqh = &rq->rq_queues[pri];
3889727e637SJeff Roberson 				TAILQ_FOREACH(td, rqh, td_runq) {
389e7d50326SJeff Roberson 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
3909727e637SJeff Roberson 					    td, td->td_name, td->td_priority,
3919727e637SJeff Roberson 					    td->td_rqindex, pri);
392e7d50326SJeff Roberson 				}
393e7d50326SJeff Roberson 			}
394e7d50326SJeff Roberson 	}
395e7d50326SJeff Roberson }
396e7d50326SJeff Roberson 
397ae7a6b38SJeff Roberson /*
398ae7a6b38SJeff Roberson  * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
399ae7a6b38SJeff Roberson  */
40015dc847eSJeff Roberson void
401ad1e7d28SJulian Elischer tdq_print(int cpu)
40215dc847eSJeff Roberson {
403ad1e7d28SJulian Elischer 	struct tdq *tdq;
40415dc847eSJeff Roberson 
405ad1e7d28SJulian Elischer 	tdq = TDQ_CPU(cpu);
40615dc847eSJeff Roberson 
407c47f202bSJeff Roberson 	printf("tdq %d:\n", TDQ_ID(tdq));
40862fa74d9SJeff Roberson 	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
40962fa74d9SJeff Roberson 	printf("\tLock name:      %s\n", tdq->tdq_name);
410d2ad694cSJeff Roberson 	printf("\tload:           %d\n", tdq->tdq_load);
4111690c6c1SJeff Roberson 	printf("\tswitch cnt:     %d\n", tdq->tdq_switchcnt);
4121690c6c1SJeff Roberson 	printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
413e7d50326SJeff Roberson 	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
4143f872f85SJeff Roberson 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
4151690c6c1SJeff Roberson 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
4161690c6c1SJeff Roberson 	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
417e7d50326SJeff Roberson 	printf("\trealtime runq:\n");
418e7d50326SJeff Roberson 	runq_print(&tdq->tdq_realtime);
419e7d50326SJeff Roberson 	printf("\ttimeshare runq:\n");
420e7d50326SJeff Roberson 	runq_print(&tdq->tdq_timeshare);
421e7d50326SJeff Roberson 	printf("\tidle runq:\n");
422e7d50326SJeff Roberson 	runq_print(&tdq->tdq_idle);
42315dc847eSJeff Roberson }
42415dc847eSJeff Roberson 
425ff256d9cSJeff Roberson static inline int
426ff256d9cSJeff Roberson sched_shouldpreempt(int pri, int cpri, int remote)
427ff256d9cSJeff Roberson {
428ff256d9cSJeff Roberson 	/*
429ff256d9cSJeff Roberson 	 * If the new priority is not better than the current priority there is
430ff256d9cSJeff Roberson 	 * nothing to do.
431ff256d9cSJeff Roberson 	 */
432ff256d9cSJeff Roberson 	if (pri >= cpri)
433ff256d9cSJeff Roberson 		return (0);
434ff256d9cSJeff Roberson 	/*
435ff256d9cSJeff Roberson 	 * Always preempt idle.
436ff256d9cSJeff Roberson 	 */
437ff256d9cSJeff Roberson 	if (cpri >= PRI_MIN_IDLE)
438ff256d9cSJeff Roberson 		return (1);
439ff256d9cSJeff Roberson 	/*
440ff256d9cSJeff Roberson 	 * If preemption is disabled don't preempt others.
441ff256d9cSJeff Roberson 	 */
442ff256d9cSJeff Roberson 	if (preempt_thresh == 0)
443ff256d9cSJeff Roberson 		return (0);
444ff256d9cSJeff Roberson 	/*
445ff256d9cSJeff Roberson 	 * Preempt if we exceed the threshold.
446ff256d9cSJeff Roberson 	 */
447ff256d9cSJeff Roberson 	if (pri <= preempt_thresh)
448ff256d9cSJeff Roberson 		return (1);
449ff256d9cSJeff Roberson 	/*
45012d56c0fSJohn Baldwin 	 * If we're interactive or better and a non-interactive or worse
45112d56c0fSJohn Baldwin 	 * thread is running, preempt only on remote processors.
452ff256d9cSJeff Roberson 	 */
45312d56c0fSJohn Baldwin 	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
454ff256d9cSJeff Roberson 		return (1);
455ff256d9cSJeff Roberson 	return (0);
456ff256d9cSJeff Roberson }
457ff256d9cSJeff Roberson 
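/*
 * Editorial example (assuming the stock priority layout): with the usual
 * PREEMPTION (but not FULL_PREEMPTION) default of preempt_thresh =
 * PRI_MIN_KERN, an interrupt- or realtime-priority thread preempts a running
 * timeshare thread; timeshare threads do not preempt each other locally,
 * although the last test above still lets an interactive thread preempt a
 * batch thread on a remote CPU, and a CPU running at idle priority is always
 * preempted regardless of the threshold.
 */
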
458ae7a6b38SJeff Roberson /*
459ae7a6b38SJeff Roberson  * Add a thread to the actual run-queue.  Keeps transferable counts up to
460ae7a6b38SJeff Roberson  * date with what is actually on the run-queue.  Selects the correct
461ae7a6b38SJeff Roberson  * queue position for timeshare threads.
462ae7a6b38SJeff Roberson  */
463155b9987SJeff Roberson static __inline void
4649727e637SJeff Roberson tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
465155b9987SJeff Roberson {
4669727e637SJeff Roberson 	struct td_sched *ts;
467c143ac21SJeff Roberson 	u_char pri;
468c143ac21SJeff Roberson 
469ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
47061a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
47173daf66fSJeff Roberson 
4729727e637SJeff Roberson 	pri = td->td_priority;
47393ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
4749727e637SJeff Roberson 	TD_SET_RUNQ(td);
4759727e637SJeff Roberson 	if (THREAD_CAN_MIGRATE(td)) {
476d2ad694cSJeff Roberson 		tdq->tdq_transferable++;
477ad1e7d28SJulian Elischer 		ts->ts_flags |= TSF_XFERABLE;
47880f86c9fSJeff Roberson 	}
47912d56c0fSJohn Baldwin 	if (pri < PRI_MIN_BATCH) {
480c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_realtime;
48112d56c0fSJohn Baldwin 	} else if (pri <= PRI_MAX_BATCH) {
482c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_timeshare;
48312d56c0fSJohn Baldwin 		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
484e7d50326SJeff Roberson 			("Invalid priority %d on timeshare runq", pri));
485e7d50326SJeff Roberson 		/*
486e7d50326SJeff Roberson 		 * This queue contains only priorities between MIN and MAX
487ba71333fSMark Johnston 		 * batch.  Use the whole queue to represent these values.
488e7d50326SJeff Roberson 		 */
489c47f202bSJeff Roberson 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
49016705791SAndriy Gapon 			pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE;
491e7d50326SJeff Roberson 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
4923f872f85SJeff Roberson 			/*
4933f872f85SJeff Roberson 			 * This effectively shortens the queue by one so we
4943f872f85SJeff Roberson 			 * can have a one slot difference between idx and
4953f872f85SJeff Roberson 			 * ridx while we wait for threads to drain.
4963f872f85SJeff Roberson 			 */
4973f872f85SJeff Roberson 			if (tdq->tdq_ridx != tdq->tdq_idx &&
4983f872f85SJeff Roberson 			    pri == tdq->tdq_ridx)
4994499aff6SJeff Roberson 				pri = (unsigned char)(pri - 1) % RQ_NQS;
500e7d50326SJeff Roberson 		} else
5013f872f85SJeff Roberson 			pri = tdq->tdq_ridx;
5029727e637SJeff Roberson 		runq_add_pri(ts->ts_runq, td, pri, flags);
503c143ac21SJeff Roberson 		return;
504e7d50326SJeff Roberson 	} else
50573daf66fSJeff Roberson 		ts->ts_runq = &tdq->tdq_idle;
5069727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
50773daf66fSJeff Roberson }
50873daf66fSJeff Roberson 
50973daf66fSJeff Roberson /*
510ae7a6b38SJeff Roberson  * Remove a thread from a run-queue.  This typically happens when a thread
511ae7a6b38SJeff Roberson  * is selected to run.  Running threads are not on the queue and the
512ae7a6b38SJeff Roberson  * transferable count does not reflect them.
513ae7a6b38SJeff Roberson  */
514155b9987SJeff Roberson static __inline void
5159727e637SJeff Roberson tdq_runq_rem(struct tdq *tdq, struct thread *td)
516155b9987SJeff Roberson {
5179727e637SJeff Roberson 	struct td_sched *ts;
5189727e637SJeff Roberson 
51993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
520ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
52161a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
522ae7a6b38SJeff Roberson 	KASSERT(ts->ts_runq != NULL,
5239727e637SJeff Roberson 	    ("tdq_runq_remove: thread %p null ts_runq", td));
524ad1e7d28SJulian Elischer 	if (ts->ts_flags & TSF_XFERABLE) {
525d2ad694cSJeff Roberson 		tdq->tdq_transferable--;
526ad1e7d28SJulian Elischer 		ts->ts_flags &= ~TSF_XFERABLE;
52780f86c9fSJeff Roberson 	}
5283f872f85SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
5293f872f85SJeff Roberson 		if (tdq->tdq_idx != tdq->tdq_ridx)
5309727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
531e7d50326SJeff Roberson 		else
5329727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, NULL);
5333f872f85SJeff Roberson 	} else
5349727e637SJeff Roberson 		runq_remove(ts->ts_runq, td);
535155b9987SJeff Roberson }
536155b9987SJeff Roberson 
537ae7a6b38SJeff Roberson /*
538ae7a6b38SJeff Roberson  * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
539ae7a6b38SJeff Roberson  * for this thread to the referenced thread queue.
540ae7a6b38SJeff Roberson  */
541a8949de2SJeff Roberson static void
5429727e637SJeff Roberson tdq_load_add(struct tdq *tdq, struct thread *td)
5435d7ef00cSJeff Roberson {
544ae7a6b38SJeff Roberson 
545ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
54661a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
54703d17db7SJeff Roberson 
548d2ad694cSJeff Roberson 	tdq->tdq_load++;
5491b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
550d2ad694cSJeff Roberson 		tdq->tdq_sysload++;
5518f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
552d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
5535d7ef00cSJeff Roberson }
55415dc847eSJeff Roberson 
555ae7a6b38SJeff Roberson /*
556ae7a6b38SJeff Roberson  * Remove the load from a thread that is transitioning to a sleep state or
557ae7a6b38SJeff Roberson  * exiting.
558ae7a6b38SJeff Roberson  */
559a8949de2SJeff Roberson static void
5609727e637SJeff Roberson tdq_load_rem(struct tdq *tdq, struct thread *td)
5615d7ef00cSJeff Roberson {
562ae7a6b38SJeff Roberson 
563ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
56461a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
565ae7a6b38SJeff Roberson 	KASSERT(tdq->tdq_load != 0,
566c47f202bSJeff Roberson 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
56703d17db7SJeff Roberson 
568d2ad694cSJeff Roberson 	tdq->tdq_load--;
5691b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
57003d17db7SJeff Roberson 		tdq->tdq_sysload--;
5718f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
572d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
57315dc847eSJeff Roberson }
57415dc847eSJeff Roberson 
575356500a3SJeff Roberson /*
5765e5c3873SJeff Roberson  * Bound timeshare latency by decreasing slice size as load increases.  We
5775e5c3873SJeff Roberson  * consider the maximum latency to grow with the number of threads waiting to
5785e5c3873SJeff Roberson  * run aside from curthread, and target no more than sched_slice of latency
5795e5c3873SJeff Roberson  * but no less than sched_slice_min of runtime per thread.
5805e5c3873SJeff Roberson  */
5815e5c3873SJeff Roberson static inline int
5825e5c3873SJeff Roberson tdq_slice(struct tdq *tdq)
5835e5c3873SJeff Roberson {
5845e5c3873SJeff Roberson 	int load;
5855e5c3873SJeff Roberson 
5865e5c3873SJeff Roberson 	/*
5875e5c3873SJeff Roberson 	 * It is safe to use sys_load here because this is called from
5885e5c3873SJeff Roberson 	 * contexts where timeshare threads are running and so there
5895e5c3873SJeff Roberson 	 * cannot be higher priority load in the system.
5905e5c3873SJeff Roberson 	 */
5915e5c3873SJeff Roberson 	load = tdq->tdq_sysload - 1;
5925e5c3873SJeff Roberson 	if (load >= SCHED_SLICE_MIN_DIVISOR)
5935e5c3873SJeff Roberson 		return (sched_slice_min);
5945e5c3873SJeff Roberson 	if (load <= 1)
5955e5c3873SJeff Roberson 		return (sched_slice);
5965e5c3873SJeff Roberson 	return (sched_slice / load);
5975e5c3873SJeff Roberson }
5985e5c3873SJeff Roberson 
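/*
 * Illustrative arithmetic (editorial note): with the default 12-tick slice,
 * a queue whose tdq_sysload is 3 (two timeshare waiters besides curthread)
 * hands out 12 / 2 = 6-tick slices; once the load reaches
 * SCHED_SLICE_MIN_DIVISOR the slice is clamped at sched_slice_min.
 */
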
5995e5c3873SJeff Roberson /*
60062fa74d9SJeff Roberson  * Set lowpri to its exact value by searching the run-queue and
60162fa74d9SJeff Roberson  * evaluating curthread.  curthread may be passed as an optimization.
602356500a3SJeff Roberson  */
60322bf7d9aSJeff Roberson static void
60462fa74d9SJeff Roberson tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
60562fa74d9SJeff Roberson {
60662fa74d9SJeff Roberson 	struct thread *td;
60762fa74d9SJeff Roberson 
60862fa74d9SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
60962fa74d9SJeff Roberson 	if (ctd == NULL)
610*6d3f74a1SMark Johnston 		ctd = atomic_load_ptr(&tdq->tdq_curthread);
6119727e637SJeff Roberson 	td = tdq_choose(tdq);
6129727e637SJeff Roberson 	if (td == NULL || td->td_priority > ctd->td_priority)
61362fa74d9SJeff Roberson 		tdq->tdq_lowpri = ctd->td_priority;
61462fa74d9SJeff Roberson 	else
61562fa74d9SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
61662fa74d9SJeff Roberson }
61762fa74d9SJeff Roberson 
61862fa74d9SJeff Roberson #ifdef SMP
6199129dd59SPedro F. Giffuni /*
6209129dd59SPedro F. Giffuni  * We need some randomness. Implement a classic Linear Congruential
6219129dd59SPedro F. Giffuni  * Generator X_{n+1}=(aX_n+c) mod m. These values are optimized for
6229129dd59SPedro F. Giffuni  * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits
6239129dd59SPedro F. Giffuni  * of the random state (in the low bits of our answer) to keep
6249129dd59SPedro F. Giffuni  * the maximum randomness.
6259129dd59SPedro F. Giffuni  */
6269129dd59SPedro F. Giffuni static uint32_t
6279129dd59SPedro F. Giffuni sched_random(void)
6289129dd59SPedro F. Giffuni {
6299129dd59SPedro F. Giffuni 	uint32_t *rndptr;
6309129dd59SPedro F. Giffuni 
6319129dd59SPedro F. Giffuni 	rndptr = DPCPU_PTR(randomval);
6329129dd59SPedro F. Giffuni 	*rndptr = *rndptr * 69069 + 5;
6339129dd59SPedro F. Giffuni 
6349129dd59SPedro F. Giffuni 	return (*rndptr >> 16);
6359129dd59SPedro F. Giffuni }
6369129dd59SPedro F. Giffuni 
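/*
 * Editorial note: callers below consume this by reducing the result modulo a
 * small bound, e.g. sched_random() % 128 in cpu_search_lowest() and
 * sched_random() % balance_interval in sched_balance(), to perturb choices
 * cheaply without a full PRNG.
 */
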
63762fa74d9SJeff Roberson struct cpu_search {
638e745d729SAlexander Motin 	cpuset_t *cs_mask;	/* The mask of allowed CPUs to choose from. */
639e745d729SAlexander Motin 	int	cs_prefer;	/* Prefer this CPU and groups including it. */
640e745d729SAlexander Motin 	int	cs_running;	/* The thread is now running at cs_prefer. */
64136acfc65SAlexander Motin 	int	cs_pri;		/* Min priority for low. */
64208063e9fSAlexander Motin 	int	cs_load;	/* Max load for low, min load for high. */
64308063e9fSAlexander Motin 	int	cs_trans;	/* Min transferable load for high. */
644aefe0a8cSAlexander Motin };
645aefe0a8cSAlexander Motin 
646aefe0a8cSAlexander Motin struct cpu_search_res {
64708063e9fSAlexander Motin 	int	csr_cpu;	/* The best CPU found. */
64808063e9fSAlexander Motin 	int	csr_load;	/* The load of cs_cpu. */
64962fa74d9SJeff Roberson };
65062fa74d9SJeff Roberson 
65162fa74d9SJeff Roberson /*
652aefe0a8cSAlexander Motin  * Search the tree of cpu_groups for the lowest or highest loaded CPU.
653aefe0a8cSAlexander Motin  * These routines actually compare the load on all paths through the tree
654aefe0a8cSAlexander Motin  * and find the least loaded cpu on the least loaded path, which may differ
655aefe0a8cSAlexander Motin  * from the least loaded cpu in the system.  This balances work among caches
656aefe0a8cSAlexander Motin  * and buses.
65762fa74d9SJeff Roberson  */
658aefe0a8cSAlexander Motin static int
659aefe0a8cSAlexander Motin cpu_search_lowest(const struct cpu_group *cg, const struct cpu_search *s,
660aefe0a8cSAlexander Motin     struct cpu_search_res *r)
66162fa74d9SJeff Roberson {
662aefe0a8cSAlexander Motin 	struct cpu_search_res lr;
66336acfc65SAlexander Motin 	struct tdq *tdq;
664e745d729SAlexander Motin 	int c, bload, l, load, p, total;
66562fa74d9SJeff Roberson 
66636acfc65SAlexander Motin 	total = 0;
667aefe0a8cSAlexander Motin 	bload = INT_MAX;
66808063e9fSAlexander Motin 	r->csr_cpu = -1;
66936acfc65SAlexander Motin 
670aefe0a8cSAlexander Motin 	/* Loop through children CPU groups if there are any. */
671aefe0a8cSAlexander Motin 	if (cg->cg_children > 0) {
672aefe0a8cSAlexander Motin 		for (c = cg->cg_children - 1; c >= 0; c--) {
673aefe0a8cSAlexander Motin 			load = cpu_search_lowest(&cg->cg_child[c], s, &lr);
67436acfc65SAlexander Motin 			total += load;
675e745d729SAlexander Motin 
676e745d729SAlexander Motin 			/*
677e745d729SAlexander Motin 			 * When balancing do not prefer SMT groups with load >1.
678e745d729SAlexander Motin 			 * This allows round-robin between SMT groups with equal
679e745d729SAlexander Motin 			 * load within the parent group, for fairer scheduling.
680e745d729SAlexander Motin 			 */
681e745d729SAlexander Motin 			if (__predict_false(s->cs_running) &&
682e745d729SAlexander Motin 			    (cg->cg_child[c].cg_flags & CG_FLAG_THREAD) &&
683e745d729SAlexander Motin 			    load >= 128 && (load & 128) != 0)
684e745d729SAlexander Motin 				load += 128;
685e745d729SAlexander Motin 
68608063e9fSAlexander Motin 			if (lr.csr_cpu >= 0 && (load < bload ||
68708063e9fSAlexander Motin 			    (load == bload && lr.csr_load < r->csr_load))) {
688aefe0a8cSAlexander Motin 				bload = load;
68908063e9fSAlexander Motin 				r->csr_cpu = lr.csr_cpu;
69008063e9fSAlexander Motin 				r->csr_load = lr.csr_load;
69136acfc65SAlexander Motin 			}
69236acfc65SAlexander Motin 		}
69362fa74d9SJeff Roberson 		return (total);
69462fa74d9SJeff Roberson 	}
69562fa74d9SJeff Roberson 
696aefe0a8cSAlexander Motin 	/* Loop through children CPUs otherwise. */
697aefe0a8cSAlexander Motin 	for (c = cg->cg_last; c >= cg->cg_first; c--) {
698aefe0a8cSAlexander Motin 		if (!CPU_ISSET(c, &cg->cg_mask))
699aefe0a8cSAlexander Motin 			continue;
700aefe0a8cSAlexander Motin 		tdq = TDQ_CPU(c);
701aefe0a8cSAlexander Motin 		l = tdq->tdq_load;
702e745d729SAlexander Motin 		if (c == s->cs_prefer) {
703e745d729SAlexander Motin 			if (__predict_false(s->cs_running))
704e745d729SAlexander Motin 				l--;
705e745d729SAlexander Motin 			p = 128;
706e745d729SAlexander Motin 		} else
707e745d729SAlexander Motin 			p = 0;
708aefe0a8cSAlexander Motin 		load = l * 256;
709e745d729SAlexander Motin 		total += load - p;
710e745d729SAlexander Motin 
711e745d729SAlexander Motin 		/*
712e745d729SAlexander Motin 		 * Check this CPU is acceptable.
713e745d729SAlexander Motin 		 * If the thread is already on the CPU, don't look at the TDQ
714e745d729SAlexander Motin 		 * priority, since it can be the priority of the thread itself.
715e745d729SAlexander Motin 		 */
71608063e9fSAlexander Motin 		if (l > s->cs_load || (tdq->tdq_lowpri <= s->cs_pri &&
717e745d729SAlexander Motin 		     (!s->cs_running || c != s->cs_prefer)) ||
718aefe0a8cSAlexander Motin 		    !CPU_ISSET(c, s->cs_mask))
719aefe0a8cSAlexander Motin 			continue;
720e745d729SAlexander Motin 
721e745d729SAlexander Motin 		/*
722e745d729SAlexander Motin 		 * When balancing do not prefer CPUs with load > 1.
723e745d729SAlexander Motin 		 * This allows round-robin between CPUs with equal load
724e745d729SAlexander Motin 		 * within the CPU group, for fairer scheduling.
725e745d729SAlexander Motin 		 */
726e745d729SAlexander Motin 		if (__predict_false(s->cs_running) && l > 0)
727e745d729SAlexander Motin 			p = 0;
728e745d729SAlexander Motin 
729aefe0a8cSAlexander Motin 		load -= sched_random() % 128;
730e745d729SAlexander Motin 		if (bload > load - p) {
731e745d729SAlexander Motin 			bload = load - p;
73208063e9fSAlexander Motin 			r->csr_cpu = c;
73308063e9fSAlexander Motin 			r->csr_load = load;
734aefe0a8cSAlexander Motin 		}
735aefe0a8cSAlexander Motin 	}
736aefe0a8cSAlexander Motin 	return (total);
73762fa74d9SJeff Roberson }
73862fa74d9SJeff Roberson 
739aefe0a8cSAlexander Motin static int
740aefe0a8cSAlexander Motin cpu_search_highest(const struct cpu_group *cg, const struct cpu_search *s,
741aefe0a8cSAlexander Motin     struct cpu_search_res *r)
74262fa74d9SJeff Roberson {
743aefe0a8cSAlexander Motin 	struct cpu_search_res lr;
744aefe0a8cSAlexander Motin 	struct tdq *tdq;
745aefe0a8cSAlexander Motin 	int c, bload, l, load, total;
746aefe0a8cSAlexander Motin 
747aefe0a8cSAlexander Motin 	total = 0;
748aefe0a8cSAlexander Motin 	bload = INT_MIN;
74908063e9fSAlexander Motin 	r->csr_cpu = -1;
750aefe0a8cSAlexander Motin 
751aefe0a8cSAlexander Motin 	/* Loop through children CPU groups if there are any. */
752aefe0a8cSAlexander Motin 	if (cg->cg_children > 0) {
753aefe0a8cSAlexander Motin 		for (c = cg->cg_children - 1; c >= 0; c--) {
754aefe0a8cSAlexander Motin 			load = cpu_search_highest(&cg->cg_child[c], s, &lr);
755aefe0a8cSAlexander Motin 			total += load;
75608063e9fSAlexander Motin 			if (lr.csr_cpu >= 0 && (load > bload ||
75708063e9fSAlexander Motin 			    (load == bload && lr.csr_load > r->csr_load))) {
758aefe0a8cSAlexander Motin 				bload = load;
75908063e9fSAlexander Motin 				r->csr_cpu = lr.csr_cpu;
76008063e9fSAlexander Motin 				r->csr_load = lr.csr_load;
761aefe0a8cSAlexander Motin 			}
762aefe0a8cSAlexander Motin 		}
763aefe0a8cSAlexander Motin 		return (total);
76462fa74d9SJeff Roberson 	}
76562fa74d9SJeff Roberson 
766aefe0a8cSAlexander Motin 	/* Loop through children CPUs otherwise. */
767aefe0a8cSAlexander Motin 	for (c = cg->cg_last; c >= cg->cg_first; c--) {
768aefe0a8cSAlexander Motin 		if (!CPU_ISSET(c, &cg->cg_mask))
769aefe0a8cSAlexander Motin 			continue;
770aefe0a8cSAlexander Motin 		tdq = TDQ_CPU(c);
771aefe0a8cSAlexander Motin 		l = tdq->tdq_load;
772aefe0a8cSAlexander Motin 		load = l * 256;
773aefe0a8cSAlexander Motin 		total += load;
774e745d729SAlexander Motin 
775e745d729SAlexander Motin 		/*
776e745d729SAlexander Motin 		 * Check this CPU is acceptable.
777e745d729SAlexander Motin 		 */
77808063e9fSAlexander Motin 		if (l < s->cs_load || (tdq->tdq_transferable < s->cs_trans) ||
779aefe0a8cSAlexander Motin 		    !CPU_ISSET(c, s->cs_mask))
780aefe0a8cSAlexander Motin 			continue;
781e745d729SAlexander Motin 
782ca34553bSAlexander Motin 		load -= sched_random() % 256;
783aefe0a8cSAlexander Motin 		if (load > bload) {
784aefe0a8cSAlexander Motin 			bload = load;
78508063e9fSAlexander Motin 			r->csr_cpu = c;
786aefe0a8cSAlexander Motin 		}
787aefe0a8cSAlexander Motin 	}
78808063e9fSAlexander Motin 	r->csr_load = bload;
789aefe0a8cSAlexander Motin 	return (total);
79062fa74d9SJeff Roberson }
79162fa74d9SJeff Roberson 
79262fa74d9SJeff Roberson /*
79362fa74d9SJeff Roberson  * Find the cpu with the least load via the least loaded path that has a
79462fa74d9SJeff Roberson  * lowpri greater than pri.  A pri of -1 indicates any priority is
79562fa74d9SJeff Roberson  * acceptable.
79662fa74d9SJeff Roberson  */
79762fa74d9SJeff Roberson static inline int
798aefe0a8cSAlexander Motin sched_lowest(const struct cpu_group *cg, cpuset_t *mask, int pri, int maxload,
799e745d729SAlexander Motin     int prefer, int running)
80062fa74d9SJeff Roberson {
801aefe0a8cSAlexander Motin 	struct cpu_search s;
802aefe0a8cSAlexander Motin 	struct cpu_search_res r;
80362fa74d9SJeff Roberson 
804aefe0a8cSAlexander Motin 	s.cs_prefer = prefer;
805e745d729SAlexander Motin 	s.cs_running = running;
806aefe0a8cSAlexander Motin 	s.cs_mask = mask;
807aefe0a8cSAlexander Motin 	s.cs_pri = pri;
80808063e9fSAlexander Motin 	s.cs_load = maxload;
809aefe0a8cSAlexander Motin 	cpu_search_lowest(cg, &s, &r);
81008063e9fSAlexander Motin 	return (r.csr_cpu);
81162fa74d9SJeff Roberson }
81262fa74d9SJeff Roberson 
81362fa74d9SJeff Roberson /*
81462fa74d9SJeff Roberson  * Find the cpu with the highest load via the highest loaded path.
81562fa74d9SJeff Roberson  */
81662fa74d9SJeff Roberson static inline int
81708063e9fSAlexander Motin sched_highest(const struct cpu_group *cg, cpuset_t *mask, int minload,
81808063e9fSAlexander Motin     int mintrans)
81962fa74d9SJeff Roberson {
820aefe0a8cSAlexander Motin 	struct cpu_search s;
821aefe0a8cSAlexander Motin 	struct cpu_search_res r;
82262fa74d9SJeff Roberson 
823aefe0a8cSAlexander Motin 	s.cs_mask = mask;
82408063e9fSAlexander Motin 	s.cs_load = minload;
82508063e9fSAlexander Motin 	s.cs_trans = mintrans;
826aefe0a8cSAlexander Motin 	cpu_search_highest(cg, &s, &r);
82708063e9fSAlexander Motin 	return (r.csr_cpu);
82862fa74d9SJeff Roberson }
82962fa74d9SJeff Roberson 
83062fa74d9SJeff Roberson static void
83162fa74d9SJeff Roberson sched_balance_group(struct cpu_group *cg)
83262fa74d9SJeff Roberson {
833018ff686SJeff Roberson 	struct tdq *tdq;
834e745d729SAlexander Motin 	struct thread *td;
83536acfc65SAlexander Motin 	cpuset_t hmask, lmask;
83636acfc65SAlexander Motin 	int high, low, anylow;
83762fa74d9SJeff Roberson 
83836acfc65SAlexander Motin 	CPU_FILL(&hmask);
83962fa74d9SJeff Roberson 	for (;;) {
84008063e9fSAlexander Motin 		high = sched_highest(cg, &hmask, 1, 0);
84136acfc65SAlexander Motin 		/* Stop if there are no more CPUs with transferable threads. */
84236acfc65SAlexander Motin 		if (high == -1)
84362fa74d9SJeff Roberson 			break;
84436acfc65SAlexander Motin 		CPU_CLR(high, &hmask);
84536acfc65SAlexander Motin 		CPU_COPY(&hmask, &lmask);
84636acfc65SAlexander Motin 		/* Stop if there are no CPUs left for low. */
84736acfc65SAlexander Motin 		if (CPU_EMPTY(&lmask))
84862fa74d9SJeff Roberson 			break;
849018ff686SJeff Roberson 		tdq = TDQ_CPU(high);
850e745d729SAlexander Motin 		if (tdq->tdq_load == 1) {
851e745d729SAlexander Motin 			/*
852e745d729SAlexander Motin 			 * There is only one running thread.  We can't move
853e745d729SAlexander Motin 			 * it from here, so tell it to pick a new CPU by itself.
854e745d729SAlexander Motin 			 */
855e745d729SAlexander Motin 			TDQ_LOCK(tdq);
856*6d3f74a1SMark Johnston 			td = atomic_load_ptr(&tdq->tdq_curthread);
857e745d729SAlexander Motin 			if ((td->td_flags & TDF_IDLETD) == 0 &&
858e745d729SAlexander Motin 			    THREAD_CAN_MIGRATE(td)) {
859e745d729SAlexander Motin 				td->td_flags |= TDF_NEEDRESCHED | TDF_PICKCPU;
860e745d729SAlexander Motin 				if (high != curcpu)
861e745d729SAlexander Motin 					ipi_cpu(high, IPI_AST);
862e745d729SAlexander Motin 			}
863e745d729SAlexander Motin 			TDQ_UNLOCK(tdq);
864e745d729SAlexander Motin 			break;
865e745d729SAlexander Motin 		}
866e745d729SAlexander Motin 		anylow = 1;
86736acfc65SAlexander Motin nextlow:
868e745d729SAlexander Motin 		if (tdq->tdq_transferable == 0)
869e745d729SAlexander Motin 			continue;
870e745d729SAlexander Motin 		low = sched_lowest(cg, &lmask, -1, tdq->tdq_load - 1, high, 1);
87136acfc65SAlexander Motin 		/* Stop if a full search found no less loaded CPU. */
87236acfc65SAlexander Motin 		if (anylow && low == -1)
87336acfc65SAlexander Motin 			break;
87436acfc65SAlexander Motin 		/* Go to next high if we found no less loaded CPU. */
87536acfc65SAlexander Motin 		if (low == -1)
87636acfc65SAlexander Motin 			continue;
87736acfc65SAlexander Motin 		/* Transfer thread from high to low. */
878018ff686SJeff Roberson 		if (sched_balance_pair(tdq, TDQ_CPU(low))) {
87936acfc65SAlexander Motin 			/* CPU that got thread can no longer be a donor. */
88036acfc65SAlexander Motin 			CPU_CLR(low, &hmask);
88136acfc65SAlexander Motin 		} else {
88262fa74d9SJeff Roberson 			/*
88336acfc65SAlexander Motin 			 * If that failed, then there are no threads on high
88436acfc65SAlexander Motin 			 * that can run on this low.  Drop low from the low
88536acfc65SAlexander Motin 			 * mask and look for a different one.
88662fa74d9SJeff Roberson 			 */
88736acfc65SAlexander Motin 			CPU_CLR(low, &lmask);
88836acfc65SAlexander Motin 			anylow = 0;
88936acfc65SAlexander Motin 			goto nextlow;
89062fa74d9SJeff Roberson 		}
89136acfc65SAlexander Motin 	}
89262fa74d9SJeff Roberson }
89362fa74d9SJeff Roberson 
89462fa74d9SJeff Roberson static void
89562375ca8SEd Schouten sched_balance(void)
896356500a3SJeff Roberson {
8977fcf154aSJeff Roberson 	struct tdq *tdq;
898356500a3SJeff Roberson 
8990567b6ccSWarner Losh 	balance_ticks = max(balance_interval / 2, 1) +
900b250ad34SWarner Losh 	    (sched_random() % balance_interval);
9017fcf154aSJeff Roberson 	tdq = TDQ_SELF();
9027fcf154aSJeff Roberson 	TDQ_UNLOCK(tdq);
90362fa74d9SJeff Roberson 	sched_balance_group(cpu_top);
9047fcf154aSJeff Roberson 	TDQ_LOCK(tdq);
905cac77d04SJeff Roberson }
90686f8ae96SJeff Roberson 
907ae7a6b38SJeff Roberson /*
908ae7a6b38SJeff Roberson  * Lock two thread queues using their address to maintain lock order.
909ae7a6b38SJeff Roberson  */
910ae7a6b38SJeff Roberson static void
911ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two)
912ae7a6b38SJeff Roberson {
913ae7a6b38SJeff Roberson 	if (one < two) {
914ae7a6b38SJeff Roberson 		TDQ_LOCK(one);
915ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
916ae7a6b38SJeff Roberson 	} else {
917ae7a6b38SJeff Roberson 		TDQ_LOCK(two);
918ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
919ae7a6b38SJeff Roberson 	}
920ae7a6b38SJeff Roberson }
921ae7a6b38SJeff Roberson 
922ae7a6b38SJeff Roberson /*
9237fcf154aSJeff Roberson  * Unlock two thread queues.  Order is not important here.
9247fcf154aSJeff Roberson  */
9257fcf154aSJeff Roberson static void
9267fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two)
9277fcf154aSJeff Roberson {
9287fcf154aSJeff Roberson 	TDQ_UNLOCK(one);
9297fcf154aSJeff Roberson 	TDQ_UNLOCK(two);
9307fcf154aSJeff Roberson }
9317fcf154aSJeff Roberson 
9327fcf154aSJeff Roberson /*
933*6d3f74a1SMark Johnston  * Transfer load between two imbalanced thread queues.  Returns true if a thread
934*6d3f74a1SMark Johnston  * was moved between the queues, and false otherwise.
935ae7a6b38SJeff Roberson  */
936*6d3f74a1SMark Johnston static bool
937ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low)
938cac77d04SJeff Roberson {
939*6d3f74a1SMark Johnston 	int cpu, lowpri;
940*6d3f74a1SMark Johnston 	bool ret;
941cac77d04SJeff Roberson 
942*6d3f74a1SMark Johnston 	ret = false;
943ae7a6b38SJeff Roberson 	tdq_lock_pair(high, low);
944*6d3f74a1SMark Johnston 
945155b9987SJeff Roberson 	/*
94697e9382dSDon Lewis 	 * Transfer a thread from high to low.
947155b9987SJeff Roberson 	 */
948*6d3f74a1SMark Johnston 	if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load) {
949*6d3f74a1SMark Johnston 		lowpri = tdq_move(high, low);
950*6d3f74a1SMark Johnston 		if (lowpri != -1) {
951a5423ea3SJeff Roberson 			/*
952*6d3f74a1SMark Johnston 			 * In case the target isn't the current cpu, notify it of
953*6d3f74a1SMark Johnston 			 * the new load, possibly sending an IPI to force it to
954*6d3f74a1SMark Johnston 			 * reschedule.
955a5423ea3SJeff Roberson 			 */
956880bf8b9SMarius Strobl 			cpu = TDQ_ID(low);
957880bf8b9SMarius Strobl 			if (cpu != PCPU_GET(cpuid))
958*6d3f74a1SMark Johnston 				tdq_notify(low, lowpri);
959*6d3f74a1SMark Johnston 			ret = true;
960*6d3f74a1SMark Johnston 		}
961ae7a6b38SJeff Roberson 	}
9627fcf154aSJeff Roberson 	tdq_unlock_pair(high, low);
963*6d3f74a1SMark Johnston 	return (ret);
964356500a3SJeff Roberson }
965356500a3SJeff Roberson 
966ae7a6b38SJeff Roberson /*
967*6d3f74a1SMark Johnston  * Move a thread from one thread queue to another.  Returns -1 if the source
968*6d3f74a1SMark Johnston  * queue was empty, else returns the maximum priority of all threads in
969*6d3f74a1SMark Johnston  * the destination queue prior to the addition of the new thread.  In the latter
970*6d3f74a1SMark Johnston  * case, this priority can be used to determine whether an IPI needs to be
971*6d3f74a1SMark Johnston  * delivered.
972ae7a6b38SJeff Roberson  */
973*6d3f74a1SMark Johnston static int
974ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to)
975356500a3SJeff Roberson {
976ae7a6b38SJeff Roberson 	struct thread *td;
977ae7a6b38SJeff Roberson 	int cpu;
978356500a3SJeff Roberson 
9797fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(from, MA_OWNED);
9807fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(to, MA_OWNED);
9817fcf154aSJeff Roberson 
982ae7a6b38SJeff Roberson 	cpu = TDQ_ID(to);
98335dd6d6cSMark Johnston 	td = tdq_steal(from, cpu);
9849727e637SJeff Roberson 	if (td == NULL)
985*6d3f74a1SMark Johnston 		return (-1);
98661a74c5cSJeff Roberson 
987ae7a6b38SJeff Roberson 	/*
98861a74c5cSJeff Roberson 	 * Although the run queue is locked the thread may be
98961a74c5cSJeff Roberson 	 * blocked.  We can not set the lock until it is unblocked.
990ae7a6b38SJeff Roberson 	 */
99161a74c5cSJeff Roberson 	thread_lock_block_wait(td);
992ae7a6b38SJeff Roberson 	sched_rem(td);
99361a74c5cSJeff Roberson 	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(from));
994ae7a6b38SJeff Roberson 	td->td_lock = TDQ_LOCKPTR(to);
99561a74c5cSJeff Roberson 	td_get_sched(td)->ts_cpu = cpu;
996*6d3f74a1SMark Johnston 	return (tdq_add(to, td, SRQ_YIELDING));
997356500a3SJeff Roberson }
99822bf7d9aSJeff Roberson 
999ae7a6b38SJeff Roberson /*
1000ae7a6b38SJeff Roberson  * This tdq has idled.  Try to steal a thread from another cpu and switch
1001ae7a6b38SJeff Roberson  * to it.
1002ae7a6b38SJeff Roberson  */
100380f86c9fSJeff Roberson static int
1004ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq)
100522bf7d9aSJeff Roberson {
10062668bb2aSAlexander Motin 	struct cpu_group *cg, *parent;
1007ad1e7d28SJulian Elischer 	struct tdq *steal;
1008c76ee827SJeff Roberson 	cpuset_t mask;
10092668bb2aSAlexander Motin 	int cpu, switchcnt, goup;
101080f86c9fSJeff Roberson 
101197e9382dSDon Lewis 	if (smp_started == 0 || steal_idle == 0 || tdq->tdq_cg == NULL)
101288f530ccSJeff Roberson 		return (1);
1013c76ee827SJeff Roberson 	CPU_FILL(&mask);
1014c76ee827SJeff Roberson 	CPU_CLR(PCPU_GET(cpuid), &mask);
101597e9382dSDon Lewis     restart:
101697e9382dSDon Lewis 	switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
10172668bb2aSAlexander Motin 	for (cg = tdq->tdq_cg, goup = 0; ; ) {
101808063e9fSAlexander Motin 		cpu = sched_highest(cg, &mask, steal_thresh, 1);
101997e9382dSDon Lewis 		/*
102097e9382dSDon Lewis 		 * We were assigned a thread but not preempted.  Returning
102197e9382dSDon Lewis 		 * 0 here will cause our caller to switch to it.
102297e9382dSDon Lewis 		 */
102397e9382dSDon Lewis 		if (tdq->tdq_load)
102497e9382dSDon Lewis 			return (0);
10252668bb2aSAlexander Motin 
10262668bb2aSAlexander Motin 		/*
10272668bb2aSAlexander Motin 		 * We found no CPU to steal from in this group.  Escalate to
10282668bb2aSAlexander Motin 		 * the parent and repeat.  But if the parent has only two child
10292668bb2aSAlexander Motin 		 * groups, we can avoid searching this group again by searching
10302668bb2aSAlexander Motin 		 * the other one specifically and then escalating two levels.
10312668bb2aSAlexander Motin 		 */
103262fa74d9SJeff Roberson 		if (cpu == -1) {
10332668bb2aSAlexander Motin 			if (goup) {
103462fa74d9SJeff Roberson 				cg = cg->cg_parent;
10352668bb2aSAlexander Motin 				goup = 0;
10362668bb2aSAlexander Motin 			}
10372668bb2aSAlexander Motin 			parent = cg->cg_parent;
10382668bb2aSAlexander Motin 			if (parent == NULL)
103997e9382dSDon Lewis 				return (1);
10402668bb2aSAlexander Motin 			if (parent->cg_children == 2) {
10412668bb2aSAlexander Motin 				if (cg == &parent->cg_child[0])
10422668bb2aSAlexander Motin 					cg = &parent->cg_child[1];
10432668bb2aSAlexander Motin 				else
10442668bb2aSAlexander Motin 					cg = &parent->cg_child[0];
10452668bb2aSAlexander Motin 				goup = 1;
10462668bb2aSAlexander Motin 			} else
10472668bb2aSAlexander Motin 				cg = parent;
104880f86c9fSJeff Roberson 			continue;
10497b8bfa0dSJeff Roberson 		}
10507b8bfa0dSJeff Roberson 		steal = TDQ_CPU(cpu);
105197e9382dSDon Lewis 		/*
105297e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
105397e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
105497e9382dSDon Lewis 		 *
105597e9382dSDon Lewis 		 * Testing this ahead of tdq_lock_pair() only catches
105697e9382dSDon Lewis 		 * this situation about 20% of the time on an 8 core
105797e9382dSDon Lewis 		 * 16 thread Ryzen 7, but it still helps performance.
105897e9382dSDon Lewis 		 */
105997e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
106097e9382dSDon Lewis 		    steal->tdq_transferable == 0)
106197e9382dSDon Lewis 			goto restart;
106297e9382dSDon Lewis 		/*
10638bb173fbSAlexander Motin 		 * Try to lock both queues. If we are assigned a thread while
10648bb173fbSAlexander Motin 		 * waiting for the lock, switch to it now instead of stealing.
10658bb173fbSAlexander Motin 		 * If we can't get the lock, then somebody likely got there
10668bb173fbSAlexander Motin 		 * first, so continue searching.
106797e9382dSDon Lewis 		 */
10688bb173fbSAlexander Motin 		TDQ_LOCK(tdq);
10698bb173fbSAlexander Motin 		if (tdq->tdq_load > 0) {
10708bb173fbSAlexander Motin 			mi_switch(SW_VOL | SWT_IDLE);
10718bb173fbSAlexander Motin 			return (0);
10728bb173fbSAlexander Motin 		}
10738bb173fbSAlexander Motin 		if (TDQ_TRYLOCK_FLAGS(steal, MTX_DUPOK) == 0) {
10748bb173fbSAlexander Motin 			TDQ_UNLOCK(tdq);
10758bb173fbSAlexander Motin 			CPU_CLR(cpu, &mask);
10768bb173fbSAlexander Motin 			continue;
10778bb173fbSAlexander Motin 		}
107897e9382dSDon Lewis 		/*
107997e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
108097e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread, or
108197e9382dSDon Lewis 		 * we were preempted and the CPU loading info may be out
108297e9382dSDon Lewis 		 * of date.  The latter is rare.  In either case restart
108397e9382dSDon Lewis 		 * the search.
108497e9382dSDon Lewis 		 */
108597e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
108697e9382dSDon Lewis 		    steal->tdq_transferable == 0 ||
108797e9382dSDon Lewis 		    switchcnt != tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt) {
10887fcf154aSJeff Roberson 			tdq_unlock_pair(tdq, steal);
108997e9382dSDon Lewis 			goto restart;
109062fa74d9SJeff Roberson 		}
109162fa74d9SJeff Roberson 		/*
109297e9382dSDon Lewis 		 * Steal the thread and switch to it.
109362fa74d9SJeff Roberson 		 */
1094*6d3f74a1SMark Johnston 		if (tdq_move(steal, tdq) != -1)
109597e9382dSDon Lewis 			break;
109697e9382dSDon Lewis 		/*
109797e9382dSDon Lewis 		 * We failed to acquire a thread even though it looked
109897e9382dSDon Lewis 		 * like one was available.  This could be due to affinity
109997e9382dSDon Lewis 		 * restrictions or for other reasons.  Loop again after
110097e9382dSDon Lewis 		 * removing this CPU from the set.  The restart logic
110197e9382dSDon Lewis 		 * above does not restore this CPU to the set due to the
110297e9382dSDon Lewis 		 * likelihood of failing here again.
110397e9382dSDon Lewis 		 */
110497e9382dSDon Lewis 		CPU_CLR(cpu, &mask);
110562fa74d9SJeff Roberson 		tdq_unlock_pair(tdq, steal);
110680f86c9fSJeff Roberson 	}
1107ae7a6b38SJeff Roberson 	TDQ_UNLOCK(steal);
1108686bcb5cSJeff Roberson 	mi_switch(SW_VOL | SWT_IDLE);
11097b8bfa0dSJeff Roberson 	return (0);
111022bf7d9aSJeff Roberson }
111122bf7d9aSJeff Roberson 
1112ae7a6b38SJeff Roberson /*
1113ae7a6b38SJeff Roberson  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
1114*6d3f74a1SMark Johnston  *
1115*6d3f74a1SMark Johnston  * "lowpri" is the minimum scheduling priority among all threads on
1116*6d3f74a1SMark Johnston  * the queue prior to the addition of the new thread.
1117ae7a6b38SJeff Roberson  */
111822bf7d9aSJeff Roberson static void
1119*6d3f74a1SMark Johnston tdq_notify(struct tdq *tdq, int lowpri)
112022bf7d9aSJeff Roberson {
11217b8bfa0dSJeff Roberson 	int cpu;
112222bf7d9aSJeff Roberson 
1123*6d3f74a1SMark Johnston 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1124*6d3f74a1SMark Johnston 	KASSERT(tdq->tdq_lowpri <= lowpri,
1125*6d3f74a1SMark Johnston 	    ("tdq_notify: lowpri %d > tdq_lowpri %d", lowpri, tdq->tdq_lowpri));
1126*6d3f74a1SMark Johnston 
11277789ab32SMark Johnston 	if (tdq->tdq_owepreempt)
1128ff256d9cSJeff Roberson 		return;
1129*6d3f74a1SMark Johnston 
1130*6d3f74a1SMark Johnston 	/*
1131*6d3f74a1SMark Johnston 	 * Check to see if the newly added thread should preempt the one
1132*6d3f74a1SMark Johnston 	 * currently running.
1133*6d3f74a1SMark Johnston 	 */
1134*6d3f74a1SMark Johnston 	if (!sched_shouldpreempt(tdq->tdq_lowpri, lowpri, 1))
11356b2f763fSJeff Roberson 		return;
113679654969SAlexander Motin 
113779654969SAlexander Motin 	/*
1138ae9e9b4fSAlexander Motin 	 * Make sure that our caller's earlier update to tdq_load is
1139ae9e9b4fSAlexander Motin 	 * globally visible before we read tdq_cpu_idle.  The idle thread
114079654969SAlexander Motin 	 * accesses both of them without locks, and the order is important.
114179654969SAlexander Motin 	 */
1142e8677f38SKonstantin Belousov 	atomic_thread_fence_seq_cst();
114379654969SAlexander Motin 
11441690c6c1SJeff Roberson 	/*
1145*6d3f74a1SMark Johnston 	 * Try to figure out if we can signal the idle thread instead of sending
1146*6d3f74a1SMark Johnston 	 * an IPI.  This check is racy; at worst, we will deliver an IPI
1147*6d3f74a1SMark Johnston 	 * unnecessarily.
11486c47aaaeSJeff Roberson 	 */
1149*6d3f74a1SMark Johnston 	cpu = TDQ_ID(tdq);
1150*6d3f74a1SMark Johnston 	if (TD_IS_IDLETHREAD(tdq->tdq_curthread) &&
1151*6d3f74a1SMark Johnston 	    (tdq->tdq_cpu_idle == 0 || cpu_idle_wakeup(cpu)))
11526c47aaaeSJeff Roberson 		return;
11537789ab32SMark Johnston 
11547789ab32SMark Johnston 	/*
11557789ab32SMark Johnston 	 * The run queues have been updated, so any switch on the remote CPU
11567789ab32SMark Johnston 	 * will satisfy the preemption request.
11577789ab32SMark Johnston 	 */
11587789ab32SMark Johnston 	tdq->tdq_owepreempt = 1;
1159d9d8d144SJohn Baldwin 	ipi_cpu(cpu, IPI_PREEMPT);
116022bf7d9aSJeff Roberson }
116122bf7d9aSJeff Roberson 
1162ae7a6b38SJeff Roberson /*
1163ae7a6b38SJeff Roberson  * Steals load from a timeshare queue.  Honors the rotating queue head
1164ae7a6b38SJeff Roberson  * index.
1165ae7a6b38SJeff Roberson  */
11669727e637SJeff Roberson static struct thread *
116762fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start)
1168ae7a6b38SJeff Roberson {
1169ae7a6b38SJeff Roberson 	struct rqbits *rqb;
1170ae7a6b38SJeff Roberson 	struct rqhead *rqh;
117136acfc65SAlexander Motin 	struct thread *td, *first;
1172ae7a6b38SJeff Roberson 	int bit;
1173ae7a6b38SJeff Roberson 	int i;
1174ae7a6b38SJeff Roberson 
1175ae7a6b38SJeff Roberson 	rqb = &rq->rq_status;
1176ae7a6b38SJeff Roberson 	bit = start & (RQB_BPW - 1);
117736acfc65SAlexander Motin 	first = NULL;
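	/*
	 * "first" remembers the first thread encountered by the scan below
	 * (the head of the rotating queue).  It is skipped while searching
	 * for another eligible thread and is only stolen as a last resort.
	 */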
1178ae7a6b38SJeff Roberson again:
1179ae7a6b38SJeff Roberson 	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
1180ae7a6b38SJeff Roberson 		if (rqb->rqb_bits[i] == 0)
1181ae7a6b38SJeff Roberson 			continue;
11828bc713f6SJeff Roberson 		if (bit == 0)
11838bc713f6SJeff Roberson 			bit = RQB_FFS(rqb->rqb_bits[i]);
11848bc713f6SJeff Roberson 		for (; bit < RQB_BPW; bit++) {
11858bc713f6SJeff Roberson 			if ((rqb->rqb_bits[i] & (1ul << bit)) == 0)
1186ae7a6b38SJeff Roberson 				continue;
11878bc713f6SJeff Roberson 			rqh = &rq->rq_queues[bit + (i << RQB_L2BPW)];
11889727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq) {
1189bd84094aSAlexander Motin 				if (first) {
1190bd84094aSAlexander Motin 					if (THREAD_CAN_MIGRATE(td) &&
11919727e637SJeff Roberson 					    THREAD_CAN_SCHED(td, cpu))
11929727e637SJeff Roberson 						return (td);
1193bd84094aSAlexander Motin 				} else
119436acfc65SAlexander Motin 					first = td;
1195ae7a6b38SJeff Roberson 			}
1196ae7a6b38SJeff Roberson 		}
11978bc713f6SJeff Roberson 	}
1198ae7a6b38SJeff Roberson 	if (start != 0) {
1199ae7a6b38SJeff Roberson 		start = 0;
1200ae7a6b38SJeff Roberson 		goto again;
1201ae7a6b38SJeff Roberson 	}
1202ae7a6b38SJeff Roberson 
120336acfc65SAlexander Motin 	if (first && THREAD_CAN_MIGRATE(first) &&
120436acfc65SAlexander Motin 	    THREAD_CAN_SCHED(first, cpu))
120536acfc65SAlexander Motin 		return (first);
1206ae7a6b38SJeff Roberson 	return (NULL);
1207ae7a6b38SJeff Roberson }
1208ae7a6b38SJeff Roberson 
1209ae7a6b38SJeff Roberson /*
1210ae7a6b38SJeff Roberson  * Steals load from a standard linear queue.
1211ae7a6b38SJeff Roberson  */
12129727e637SJeff Roberson static struct thread *
121362fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu)
121422bf7d9aSJeff Roberson {
121522bf7d9aSJeff Roberson 	struct rqhead *rqh;
121622bf7d9aSJeff Roberson 	struct rqbits *rqb;
12179727e637SJeff Roberson 	struct thread *td;
121822bf7d9aSJeff Roberson 	int word;
121922bf7d9aSJeff Roberson 	int bit;
122022bf7d9aSJeff Roberson 
122122bf7d9aSJeff Roberson 	rqb = &rq->rq_status;
122222bf7d9aSJeff Roberson 	for (word = 0; word < RQB_LEN; word++) {
122322bf7d9aSJeff Roberson 		if (rqb->rqb_bits[word] == 0)
122422bf7d9aSJeff Roberson 			continue;
122522bf7d9aSJeff Roberson 		for (bit = 0; bit < RQB_BPW; bit++) {
1226a2640c9bSPeter Wemm 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
122722bf7d9aSJeff Roberson 				continue;
122822bf7d9aSJeff Roberson 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
12299727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq)
12309727e637SJeff Roberson 				if (THREAD_CAN_MIGRATE(td) &&
12319727e637SJeff Roberson 				    THREAD_CAN_SCHED(td, cpu))
12329727e637SJeff Roberson 					return (td);
123322bf7d9aSJeff Roberson 		}
123422bf7d9aSJeff Roberson 	}
123522bf7d9aSJeff Roberson 	return (NULL);
123622bf7d9aSJeff Roberson }
123722bf7d9aSJeff Roberson 
1238ae7a6b38SJeff Roberson /*
1239ae7a6b38SJeff Roberson  * Attempt to steal a thread in priority order from a thread queue.
1240ae7a6b38SJeff Roberson  */
12419727e637SJeff Roberson static struct thread *
124262fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu)
124322bf7d9aSJeff Roberson {
12449727e637SJeff Roberson 	struct thread *td;
124522bf7d9aSJeff Roberson 
1246ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
12479727e637SJeff Roberson 	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
12489727e637SJeff Roberson 		return (td);
12499727e637SJeff Roberson 	if ((td = runq_steal_from(&tdq->tdq_timeshare,
12509727e637SJeff Roberson 	    cpu, tdq->tdq_ridx)) != NULL)
12519727e637SJeff Roberson 		return (td);
125262fa74d9SJeff Roberson 	return (runq_steal(&tdq->tdq_idle, cpu));
125322bf7d9aSJeff Roberson }
125480f86c9fSJeff Roberson 
1255ae7a6b38SJeff Roberson /*
1256ae7a6b38SJeff Roberson  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
12577fcf154aSJeff Roberson  * current lock and returns with the assigned queue locked.
1258ae7a6b38SJeff Roberson  */
1259ae7a6b38SJeff Roberson static inline struct tdq *
12609727e637SJeff Roberson sched_setcpu(struct thread *td, int cpu, int flags)
126180f86c9fSJeff Roberson {
1263ae7a6b38SJeff Roberson 	struct tdq *tdq;
126461a74c5cSJeff Roberson 	struct mtx *mtx;
126580f86c9fSJeff Roberson 
12669727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1267ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
126893ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_cpu = cpu;
12699727e637SJeff Roberson 	/*
12709727e637SJeff Roberson 	 * If the lock matches just return the queue.
12719727e637SJeff Roberson 	 */
127261a74c5cSJeff Roberson 	if (td->td_lock == TDQ_LOCKPTR(tdq)) {
127361a74c5cSJeff Roberson 		KASSERT((flags & SRQ_HOLD) == 0,
127461a74c5cSJeff Roberson 		    ("sched_setcpu: Invalid lock for SRQ_HOLD"));
1275ae7a6b38SJeff Roberson 		return (tdq);
1276ae7a6b38SJeff Roberson 	}
127761a74c5cSJeff Roberson 
127880f86c9fSJeff Roberson 	/*
1279ae7a6b38SJeff Roberson 	 * The hard case is migration; we need to block the thread first to
1280ae7a6b38SJeff Roberson 	 * prevent order reversals with other cpus' locks.
12817b8bfa0dSJeff Roberson 	 */
1282b0b9dee5SAttilio Rao 	spinlock_enter();
128361a74c5cSJeff Roberson 	mtx = thread_lock_block(td);
128461a74c5cSJeff Roberson 	if ((flags & SRQ_HOLD) == 0)
128561a74c5cSJeff Roberson 		mtx_unlock_spin(mtx);
1286ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1287ae7a6b38SJeff Roberson 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1288b0b9dee5SAttilio Rao 	spinlock_exit();
1289ae7a6b38SJeff Roberson 	return (tdq);
129080f86c9fSJeff Roberson }
12912454aaf5SJeff Roberson 
12928df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding");
12938df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity");
12948df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity");
12958df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load");
12968df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu");
12978df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration");
12988df78c41SJeff Roberson 
1299ae7a6b38SJeff Roberson static int
13009727e637SJeff Roberson sched_pickcpu(struct thread *td, int flags)
1301ae7a6b38SJeff Roberson {
130236acfc65SAlexander Motin 	struct cpu_group *cg, *ccg;
13039727e637SJeff Roberson 	struct td_sched *ts;
1304ae7a6b38SJeff Roberson 	struct tdq *tdq;
1305aefe0a8cSAlexander Motin 	cpuset_t *mask;
1306e745d729SAlexander Motin 	int cpu, pri, r, self, intr;
13077b8bfa0dSJeff Roberson 
130862fa74d9SJeff Roberson 	self = PCPU_GET(cpuid);
130993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1310efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on "
1311efe67753SNathan Whitehorn 	    "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name));
13127b8bfa0dSJeff Roberson 	if (smp_started == 0)
13137b8bfa0dSJeff Roberson 		return (self);
131428994a58SJeff Roberson 	/*
131528994a58SJeff Roberson 	 * Don't migrate a running thread from sched_switch().
131628994a58SJeff Roberson 	 */
131762fa74d9SJeff Roberson 	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
131862fa74d9SJeff Roberson 		return (ts->ts_cpu);
13197b8bfa0dSJeff Roberson 	/*
132062fa74d9SJeff Roberson 	 * Prefer to run interrupt threads on the processors that generate
132162fa74d9SJeff Roberson 	 * the interrupt.
13227b8bfa0dSJeff Roberson 	 */
132362fa74d9SJeff Roberson 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
1324c9205e35SAlexander Motin 	    curthread->td_intr_nesting_level) {
1325c55dc51cSAlexander Motin 		tdq = TDQ_SELF();
1326c55dc51cSAlexander Motin 		if (tdq->tdq_lowpri >= PRI_MIN_IDLE) {
1327c55dc51cSAlexander Motin 			SCHED_STAT_INC(pickcpu_idle_affinity);
1328c55dc51cSAlexander Motin 			return (self);
1329c55dc51cSAlexander Motin 		}
133062fa74d9SJeff Roberson 		ts->ts_cpu = self;
1331c9205e35SAlexander Motin 		intr = 1;
1332c55dc51cSAlexander Motin 		cg = tdq->tdq_cg;
1333c55dc51cSAlexander Motin 		goto llc;
1334c55dc51cSAlexander Motin 	} else {
1335c9205e35SAlexander Motin 		intr = 0;
1336c55dc51cSAlexander Motin 		tdq = TDQ_CPU(ts->ts_cpu);
1337c55dc51cSAlexander Motin 		cg = tdq->tdq_cg;
1338c55dc51cSAlexander Motin 	}
13397b8bfa0dSJeff Roberson 	/*
134036acfc65SAlexander Motin 	 * If the thread can run on the last cpu and the affinity has not
13410127914cSEric van Gyzen 	 * expired and that cpu is idle, run it there.
13427b8bfa0dSJeff Roberson 	 */
134336acfc65SAlexander Motin 	if (THREAD_CAN_SCHED(td, ts->ts_cpu) &&
134436acfc65SAlexander Motin 	    tdq->tdq_lowpri >= PRI_MIN_IDLE &&
134536acfc65SAlexander Motin 	    SCHED_AFFINITY(ts, CG_SHARE_L2)) {
1346c55dc51cSAlexander Motin 		if (cg->cg_flags & CG_FLAG_THREAD) {
1347176dd236SAlexander Motin 			/* Check all SMT threads for being idle. */
1348aefe0a8cSAlexander Motin 			for (cpu = cg->cg_first; cpu <= cg->cg_last; cpu++) {
1349176dd236SAlexander Motin 				if (CPU_ISSET(cpu, &cg->cg_mask) &&
1350176dd236SAlexander Motin 				    TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
135162fa74d9SJeff Roberson 					break;
1352aefe0a8cSAlexander Motin 			}
1353aefe0a8cSAlexander Motin 			if (cpu > cg->cg_last) {
1354176dd236SAlexander Motin 				SCHED_STAT_INC(pickcpu_idle_affinity);
1355176dd236SAlexander Motin 				return (ts->ts_cpu);
135636acfc65SAlexander Motin 			}
1357176dd236SAlexander Motin 		} else {
135836acfc65SAlexander Motin 			SCHED_STAT_INC(pickcpu_idle_affinity);
135936acfc65SAlexander Motin 			return (ts->ts_cpu);
136036acfc65SAlexander Motin 		}
136136acfc65SAlexander Motin 	}
1362c55dc51cSAlexander Motin llc:
136336acfc65SAlexander Motin 	/*
136436acfc65SAlexander Motin 	 * Search for the last level cache CPU group in the tree.
1365c9205e35SAlexander Motin 	 * Skip SMT, identical groups and caches with expired affinity.
1366c9205e35SAlexander Motin 	 * Interrupt threads' affinity is explicit and never expires.
136736acfc65SAlexander Motin 	 */
136836acfc65SAlexander Motin 	for (ccg = NULL; cg != NULL; cg = cg->cg_parent) {
136936acfc65SAlexander Motin 		if (cg->cg_flags & CG_FLAG_THREAD)
137036acfc65SAlexander Motin 			continue;
1371c9205e35SAlexander Motin 		if (cg->cg_children == 1 || cg->cg_count == 1)
1372c9205e35SAlexander Motin 			continue;
1373c9205e35SAlexander Motin 		if (cg->cg_level == CG_SHARE_NONE ||
1374c9205e35SAlexander Motin 		    (!intr && !SCHED_AFFINITY(ts, cg->cg_level)))
137536acfc65SAlexander Motin 			continue;
137636acfc65SAlexander Motin 		ccg = cg;
137736acfc65SAlexander Motin 	}
1378c9205e35SAlexander Motin 	/* Found LLC shared by all CPUs, so do a global search. */
1379c9205e35SAlexander Motin 	if (ccg == cpu_top)
1380c9205e35SAlexander Motin 		ccg = NULL;
138162fa74d9SJeff Roberson 	cpu = -1;
1382aefe0a8cSAlexander Motin 	mask = &td->td_cpuset->cs_mask;
1383c9205e35SAlexander Motin 	pri = td->td_priority;
1384e745d729SAlexander Motin 	r = TD_IS_RUNNING(td);
1385c9205e35SAlexander Motin 	/*
1386c9205e35SAlexander Motin 	 * Try hard to keep interrupts within the found LLC.  Search the LLC for
1387c9205e35SAlexander Motin 	 * the least loaded CPU we can run now.  For NUMA systems it should
1388c9205e35SAlexander Motin 	 * be within the target domain, and it also reduces scheduling overhead.
1389c9205e35SAlexander Motin 	 */
1390c9205e35SAlexander Motin 	if (ccg != NULL && intr) {
1391e745d729SAlexander Motin 		cpu = sched_lowest(ccg, mask, pri, INT_MAX, ts->ts_cpu, r);
1392c9205e35SAlexander Motin 		if (cpu >= 0)
1393c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_intrbind);
1394c9205e35SAlexander Motin 	} else
1395c9205e35SAlexander Motin 	/* Search the LLC for the least loaded idle CPU we can run now. */
1396c9205e35SAlexander Motin 	if (ccg != NULL) {
1397c9205e35SAlexander Motin 		cpu = sched_lowest(ccg, mask, max(pri, PRI_MAX_TIMESHARE),
1398e745d729SAlexander Motin 		    INT_MAX, ts->ts_cpu, r);
1399c9205e35SAlexander Motin 		if (cpu >= 0)
1400c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_affinity);
1401c9205e35SAlexander Motin 	}
1402c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU we can run now. */
1403c9205e35SAlexander Motin 	if (cpu < 0) {
1404e745d729SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, pri, INT_MAX, ts->ts_cpu, r);
1405c9205e35SAlexander Motin 		if (cpu >= 0)
1406c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1407c9205e35SAlexander Motin 	}
1408c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU. */
1409c9205e35SAlexander Motin 	if (cpu < 0) {
1410e745d729SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu, r);
1411c9205e35SAlexander Motin 		if (cpu >= 0)
1412c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1413c9205e35SAlexander Motin 	}
1414bb3dfc6aSAlexander Motin 	KASSERT(cpu >= 0, ("sched_pickcpu: Failed to find a cpu."));
1415efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu));
141662fa74d9SJeff Roberson 	/*
141762fa74d9SJeff Roberson 	 * Compare the lowest loaded cpu to current cpu.
141862fa74d9SJeff Roberson 	 */
1419018ff686SJeff Roberson 	tdq = TDQ_CPU(cpu);
1420018ff686SJeff Roberson 	if (THREAD_CAN_SCHED(td, self) && TDQ_SELF()->tdq_lowpri > pri &&
1421018ff686SJeff Roberson 	    tdq->tdq_lowpri < PRI_MIN_IDLE &&
1422018ff686SJeff Roberson 	    TDQ_SELF()->tdq_load <= tdq->tdq_load + 1) {
14238df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_local);
142462fa74d9SJeff Roberson 		cpu = self;
1425c9205e35SAlexander Motin 	}
14268df78c41SJeff Roberson 	if (cpu != ts->ts_cpu)
14278df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_migration);
1428ae7a6b38SJeff Roberson 	return (cpu);
142980f86c9fSJeff Roberson }
143062fa74d9SJeff Roberson #endif
143122bf7d9aSJeff Roberson 
143222bf7d9aSJeff Roberson /*
143322bf7d9aSJeff Roberson  * Pick the highest priority task we have and return it.
14340c0a98b2SJeff Roberson  */
14359727e637SJeff Roberson static struct thread *
1436ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq)
14375d7ef00cSJeff Roberson {
14389727e637SJeff Roberson 	struct thread *td;
14395d7ef00cSJeff Roberson 
1440ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
14419727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_realtime);
14429727e637SJeff Roberson 	if (td != NULL)
14439727e637SJeff Roberson 		return (td);
14449727e637SJeff Roberson 	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
14459727e637SJeff Roberson 	if (td != NULL) {
144612d56c0fSJohn Baldwin 		KASSERT(td->td_priority >= PRI_MIN_BATCH,
1447e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on timeshare queue %d",
14489727e637SJeff Roberson 		    td->td_priority));
14499727e637SJeff Roberson 		return (td);
145015dc847eSJeff Roberson 	}
14519727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_idle);
14529727e637SJeff Roberson 	if (td != NULL) {
14539727e637SJeff Roberson 		KASSERT(td->td_priority >= PRI_MIN_IDLE,
1454e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on idle queue %d",
14559727e637SJeff Roberson 		    td->td_priority));
14569727e637SJeff Roberson 		return (td);
1457e7d50326SJeff Roberson 	}
1458e7d50326SJeff Roberson 
1459e7d50326SJeff Roberson 	return (NULL);
1460245f3abfSJeff Roberson }
14610a016a05SJeff Roberson 
1462ae7a6b38SJeff Roberson /*
1463ae7a6b38SJeff Roberson  * Initialize a thread queue.
1464ae7a6b38SJeff Roberson  */
14650a016a05SJeff Roberson static void
1466018ff686SJeff Roberson tdq_setup(struct tdq *tdq, int id)
14670a016a05SJeff Roberson {
1468ae7a6b38SJeff Roberson 
1469c47f202bSJeff Roberson 	if (bootverbose)
1470018ff686SJeff Roberson 		printf("ULE: setup cpu %d\n", id);
1471e7d50326SJeff Roberson 	runq_init(&tdq->tdq_realtime);
1472e7d50326SJeff Roberson 	runq_init(&tdq->tdq_timeshare);
1473d2ad694cSJeff Roberson 	runq_init(&tdq->tdq_idle);
1474018ff686SJeff Roberson 	tdq->tdq_id = id;
147562fa74d9SJeff Roberson 	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
147662fa74d9SJeff Roberson 	    "sched lock %d", (int)TDQ_ID(tdq));
147761a74c5cSJeff Roberson 	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", MTX_SPIN);
14788f51ad55SJeff Roberson #ifdef KTR
14798f51ad55SJeff Roberson 	snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
14808f51ad55SJeff Roberson 	    "CPU %d load", (int)TDQ_ID(tdq));
14818f51ad55SJeff Roberson #endif
14820a016a05SJeff Roberson }
14830a016a05SJeff Roberson 
1484c47f202bSJeff Roberson #ifdef SMP
1485c47f202bSJeff Roberson static void
1486c47f202bSJeff Roberson sched_setup_smp(void)
1487c47f202bSJeff Roberson {
1488c47f202bSJeff Roberson 	struct tdq *tdq;
1489c47f202bSJeff Roberson 	int i;
1490c47f202bSJeff Roberson 
149162fa74d9SJeff Roberson 	cpu_top = smp_topo();
14923aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
1493018ff686SJeff Roberson 		tdq = DPCPU_ID_PTR(i, tdq);
1494018ff686SJeff Roberson 		tdq_setup(tdq, i);
149562fa74d9SJeff Roberson 		tdq->tdq_cg = smp_topo_find(cpu_top, i);
149662fa74d9SJeff Roberson 		if (tdq->tdq_cg == NULL)
149762fa74d9SJeff Roberson 			panic("Can't find cpu group for %d\n", i);
1498ca34553bSAlexander Motin 		DPCPU_ID_SET(i, randomval, i * 69069 + 5);
1499c47f202bSJeff Roberson 	}
1500018ff686SJeff Roberson 	PCPU_SET(sched, DPCPU_PTR(tdq));
150162fa74d9SJeff Roberson 	balance_tdq = TDQ_SELF();
1502c47f202bSJeff Roberson }
1503c47f202bSJeff Roberson #endif
1504c47f202bSJeff Roberson 
1505ae7a6b38SJeff Roberson /*
1506ae7a6b38SJeff Roberson  * Setup the thread queues and initialize the topology based on MD
1507ae7a6b38SJeff Roberson  * information.
1508ae7a6b38SJeff Roberson  */
150935e6168fSJeff Roberson static void
151035e6168fSJeff Roberson sched_setup(void *dummy)
151135e6168fSJeff Roberson {
1512ae7a6b38SJeff Roberson 	struct tdq *tdq;
1513c47f202bSJeff Roberson 
15140ec896fdSJeff Roberson #ifdef SMP
1515c47f202bSJeff Roberson 	sched_setup_smp();
1516749d01b0SJeff Roberson #else
1517018ff686SJeff Roberson 	tdq_setup(TDQ_SELF(), 0);
1518356500a3SJeff Roberson #endif
1519018ff686SJeff Roberson 	tdq = TDQ_SELF();
1520ae7a6b38SJeff Roberson 
1521ae7a6b38SJeff Roberson 	/* Add thread0's load since it's running. */
1522ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1523e1504695SJeff Roberson 	thread0.td_lock = TDQ_LOCKPTR(tdq);
15249727e637SJeff Roberson 	tdq_load_add(tdq, &thread0);
1525*6d3f74a1SMark Johnston 	tdq->tdq_curthread = &thread0;
152662fa74d9SJeff Roberson 	tdq->tdq_lowpri = thread0.td_priority;
1527ae7a6b38SJeff Roberson 	TDQ_UNLOCK(tdq);
152835e6168fSJeff Roberson }
152935e6168fSJeff Roberson 
1530ae7a6b38SJeff Roberson /*
1531579895dfSAlexander Motin  * This routine determines time constants after stathz and hz are set up.
1532ae7a6b38SJeff Roberson  */
1533a1d4fe69SDavid Xu /* ARGSUSED */
1534a1d4fe69SDavid Xu static void
1535a1d4fe69SDavid Xu sched_initticks(void *dummy)
1536a1d4fe69SDavid Xu {
1537ae7a6b38SJeff Roberson 	int incr;
1538ae7a6b38SJeff Roberson 
1539a1d4fe69SDavid Xu 	realstathz = stathz ? stathz : hz;
15405e5c3873SJeff Roberson 	sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;
15415e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
154237f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
154337f4e025SAlexander Motin 	    realstathz);
1544a1d4fe69SDavid Xu 
1545a1d4fe69SDavid Xu 	/*
1546e7d50326SJeff Roberson 	 * tickincr is shifted out by 10 to avoid rounding errors due to
15473f872f85SJeff Roberson 	 * hz not being evenly divisible by stathz on all platforms.
1548e7d50326SJeff Roberson 	 */
1549ae7a6b38SJeff Roberson 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1550e7d50326SJeff Roberson 	/*
1551e7d50326SJeff Roberson 	 * This does not work for values of stathz that are more than
1552e7d50326SJeff Roberson 	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
1553a1d4fe69SDavid Xu 	 */
1554ae7a6b38SJeff Roberson 	if (incr == 0)
1555ae7a6b38SJeff Roberson 		incr = 1;
1556ae7a6b38SJeff Roberson 	tickincr = incr;
15577b8bfa0dSJeff Roberson #ifdef SMP
15589862717aSJeff Roberson 	/*
15597fcf154aSJeff Roberson 	 * Set the default balance interval now that we know
15607fcf154aSJeff Roberson 	 * what realstathz is.
15617fcf154aSJeff Roberson 	 */
15627fcf154aSJeff Roberson 	balance_interval = realstathz;
1563290d9060SDon Lewis 	balance_ticks = balance_interval;
15647b8bfa0dSJeff Roberson 	affinity = SCHED_AFFINITY_DEFAULT;
15657b8bfa0dSJeff Roberson #endif
1566b3f40a41SAlexander Motin 	if (sched_idlespinthresh < 0)
15672c27cb3aSAlexander Motin 		sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz;
1568a1d4fe69SDavid Xu }
1569a1d4fe69SDavid Xu 
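/*
 * For illustration, assuming hz = 1000, stathz = 127 and a default slice
 * divisor of 10: sched_slice = 127 / 10 = 12 stathz ticks, hogticks =
 * (2 * 1000 * 12 + 63) / 127 = 189 hz ticks, and tickincr =
 * (1000 << 10) / 127 = 8062, i.e. hz / stathz (about 7.87) scaled by 1 << 10.
 */
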
157035e6168fSJeff Roberson /*
1571ae7a6b38SJeff Roberson  * This is the core of the interactivity algorithm.  Determines a score based
1572ae7a6b38SJeff Roberson  * on past behavior.  It is the ratio of sleep time to run time scaled to
1573ae7a6b38SJeff Roberson  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1574ae7a6b38SJeff Roberson  * differs from the cpu usage because it does not account for time spent
1575ae7a6b38SJeff Roberson  * waiting on a run-queue.  Would be prettier if we had floating point.
157657031f79SGeorge V. Neville-Neil  *
157757031f79SGeorge V. Neville-Neil  * When a thread's sleep time is greater than its run time the
157857031f79SGeorge V. Neville-Neil  * calculation is:
157957031f79SGeorge V. Neville-Neil  *
158057031f79SGeorge V. Neville-Neil  *                           scaling factor
158157031f79SGeorge V. Neville-Neil  * interactivity score =  ---------------------
158257031f79SGeorge V. Neville-Neil  *                        sleep time / run time
158357031f79SGeorge V. Neville-Neil  *
158457031f79SGeorge V. Neville-Neil  *
158557031f79SGeorge V. Neville-Neil  * When a thread's run time is greater than its sleep time the
158657031f79SGeorge V. Neville-Neil  * calculation is:
158757031f79SGeorge V. Neville-Neil  *
158857031f79SGeorge V. Neville-Neil  *                                                 scaling factor
158943521b46Swiklam  * interactivity score = 2 * scaling factor  -  ---------------------
159057031f79SGeorge V. Neville-Neil  *                                              run time / sleep time
1591ae7a6b38SJeff Roberson  */
1592ae7a6b38SJeff Roberson static int
1593ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1594ae7a6b38SJeff Roberson {
1595ae7a6b38SJeff Roberson 	struct td_sched *ts;
1596ae7a6b38SJeff Roberson 	int div;
1597ae7a6b38SJeff Roberson 
159893ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1599ae7a6b38SJeff Roberson 	/*
1600ae7a6b38SJeff Roberson 	 * The score is only needed if this is likely to be an interactive
1601ae7a6b38SJeff Roberson 	 * task.  Don't go through the expense of computing it if there's
1602ae7a6b38SJeff Roberson 	 * no chance.
1603ae7a6b38SJeff Roberson 	 */
1604ae7a6b38SJeff Roberson 	if (sched_interact <= SCHED_INTERACT_HALF &&
1605ae7a6b38SJeff Roberson 		ts->ts_runtime >= ts->ts_slptime)
1606ae7a6b38SJeff Roberson 			return (SCHED_INTERACT_HALF);
1607ae7a6b38SJeff Roberson 
1608ae7a6b38SJeff Roberson 	if (ts->ts_runtime > ts->ts_slptime) {
1609ae7a6b38SJeff Roberson 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1610ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF +
1611ae7a6b38SJeff Roberson 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1612ae7a6b38SJeff Roberson 	}
1613ae7a6b38SJeff Roberson 	if (ts->ts_slptime > ts->ts_runtime) {
1614ae7a6b38SJeff Roberson 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1615ae7a6b38SJeff Roberson 		return (ts->ts_runtime / div);
1616ae7a6b38SJeff Roberson 	}
1617ae7a6b38SJeff Roberson 	/* runtime == slptime */
1618ae7a6b38SJeff Roberson 	if (ts->ts_runtime)
1619ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF);
1620ae7a6b38SJeff Roberson 
1621ae7a6b38SJeff Roberson 	/*
1622ae7a6b38SJeff Roberson 	 * This can happen if slptime and runtime are 0.
1623ae7a6b38SJeff Roberson 	 */
1624ae7a6b38SJeff Roberson 	return (0);
1626ae7a6b38SJeff Roberson }
1627ae7a6b38SJeff Roberson 
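/*
 * For illustration, with a scaling factor (SCHED_INTERACT_HALF) of 50: a
 * thread that slept 200 units and ran 50 scores 50 / (200 / 50) = 12 and
 * looks interactive, while one that ran 200 and slept 50 scores
 * 50 + (50 - 50 / (200 / 50)) = 88 and looks batch.  Equal sleep and run
 * time yields exactly 50.
 */
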
1628ae7a6b38SJeff Roberson /*
162935e6168fSJeff Roberson  * Scale the scheduling priority according to the "interactivity" of this
163035e6168fSJeff Roberson  * process.
163135e6168fSJeff Roberson  */
163215dc847eSJeff Roberson static void
16338460a577SJohn Birrell sched_priority(struct thread *td)
163435e6168fSJeff Roberson {
16351c119e17SAlexander Motin 	u_int pri, score;
163635e6168fSJeff Roberson 
1637c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
163815dc847eSJeff Roberson 		return;
1639e7d50326SJeff Roberson 	/*
1640e7d50326SJeff Roberson 	 * If the score is interactive we place the thread in the realtime
1641e7d50326SJeff Roberson 	 * queue with a priority that is less than kernel and interrupt
1642e7d50326SJeff Roberson 	 * priorities.  These threads are not subject to nice restrictions.
1643e7d50326SJeff Roberson 	 *
1644ae7a6b38SJeff Roberson 	 * Scores greater than this are placed on the normal timeshare queue
1645e7d50326SJeff Roberson 	 * where the priority is partially decided by the most recent cpu
1646e7d50326SJeff Roberson 	 * utilization and the rest is decided by nice value.
1647a5423ea3SJeff Roberson 	 *
1648a5423ea3SJeff Roberson 	 * The nice value of the process has a linear effect on the calculated
1649a5423ea3SJeff Roberson 	 * score.  Negative nice values make it easier for a thread to be
1650a5423ea3SJeff Roberson 	 * considered interactive.
1651e7d50326SJeff Roberson 	 */
1652a0f15352SJohn Baldwin 	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1653e7d50326SJeff Roberson 	if (score < sched_interact) {
165412d56c0fSJohn Baldwin 		pri = PRI_MIN_INTERACT;
16551c119e17SAlexander Motin 		pri += (PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) * score /
16561c119e17SAlexander Motin 		    sched_interact;
165712d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
16581c119e17SAlexander Motin 		    ("sched_priority: invalid interactive priority %u score %u",
16599a93305aSJeff Roberson 		    pri, score));
1660e7d50326SJeff Roberson 	} else {
1661e7d50326SJeff Roberson 		pri = SCHED_PRI_MIN;
166293ccd6bfSKonstantin Belousov 		if (td_get_sched(td)->ts_ticks)
166393ccd6bfSKonstantin Belousov 			pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
16645457fa23SJohn Baldwin 			    SCHED_PRI_RANGE - 1);
1665e7d50326SJeff Roberson 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
166612d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
16671c119e17SAlexander Motin 		    ("sched_priority: invalid priority %u: nice %d, "
1668ae7a6b38SJeff Roberson 		    "ticks %d ftick %d ltick %d tick pri %d",
166993ccd6bfSKonstantin Belousov 		    pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks,
167093ccd6bfSKonstantin Belousov 		    td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick,
167193ccd6bfSKonstantin Belousov 		    SCHED_PRI_TICKS(td_get_sched(td))));
1672e7d50326SJeff Roberson 	}
16738460a577SJohn Birrell 	sched_user_prio(td, pri);
167435e6168fSJeff Roberson 
167515dc847eSJeff Roberson 	return;
167635e6168fSJeff Roberson }
167735e6168fSJeff Roberson 
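/*
 * For illustration, assuming the default interactivity threshold
 * (sched_interact) of 30: a thread with score 12 and nice 0 is placed at
 * PRI_MIN_INTERACT + (PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) * 12 / 30,
 * i.e. about 40% of the way into the interactive range, while a score of
 * 30 or more lands in the batch range, offset from SCHED_PRI_MIN by its
 * recent cpu usage (SCHED_PRI_TICKS) and nice value.
 */
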
167835e6168fSJeff Roberson /*
1679d322132cSJeff Roberson  * This routine enforces a maximum limit on the amount of scheduling history
1680ae7a6b38SJeff Roberson  * kept.  It is called after either the slptime or runtime is adjusted.  This
1681ae7a6b38SJeff Roberson  * function is ugly due to integer math.
1682d322132cSJeff Roberson  */
16834b60e324SJeff Roberson static void
16848460a577SJohn Birrell sched_interact_update(struct thread *td)
16854b60e324SJeff Roberson {
1686155b6ca1SJeff Roberson 	struct td_sched *ts;
16879a93305aSJeff Roberson 	u_int sum;
16883f741ca1SJeff Roberson 
168993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1690ae7a6b38SJeff Roberson 	sum = ts->ts_runtime + ts->ts_slptime;
1691d322132cSJeff Roberson 	if (sum < SCHED_SLP_RUN_MAX)
1692d322132cSJeff Roberson 		return;
1693d322132cSJeff Roberson 	/*
1694155b6ca1SJeff Roberson 	 * This only happens from two places:
1695155b6ca1SJeff Roberson 	 * 1) We have added an unusual amount of run time from fork_exit.
1696155b6ca1SJeff Roberson 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1697155b6ca1SJeff Roberson 	 */
1698155b6ca1SJeff Roberson 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1699ae7a6b38SJeff Roberson 		if (ts->ts_runtime > ts->ts_slptime) {
1700ae7a6b38SJeff Roberson 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1701ae7a6b38SJeff Roberson 			ts->ts_slptime = 1;
1702155b6ca1SJeff Roberson 		} else {
1703ae7a6b38SJeff Roberson 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1704ae7a6b38SJeff Roberson 			ts->ts_runtime = 1;
1705155b6ca1SJeff Roberson 		}
1706155b6ca1SJeff Roberson 		return;
1707155b6ca1SJeff Roberson 	}
1708155b6ca1SJeff Roberson 	/*
1709d322132cSJeff Roberson 	 * If we have exceeded by more than 1/5th then the algorithm below
1710d322132cSJeff Roberson 	 * will not bring us back into range.  Dividing by two here forces
17112454aaf5SJeff Roberson 	 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX]
1712d322132cSJeff Roberson 	 */
171337a35e4aSJeff Roberson 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1714ae7a6b38SJeff Roberson 		ts->ts_runtime /= 2;
1715ae7a6b38SJeff Roberson 		ts->ts_slptime /= 2;
1716d322132cSJeff Roberson 		return;
1717d322132cSJeff Roberson 	}
1718ae7a6b38SJeff Roberson 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1719ae7a6b38SJeff Roberson 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1720d322132cSJeff Roberson }
1721d322132cSJeff Roberson 
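/*
 * For illustration, if SCHED_SLP_RUN_MAX were 100 units: runtime 90 and
 * slptime 20 (sum 110, within 6/5 of the cap) are scaled to 72 and 16;
 * runtime 90 and slptime 40 (sum 130) are halved to 45 and 20; and
 * runtime 190 and slptime 30 (sum over twice the cap) are reset to 100
 * and 1 respectively.
 */
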
1722ae7a6b38SJeff Roberson /*
1723ae7a6b38SJeff Roberson  * Scale back the interactivity history when a child thread is created.  The
1724ae7a6b38SJeff Roberson  * history is inherited from the parent but the thread may behave totally
1725ae7a6b38SJeff Roberson  * differently.  For example, a shell spawning a compiler process.  We want
1726ae7a6b38SJeff Roberson  * to learn that the compiler is behaving badly very quickly.
1727ae7a6b38SJeff Roberson  */
1728d322132cSJeff Roberson static void
17298460a577SJohn Birrell sched_interact_fork(struct thread *td)
1730d322132cSJeff Roberson {
173193ccd6bfSKonstantin Belousov 	struct td_sched *ts;
1732d322132cSJeff Roberson 	int ratio;
1733d322132cSJeff Roberson 	int sum;
1734d322132cSJeff Roberson 
173593ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
173693ccd6bfSKonstantin Belousov 	sum = ts->ts_runtime + ts->ts_slptime;
1737d322132cSJeff Roberson 	if (sum > SCHED_SLP_RUN_FORK) {
1738d322132cSJeff Roberson 		ratio = sum / SCHED_SLP_RUN_FORK;
173993ccd6bfSKonstantin Belousov 		ts->ts_runtime /= ratio;
174093ccd6bfSKonstantin Belousov 		ts->ts_slptime /= ratio;
17414b60e324SJeff Roberson 	}
17424b60e324SJeff Roberson }
17434b60e324SJeff Roberson 
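/*
 * For example, a child inheriting three times SCHED_SLP_RUN_FORK of
 * combined sleep and run history has both components divided by three,
 * so its behavior is re-learned quickly.
 */
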
174415dc847eSJeff Roberson /*
1745ae7a6b38SJeff Roberson  * Called from proc0_init() to set up the scheduler fields.
1746ed062c8dSJulian Elischer  */
1747ed062c8dSJulian Elischer void
1748ed062c8dSJulian Elischer schedinit(void)
1749ed062c8dSJulian Elischer {
175093ccd6bfSKonstantin Belousov 	struct td_sched *ts0;
1751e7d50326SJeff Roberson 
1752ed062c8dSJulian Elischer 	/*
175393ccd6bfSKonstantin Belousov 	 * Set up the scheduler specific parts of thread0.
1754ed062c8dSJulian Elischer 	 */
175593ccd6bfSKonstantin Belousov 	ts0 = td_get_sched(&thread0);
175693ccd6bfSKonstantin Belousov 	ts0->ts_ltick = ticks;
175793ccd6bfSKonstantin Belousov 	ts0->ts_ftick = ticks;
175893ccd6bfSKonstantin Belousov 	ts0->ts_slice = 0;
17591408b84aSHans Petter Selasky 	ts0->ts_cpu = curcpu;	/* set valid CPU number */
1760ed062c8dSJulian Elischer }
1761ed062c8dSJulian Elischer 
1762ed062c8dSJulian Elischer /*
1763589aed00SKyle Evans  * schedinit_ap() is needed prior to calling sched_throw(NULL) to ensure that
1764589aed00SKyle Evans  * the pcpu requirements are met for any calls in the period between curthread
1765589aed00SKyle Evans  * initialization and sched_throw().  One can safely add threads to the queue
1766589aed00SKyle Evans  * before sched_throw(), for instance, as long as the thread lock is setup
1767589aed00SKyle Evans  * correctly.
1768589aed00SKyle Evans  *
1769589aed00SKyle Evans  * TDQ_SELF() relies on the below sched pcpu setting; it may be used only
1770589aed00SKyle Evans  * after schedinit_ap().
1771589aed00SKyle Evans  */
1772589aed00SKyle Evans void
1773589aed00SKyle Evans schedinit_ap(void)
1774589aed00SKyle Evans {
1775589aed00SKyle Evans 
1776589aed00SKyle Evans #ifdef SMP
1777589aed00SKyle Evans 	PCPU_SET(sched, DPCPU_PTR(tdq));
1778589aed00SKyle Evans #endif
1779589aed00SKyle Evans 	PCPU_GET(idlethread)->td_lock = TDQ_LOCKPTR(TDQ_SELF());
1780589aed00SKyle Evans }
1781589aed00SKyle Evans 
1782589aed00SKyle Evans /*
178315dc847eSJeff Roberson  * This is only somewhat accurate since, given many processes of the same
178415dc847eSJeff Roberson  * priority, they will switch when their slices run out, which will be
1785e7d50326SJeff Roberson  * at most sched_slice stathz ticks.
178615dc847eSJeff Roberson  */
178735e6168fSJeff Roberson int
178835e6168fSJeff Roberson sched_rr_interval(void)
178935e6168fSJeff Roberson {
1790e7d50326SJeff Roberson 
1791579895dfSAlexander Motin 	/* Convert sched_slice from stathz to hz. */
179237f4e025SAlexander Motin 	return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
179335e6168fSJeff Roberson }
179435e6168fSJeff Roberson 
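/*
 * For illustration, with sched_slice = 12 stathz ticks, stathz = 127 and
 * hz = 1000, the round-robin interval is (12 * 1000 + 63) / 127 = 94 hz
 * ticks, i.e. roughly 94 ms.
 */
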
1795ae7a6b38SJeff Roberson /*
1796ae7a6b38SJeff Roberson  * Update the percent cpu tracking information when it is requested or
1797ae7a6b38SJeff Roberson  * the total history exceeds the maximum.  We keep a sliding history of
1798ae7a6b38SJeff Roberson  * tick counts that slowly decays.  This is less precise than the 4BSD
1799ae7a6b38SJeff Roberson  * mechanism since it happens with less regular and frequent events.
1800ae7a6b38SJeff Roberson  */
180122bf7d9aSJeff Roberson static void
18027295465eSAlexander Motin sched_pctcpu_update(struct td_sched *ts, int run)
180335e6168fSJeff Roberson {
18047295465eSAlexander Motin 	int t = ticks;
1805e7d50326SJeff Roberson 
180678133024SMark Johnston 	/*
180778133024SMark Johnston 	 * The signed difference may be negative if the thread hasn't run for
180878133024SMark Johnston 	 * over half of the ticks rollover period.
180978133024SMark Johnston 	 */
181078133024SMark Johnston 	if ((u_int)(t - ts->ts_ltick) >= SCHED_TICK_TARG) {
1811ad1e7d28SJulian Elischer 		ts->ts_ticks = 0;
18127295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
18137295465eSAlexander Motin 	} else if (t - ts->ts_ftick >= SCHED_TICK_MAX) {
18147295465eSAlexander Motin 		ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) *
18157295465eSAlexander Motin 		    (ts->ts_ltick - (t - SCHED_TICK_TARG));
18167295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
18177295465eSAlexander Motin 	}
18187295465eSAlexander Motin 	if (run)
18197295465eSAlexander Motin 		ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT;
18207295465eSAlexander Motin 	ts->ts_ltick = t;
182135e6168fSJeff Roberson }
182235e6168fSJeff Roberson 
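/*
 * In sched_pctcpu_update() above, a thread whose last update is at least
 * SCHED_TICK_TARG ticks old has its history reset; a window that has grown
 * past SCHED_TICK_MAX is rescaled so that only the most recent
 * SCHED_TICK_TARG ticks remain; otherwise the elapsed ticks, shifted by
 * SCHED_TICK_SHIFT, are added when the thread was running.
 */
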
1823ae7a6b38SJeff Roberson /*
1824ae7a6b38SJeff Roberson  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1825ae7a6b38SJeff Roberson  * if necessary.  This is the back-end for several priority related
1826ae7a6b38SJeff Roberson  * functions.
1827ae7a6b38SJeff Roberson  */
1828e7d50326SJeff Roberson static void
1829f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio)
183035e6168fSJeff Roberson {
183173daf66fSJeff Roberson 	struct tdq *tdq;
183273daf66fSJeff Roberson 	int oldpri;
183335e6168fSJeff Roberson 
18348f51ad55SJeff Roberson 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
18358f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, "new prio:%d", prio,
18368f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(curthread));
1837d9fae5abSAndriy Gapon 	SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
1838e87fc7cfSAndriy Gapon 	if (td != curthread && prio < td->td_priority) {
18398f51ad55SJeff Roberson 		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
18408f51ad55SJeff Roberson 		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
18418f51ad55SJeff Roberson 		    prio, KTR_ATTR_LINKED, sched_tdname(td));
1842d9fae5abSAndriy Gapon 		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
1843b3e9e682SRyan Stone 		    curthread);
18448f51ad55SJeff Roberson 	}
18457b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1846f5c157d9SJohn Baldwin 	if (td->td_priority == prio)
1847f5c157d9SJohn Baldwin 		return;
18483f741ca1SJeff Roberson 	/*
18493f741ca1SJeff Roberson 	 * If the priority has been elevated due to priority
18503f741ca1SJeff Roberson 	 * propagation, we may have to move ourselves to a new
1851e7d50326SJeff Roberson 	 * queue.  This could be optimized to not re-add in some
1852e7d50326SJeff Roberson 	 * cases.
1853f2b74cbfSJeff Roberson 	 */
18546d55b3ecSJeff Roberson 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1855e7d50326SJeff Roberson 		sched_rem(td);
1856e7d50326SJeff Roberson 		td->td_priority = prio;
185761a74c5cSJeff Roberson 		sched_add(td, SRQ_BORROWING | SRQ_HOLDTD);
185873daf66fSJeff Roberson 		return;
185973daf66fSJeff Roberson 	}
18606d55b3ecSJeff Roberson 	/*
18616d55b3ecSJeff Roberson 	 * If the thread is currently running we may have to adjust the lowpri
18626d55b3ecSJeff Roberson 	 * information so other cpus are aware of our current priority.
18636d55b3ecSJeff Roberson 	 */
18646d55b3ecSJeff Roberson 	if (TD_IS_RUNNING(td)) {
18654aec1984SJohn Baldwin 		tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
186662fa74d9SJeff Roberson 		oldpri = td->td_priority;
18673f741ca1SJeff Roberson 		td->td_priority = prio;
186862fa74d9SJeff Roberson 		if (prio < tdq->tdq_lowpri)
186962fa74d9SJeff Roberson 			tdq->tdq_lowpri = prio;
187062fa74d9SJeff Roberson 		else if (tdq->tdq_lowpri == oldpri)
187162fa74d9SJeff Roberson 			tdq_setlowpri(tdq, td);
18726d55b3ecSJeff Roberson 		return;
187373daf66fSJeff Roberson 	}
18746d55b3ecSJeff Roberson 	td->td_priority = prio;
1875ae7a6b38SJeff Roberson }
187635e6168fSJeff Roberson 
1877f5c157d9SJohn Baldwin /*
1878f5c157d9SJohn Baldwin  * Update a thread's priority when it is lent another thread's
1879f5c157d9SJohn Baldwin  * priority.
1880f5c157d9SJohn Baldwin  */
1881f5c157d9SJohn Baldwin void
1882f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1883f5c157d9SJohn Baldwin {
1884f5c157d9SJohn Baldwin 
1885f5c157d9SJohn Baldwin 	td->td_flags |= TDF_BORROWING;
1886f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1887f5c157d9SJohn Baldwin }
1888f5c157d9SJohn Baldwin 
1889f5c157d9SJohn Baldwin /*
1890f5c157d9SJohn Baldwin  * Restore a thread's priority when priority propagation is
1891f5c157d9SJohn Baldwin  * over.  The prio argument is the minimum priority the thread
1892f5c157d9SJohn Baldwin  * needs to have to satisfy other possible priority lending
1893f5c157d9SJohn Baldwin  * requests.  If the thread's regular priority is less
1894f5c157d9SJohn Baldwin  * important than prio, the thread will keep a priority boost
1895f5c157d9SJohn Baldwin  * of prio.
1896f5c157d9SJohn Baldwin  */
1897f5c157d9SJohn Baldwin void
1898f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1899f5c157d9SJohn Baldwin {
1900f5c157d9SJohn Baldwin 	u_char base_pri;
1901f5c157d9SJohn Baldwin 
1902f5c157d9SJohn Baldwin 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1903f5c157d9SJohn Baldwin 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
19048460a577SJohn Birrell 		base_pri = td->td_user_pri;
1905f5c157d9SJohn Baldwin 	else
1906f5c157d9SJohn Baldwin 		base_pri = td->td_base_pri;
1907f5c157d9SJohn Baldwin 	if (prio >= base_pri) {
1908f5c157d9SJohn Baldwin 		td->td_flags &= ~TDF_BORROWING;
1909f5c157d9SJohn Baldwin 		sched_thread_priority(td, base_pri);
1910f5c157d9SJohn Baldwin 	} else
1911f5c157d9SJohn Baldwin 		sched_lend_prio(td, prio);
1912f5c157d9SJohn Baldwin }
1913f5c157d9SJohn Baldwin 
1914ae7a6b38SJeff Roberson /*
1915ae7a6b38SJeff Roberson  * Standard entry for setting the priority to an absolute value.
1916ae7a6b38SJeff Roberson  */
1917f5c157d9SJohn Baldwin void
1918f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1919f5c157d9SJohn Baldwin {
1920f5c157d9SJohn Baldwin 	u_char oldprio;
1921f5c157d9SJohn Baldwin 
1922f5c157d9SJohn Baldwin 	/* First, update the base priority. */
1923f5c157d9SJohn Baldwin 	td->td_base_pri = prio;
1924f5c157d9SJohn Baldwin 
1925f5c157d9SJohn Baldwin 	/*
192650aaa791SJohn Baldwin 	 * If the thread is borrowing another thread's priority, don't
1927f5c157d9SJohn Baldwin 	 * ever lower the priority.
1928f5c157d9SJohn Baldwin 	 */
1929f5c157d9SJohn Baldwin 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1930f5c157d9SJohn Baldwin 		return;
1931f5c157d9SJohn Baldwin 
1932f5c157d9SJohn Baldwin 	/* Change the real priority. */
1933f5c157d9SJohn Baldwin 	oldprio = td->td_priority;
1934f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1935f5c157d9SJohn Baldwin 
1936f5c157d9SJohn Baldwin 	/*
1937f5c157d9SJohn Baldwin 	 * If the thread is on a turnstile, then let the turnstile update
1938f5c157d9SJohn Baldwin 	 * its state.
1939f5c157d9SJohn Baldwin 	 */
1940f5c157d9SJohn Baldwin 	if (TD_ON_LOCK(td) && oldprio != prio)
1941f5c157d9SJohn Baldwin 		turnstile_adjust(td, oldprio);
1942f5c157d9SJohn Baldwin }
1943f5c157d9SJohn Baldwin 
1944ae7a6b38SJeff Roberson /*
1945ae7a6b38SJeff Roberson  * Set the base user priority; does not affect the current running priority.
1946ae7a6b38SJeff Roberson  */
194735e6168fSJeff Roberson void
19488460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
19493db720fdSDavid Xu {
19503db720fdSDavid Xu 
19518460a577SJohn Birrell 	td->td_base_user_pri = prio;
1952acbe332aSDavid Xu 	if (td->td_lend_user_pri <= prio)
1953fc6c30f6SJulian Elischer 		return;
19548460a577SJohn Birrell 	td->td_user_pri = prio;
19553db720fdSDavid Xu }
19563db720fdSDavid Xu 
19573db720fdSDavid Xu void
19583db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
19593db720fdSDavid Xu {
19603db720fdSDavid Xu 
1961435806d3SDavid Xu 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1962acbe332aSDavid Xu 	td->td_lend_user_pri = prio;
1963c8e368a9SDavid Xu 	td->td_user_pri = min(prio, td->td_base_user_pri);
1964c8e368a9SDavid Xu 	if (td->td_priority > td->td_user_pri)
1965c8e368a9SDavid Xu 		sched_prio(td, td->td_user_pri);
1966c8e368a9SDavid Xu 	else if (td->td_priority != td->td_user_pri)
1967c8e368a9SDavid Xu 		td->td_flags |= TDF_NEEDRESCHED;
1968435806d3SDavid Xu }
19693db720fdSDavid Xu 
1970ac97da9aSMateusz Guzik /*
1971ac97da9aSMateusz Guzik  * Like the above but first check if there is anything to do.
1972ac97da9aSMateusz Guzik  */
1973ac97da9aSMateusz Guzik void
1974ac97da9aSMateusz Guzik sched_lend_user_prio_cond(struct thread *td, u_char prio)
1975ac97da9aSMateusz Guzik {
1976ac97da9aSMateusz Guzik 
1977ac97da9aSMateusz Guzik 	if (td->td_lend_user_pri != prio)
1978ac97da9aSMateusz Guzik 		goto lend;
1979ac97da9aSMateusz Guzik 	if (td->td_user_pri != min(prio, td->td_base_user_pri))
1980ac97da9aSMateusz Guzik 		goto lend;
1981b77594bbSMateusz Guzik 	if (td->td_priority != td->td_user_pri)
1982ac97da9aSMateusz Guzik 		goto lend;
1983ac97da9aSMateusz Guzik 	return;
1984ac97da9aSMateusz Guzik 
1985ac97da9aSMateusz Guzik lend:
1986ac97da9aSMateusz Guzik 	thread_lock(td);
1987ac97da9aSMateusz Guzik 	sched_lend_user_prio(td, prio);
1988ac97da9aSMateusz Guzik 	thread_unlock(td);
1989ac97da9aSMateusz Guzik }
1990ac97da9aSMateusz Guzik 
19914c8a8cfcSKonstantin Belousov #ifdef SMP
1992ae7a6b38SJeff Roberson /*
199397e9382dSDon Lewis  * This tdq is about to idle.  Try to steal a thread from another CPU before
199497e9382dSDon Lewis  * choosing the idle thread.
199597e9382dSDon Lewis  */
199697e9382dSDon Lewis static void
199797e9382dSDon Lewis tdq_trysteal(struct tdq *tdq)
199897e9382dSDon Lewis {
19992668bb2aSAlexander Motin 	struct cpu_group *cg, *parent;
200097e9382dSDon Lewis 	struct tdq *steal;
200197e9382dSDon Lewis 	cpuset_t mask;
20022668bb2aSAlexander Motin 	int cpu, i, goup;
200397e9382dSDon Lewis 
200408063e9fSAlexander Motin 	if (smp_started == 0 || steal_idle == 0 || trysteal_limit == 0 ||
200508063e9fSAlexander Motin 	    tdq->tdq_cg == NULL)
200697e9382dSDon Lewis 		return;
200797e9382dSDon Lewis 	CPU_FILL(&mask);
200897e9382dSDon Lewis 	CPU_CLR(PCPU_GET(cpuid), &mask);
200997e9382dSDon Lewis 	/* We don't want to be preempted while we're iterating. */
201097e9382dSDon Lewis 	spinlock_enter();
201197e9382dSDon Lewis 	TDQ_UNLOCK(tdq);
20122668bb2aSAlexander Motin 	for (i = 1, cg = tdq->tdq_cg, goup = 0; ; ) {
201308063e9fSAlexander Motin 		cpu = sched_highest(cg, &mask, steal_thresh, 1);
201497e9382dSDon Lewis 		/*
201597e9382dSDon Lewis 		 * If a thread was added while interrupts were disabled, don't
201697e9382dSDon Lewis 		 * steal one here.
201797e9382dSDon Lewis 		 */
201897e9382dSDon Lewis 		if (tdq->tdq_load > 0) {
201997e9382dSDon Lewis 			TDQ_LOCK(tdq);
202097e9382dSDon Lewis 			break;
202197e9382dSDon Lewis 		}
20222668bb2aSAlexander Motin 
20232668bb2aSAlexander Motin 		/*
20242668bb2aSAlexander Motin 		 * We found no CPU to steal from in this group.  Escalate to
20252668bb2aSAlexander Motin 		 * the parent and repeat.  But if parent has only two children
20262668bb2aSAlexander Motin 		 * the parent and repeat.  But if the parent has only two child
20272668bb2aSAlexander Motin 		 * groups, we can avoid searching this group again by searching
20282668bb2aSAlexander Motin 		 */
202997e9382dSDon Lewis 		if (cpu == -1) {
20302668bb2aSAlexander Motin 			if (goup) {
203197e9382dSDon Lewis 				cg = cg->cg_parent;
20322668bb2aSAlexander Motin 				goup = 0;
20332668bb2aSAlexander Motin 			}
20342668bb2aSAlexander Motin 			if (++i > trysteal_limit) {
203597e9382dSDon Lewis 				TDQ_LOCK(tdq);
203697e9382dSDon Lewis 				break;
203797e9382dSDon Lewis 			}
20382668bb2aSAlexander Motin 			parent = cg->cg_parent;
20392668bb2aSAlexander Motin 			if (parent == NULL) {
20402668bb2aSAlexander Motin 				TDQ_LOCK(tdq);
20412668bb2aSAlexander Motin 				break;
20422668bb2aSAlexander Motin 			}
20432668bb2aSAlexander Motin 			if (parent->cg_children == 2) {
20442668bb2aSAlexander Motin 				if (cg == &parent->cg_child[0])
20452668bb2aSAlexander Motin 					cg = &parent->cg_child[1];
20462668bb2aSAlexander Motin 				else
20472668bb2aSAlexander Motin 					cg = &parent->cg_child[0];
20482668bb2aSAlexander Motin 				goup = 1;
20492668bb2aSAlexander Motin 			} else
20502668bb2aSAlexander Motin 				cg = parent;
205197e9382dSDon Lewis 			continue;
205297e9382dSDon Lewis 		}
205397e9382dSDon Lewis 		steal = TDQ_CPU(cpu);
205497e9382dSDon Lewis 		/*
205597e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
205697e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
205715b5c347SGordon Bergling 		 * At this point unconditionally exit the loop to bound
205808063e9fSAlexander Motin 		 * the time spent in the critical section.
205997e9382dSDon Lewis 		 */
206097e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
206197e9382dSDon Lewis 		    steal->tdq_transferable == 0)
206297e9382dSDon Lewis 			continue;
206397e9382dSDon Lewis 		/*
20648bb173fbSAlexander Motin 		 * Try to lock both queues. If we are assigned a thread while
20658bb173fbSAlexander Motin 		 * waiting for the lock, switch to it now instead of stealing.
20668bb173fbSAlexander Motin 		 * If we can't get the lock, then somebody likely got there
206708063e9fSAlexander Motin 		 * first.
206897e9382dSDon Lewis 		 */
20698bb173fbSAlexander Motin 		TDQ_LOCK(tdq);
20708bb173fbSAlexander Motin 		if (tdq->tdq_load > 0)
207197e9382dSDon Lewis 			break;
20728bb173fbSAlexander Motin 		if (TDQ_TRYLOCK_FLAGS(steal, MTX_DUPOK) == 0)
20738bb173fbSAlexander Motin 			break;
207497e9382dSDon Lewis 		/*
207597e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
207697e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
207797e9382dSDon Lewis 		 */
207897e9382dSDon Lewis 		if (steal->tdq_load < steal_thresh ||
207997e9382dSDon Lewis 		    steal->tdq_transferable == 0) {
208097e9382dSDon Lewis 			TDQ_UNLOCK(steal);
208197e9382dSDon Lewis 			break;
208297e9382dSDon Lewis 		}
208397e9382dSDon Lewis 		/*
208497e9382dSDon Lewis 		 * If we fail to acquire one due to affinity restrictions,
208597e9382dSDon Lewis 		 * bail out and let the idle thread do a more complete search
208697e9382dSDon Lewis 		 * outside of a critical section.
208797e9382dSDon Lewis 		 */
2088*6d3f74a1SMark Johnston 		if (tdq_move(steal, tdq) == -1) {
208997e9382dSDon Lewis 			TDQ_UNLOCK(steal);
209097e9382dSDon Lewis 			break;
209197e9382dSDon Lewis 		}
209297e9382dSDon Lewis 		TDQ_UNLOCK(steal);
209397e9382dSDon Lewis 		break;
209497e9382dSDon Lewis 	}
209597e9382dSDon Lewis 	spinlock_exit();
209697e9382dSDon Lewis }
20974c8a8cfcSKonstantin Belousov #endif
209897e9382dSDon Lewis 
209997e9382dSDon Lewis /*
2100c47f202bSJeff Roberson  * Handle migration from sched_switch().  This happens only for
2101c47f202bSJeff Roberson  * cpu binding.
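 * The thread is added to the target CPU's run queue here and that queue's
 * lock pointer is returned; sched_switch() passes it to cpu_switch() so the
 * outgoing thread's lock becomes the remote queue's lock once the switch
 * completes.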
2102c47f202bSJeff Roberson  */
2103c47f202bSJeff Roberson static struct mtx *
2104c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
2105c47f202bSJeff Roberson {
2106c47f202bSJeff Roberson 	struct tdq *tdn;
2107*6d3f74a1SMark Johnston 	int lowpri;
2108c47f202bSJeff Roberson 
2109686bcb5cSJeff Roberson 	KASSERT(THREAD_CAN_MIGRATE(td) ||
2110686bcb5cSJeff Roberson 	    (td_get_sched(td)->ts_flags & TSF_BOUND) != 0,
2111686bcb5cSJeff Roberson 	    ("Thread %p shouldn't migrate", td));
2112efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: "
2113efe67753SNathan Whitehorn 	    "thread %s queued on absent CPU %d.", td->td_name,
2114efe67753SNathan Whitehorn 	    td_get_sched(td)->ts_cpu));
211593ccd6bfSKonstantin Belousov 	tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
2116c47f202bSJeff Roberson #ifdef SMP
21179727e637SJeff Roberson 	tdq_load_rem(tdq, td);
2118c47f202bSJeff Roberson 	/*
2119686bcb5cSJeff Roberson 	 * Do the lock dance required to avoid LOR.  We have an
2120686bcb5cSJeff Roberson 	 * extra spinlock nesting from sched_switch() which will
2121686bcb5cSJeff Roberson 	 * prevent preemption while we're holding neither run-queue lock.
2122c47f202bSJeff Roberson 	 */
2123686bcb5cSJeff Roberson 	TDQ_UNLOCK(tdq);
2124686bcb5cSJeff Roberson 	TDQ_LOCK(tdn);
2125*6d3f74a1SMark Johnston 	lowpri = tdq_add(tdn, td, flags);
2126*6d3f74a1SMark Johnston 	tdq_notify(tdn, lowpri);
2127c47f202bSJeff Roberson 	TDQ_UNLOCK(tdn);
2128686bcb5cSJeff Roberson 	TDQ_LOCK(tdq);
2129c47f202bSJeff Roberson #endif
2130c47f202bSJeff Roberson 	return (TDQ_LOCKPTR(tdn));
2131c47f202bSJeff Roberson }
2132c47f202bSJeff Roberson 
2133c47f202bSJeff Roberson /*
213461a74c5cSJeff Roberson  * A thread_lock_unblock() variant that does not assume td_lock is blocked.
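 * The release store guarantees that the switched-out thread's state is
 * visible before anyone spinning in thread_lock() can observe the new
 * lock pointer.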
2135ae7a6b38SJeff Roberson  */
2136ae7a6b38SJeff Roberson static inline void
2137ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx)
2138ae7a6b38SJeff Roberson {
2139ae7a6b38SJeff Roberson 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
2140ae7a6b38SJeff Roberson 	    (uintptr_t)mtx);
2141ae7a6b38SJeff Roberson }
2142ae7a6b38SJeff Roberson 
2143ae7a6b38SJeff Roberson /*
2144ae7a6b38SJeff Roberson  * Switch threads.  This function has to handle threads coming in while
2145ae7a6b38SJeff Roberson  * blocked for some reason, running, or idle.  It also must deal with
2146ae7a6b38SJeff Roberson  * migrating a thread from one queue to another as running threads may
2147ae7a6b38SJeff Roberson  * be assigned elsewhere via binding.
2148ae7a6b38SJeff Roberson  */
21493db720fdSDavid Xu void
2150686bcb5cSJeff Roberson sched_switch(struct thread *td, int flags)
215135e6168fSJeff Roberson {
2152686bcb5cSJeff Roberson 	struct thread *newtd;
2153c02bbb43SJeff Roberson 	struct tdq *tdq;
2154ad1e7d28SJulian Elischer 	struct td_sched *ts;
2155ae7a6b38SJeff Roberson 	struct mtx *mtx;
2156c47f202bSJeff Roberson 	int srqflag;
21578db16699SAlexander Motin 	int cpuid, preempted;
21588db16699SAlexander Motin #ifdef SMP
21598db16699SAlexander Motin 	int pickcpu;
21608db16699SAlexander Motin #endif
216135e6168fSJeff Roberson 
21627b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
216335e6168fSJeff Roberson 
2164ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2165018ff686SJeff Roberson 	tdq = TDQ_SELF();
216693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
21677295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
21688db16699SAlexander Motin #ifdef SMP
2169e745d729SAlexander Motin 	pickcpu = (td->td_flags & TDF_PICKCPU) != 0;
2170e745d729SAlexander Motin 	if (pickcpu)
2171e745d729SAlexander Motin 		ts->ts_rltick = ticks - affinity * MAX_CACHE_LEVELS;
2172e745d729SAlexander Motin 	else
2173ae7a6b38SJeff Roberson 		ts->ts_rltick = ticks;
21748db16699SAlexander Motin #endif
2175060563ecSJulian Elischer 	td->td_lastcpu = td->td_oncpu;
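	/*
	 * Treat this switch as a preemption only if it was requested with
	 * SW_PREEMPT and the time slice had not already expired; this selects
	 * SRQ_PREEMPTED when the thread is put back on a run queue below.
	 */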
2176ad9dadc4SAndriy Gapon 	preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
2177ad9dadc4SAndriy Gapon 	    (flags & SW_PREEMPT) != 0;
2178e745d729SAlexander Motin 	td->td_flags &= ~(TDF_NEEDRESCHED | TDF_PICKCPU | TDF_SLICEEND);
217977918643SStephan Uphoff 	td->td_owepreempt = 0;
21807789ab32SMark Johnston 	tdq->tdq_owepreempt = 0;
21812c27cb3aSAlexander Motin 	if (!TD_IS_IDLETHREAD(td))
21821690c6c1SJeff Roberson 		tdq->tdq_switchcnt++;
21837789ab32SMark Johnston 
2184b11fdad0SJeff Roberson 	/*
2185686bcb5cSJeff Roberson 	 * Always block the thread lock so we can drop the tdq lock early.
2186b11fdad0SJeff Roberson 	 */
2187686bcb5cSJeff Roberson 	mtx = thread_lock_block(td);
2188686bcb5cSJeff Roberson 	spinlock_enter();
2189486a9414SJulian Elischer 	if (TD_IS_IDLETHREAD(td)) {
2190686bcb5cSJeff Roberson 		MPASS(mtx == TDQ_LOCKPTR(tdq));
2191bf0acc27SJohn Baldwin 		TD_SET_CAN_RUN(td);
21927b20fb19SJeff Roberson 	} else if (TD_IS_RUNNING(td)) {
2193686bcb5cSJeff Roberson 		MPASS(mtx == TDQ_LOCKPTR(tdq));
21943d7f4117SAlexander Motin 		srqflag = preempted ?
2195598b368dSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
2196c47f202bSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING;
2197ba4932b5SMatthew D Fleming #ifdef SMP
2198e745d729SAlexander Motin 		if (THREAD_CAN_MIGRATE(td) && (!THREAD_CAN_SCHED(td, ts->ts_cpu)
2199e745d729SAlexander Motin 		    || pickcpu))
22000f7a0ebdSMatthew D Fleming 			ts->ts_cpu = sched_pickcpu(td, 0);
2201ba4932b5SMatthew D Fleming #endif
2202c47f202bSJeff Roberson 		if (ts->ts_cpu == cpuid)
22039727e637SJeff Roberson 			tdq_runq_add(tdq, td, srqflag);
2204686bcb5cSJeff Roberson 		else
2205c47f202bSJeff Roberson 			mtx = sched_switch_migrate(tdq, td, srqflag);
2206ae7a6b38SJeff Roberson 	} else {
2207ae7a6b38SJeff Roberson 		/* This thread must be going to sleep. */
220861a74c5cSJeff Roberson 		if (mtx != TDQ_LOCKPTR(tdq)) {
220961a74c5cSJeff Roberson 			mtx_unlock_spin(mtx);
221061a74c5cSJeff Roberson 			TDQ_LOCK(tdq);
221161a74c5cSJeff Roberson 		}
22129727e637SJeff Roberson 		tdq_load_rem(tdq, td);
22134c8a8cfcSKonstantin Belousov #ifdef SMP
221497e9382dSDon Lewis 		if (tdq->tdq_load == 0)
221597e9382dSDon Lewis 			tdq_trysteal(tdq);
22164c8a8cfcSKonstantin Belousov #endif
2217ae7a6b38SJeff Roberson 	}
2218afa0a46cSAndriy Gapon 
2219afa0a46cSAndriy Gapon #if (KTR_COMPILE & KTR_SCHED) != 0
2220afa0a46cSAndriy Gapon 	if (TD_IS_IDLETHREAD(td))
2221afa0a46cSAndriy Gapon 		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
2222afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority);
2223afa0a46cSAndriy Gapon 	else
2224afa0a46cSAndriy Gapon 		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
2225afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
2226afa0a46cSAndriy Gapon 		    "lockname:\"%s\"", td->td_lockname);
2227afa0a46cSAndriy Gapon #endif
2228afa0a46cSAndriy Gapon 
2229ae7a6b38SJeff Roberson 	/*
2230ae7a6b38SJeff Roberson 	 * We enter here with the thread blocked and assigned to the
2231ae7a6b38SJeff Roberson 	 * appropriate cpu run-queue or sleep-queue and with the current
2232ae7a6b38SJeff Roberson 	 * thread-queue locked.
2233ae7a6b38SJeff Roberson 	 */
2234ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2235*6d3f74a1SMark Johnston 	MPASS(td == tdq->tdq_curthread);
22362454aaf5SJeff Roberson 	newtd = choosethread();
2237686bcb5cSJeff Roberson 	sched_pctcpu_update(td_get_sched(newtd), 0);
2238686bcb5cSJeff Roberson 	TDQ_UNLOCK(tdq);
2239686bcb5cSJeff Roberson 
2240ae7a6b38SJeff Roberson 	/*
2241ae7a6b38SJeff Roberson 	 * Call the MD code to switch contexts if necessary.
2242ae7a6b38SJeff Roberson 	 */
2243ebccf1e3SJoseph Koshy 	if (td != newtd) {
2244ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2245ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2246ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
2247ebccf1e3SJoseph Koshy #endif
2248d9fae5abSAndriy Gapon 		SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
22496f5f25e5SJohn Birrell 
22506f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
22516f5f25e5SJohn Birrell 		/*
22526f5f25e5SJohn Birrell 		 * If DTrace has set the active vtime enum to anything
22536f5f25e5SJohn Birrell 		 * other than INACTIVE (0), then it should have set the
22546f5f25e5SJohn Birrell 		 * function to call.
22556f5f25e5SJohn Birrell 		 */
22566f5f25e5SJohn Birrell 		if (dtrace_vtime_active)
22576f5f25e5SJohn Birrell 			(*dtrace_vtime_switch_func)(newtd);
22586f5f25e5SJohn Birrell #endif
2259686bcb5cSJeff Roberson 		td->td_oncpu = NOCPU;
2260ae7a6b38SJeff Roberson 		cpu_switch(td, newtd, mtx);
2261a89c2c8cSMark Johnston 		cpuid = td->td_oncpu = PCPU_GET(cpuid);
2262b3e9e682SRyan Stone 
2263d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , on__cpu);
2264ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2265ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2266ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
2267ebccf1e3SJoseph Koshy #endif
2268b3e9e682SRyan Stone 	} else {
2269ae7a6b38SJeff Roberson 		thread_unblock_switch(td, mtx);
2270d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , remain__cpu);
2271b3e9e682SRyan Stone 	}
2272686bcb5cSJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1,
2273686bcb5cSJeff Roberson 	    ("invalid count %d", curthread->td_md.md_spinlock_count));
2274afa0a46cSAndriy Gapon 
2275afa0a46cSAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
2276afa0a46cSAndriy Gapon 	    "prio:%d", td->td_priority);
227735e6168fSJeff Roberson }
227835e6168fSJeff Roberson 
2279ae7a6b38SJeff Roberson /*
2280ae7a6b38SJeff Roberson  * Adjust thread priorities as a result of a nice request.
2281ae7a6b38SJeff Roberson  */
228235e6168fSJeff Roberson void
2283fa885116SJulian Elischer sched_nice(struct proc *p, int nice)
228435e6168fSJeff Roberson {
228535e6168fSJeff Roberson 	struct thread *td;
228635e6168fSJeff Roberson 
2287fa885116SJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
2288e7d50326SJeff Roberson 
2289fa885116SJulian Elischer 	p->p_nice = nice;
22908460a577SJohn Birrell 	FOREACH_THREAD_IN_PROC(p, td) {
22917b20fb19SJeff Roberson 		thread_lock(td);
22928460a577SJohn Birrell 		sched_priority(td);
2293e7d50326SJeff Roberson 		sched_prio(td, td->td_base_user_pri);
22947b20fb19SJeff Roberson 		thread_unlock(td);
229535e6168fSJeff Roberson 	}
2296fa885116SJulian Elischer }
229735e6168fSJeff Roberson 
2298ae7a6b38SJeff Roberson /*
2299ae7a6b38SJeff Roberson  * Record the sleep time for the interactivity scorer.
2300ae7a6b38SJeff Roberson  */
230135e6168fSJeff Roberson void
2302c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int prio)
230335e6168fSJeff Roberson {
2304e7d50326SJeff Roberson 
23057b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
230635e6168fSJeff Roberson 
230754b0e65fSJeff Roberson 	td->td_slptick = ticks;
230817c4c356SKonstantin Belousov 	if (TD_IS_SUSPENDED(td) || prio >= PSOCK)
2309c5aa6b58SJeff Roberson 		td->td_flags |= TDF_CANSWAP;
23102dc29adbSJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
23112dc29adbSJohn Baldwin 		return;
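	/*
	 * A static_boost of exactly 1 boosts to the priority supplied by the
	 * caller; any other non-zero value is itself used as the boost
	 * priority.
	 */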
23120502fe2eSJeff Roberson 	if (static_boost == 1 && prio)
2313c5aa6b58SJeff Roberson 		sched_prio(td, prio);
23140502fe2eSJeff Roberson 	else if (static_boost && td->td_priority > static_boost)
23150502fe2eSJeff Roberson 		sched_prio(td, static_boost);
231635e6168fSJeff Roberson }
231735e6168fSJeff Roberson 
2318ae7a6b38SJeff Roberson /*
2319ae7a6b38SJeff Roberson  * Schedule a thread to resume execution and record how long it voluntarily
2320ae7a6b38SJeff Roberson  * slept.  We also update the pctcpu, interactivity, and priority.
232161a74c5cSJeff Roberson  *
232261a74c5cSJeff Roberson  * Requires the thread lock on entry, drops on exit.
2323ae7a6b38SJeff Roberson  */
232435e6168fSJeff Roberson void
232561a74c5cSJeff Roberson sched_wakeup(struct thread *td, int srqflags)
232635e6168fSJeff Roberson {
232714618990SJeff Roberson 	struct td_sched *ts;
2328ae7a6b38SJeff Roberson 	int slptick;
2329e7d50326SJeff Roberson 
23307b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
233193ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
2332c5aa6b58SJeff Roberson 	td->td_flags &= ~TDF_CANSWAP;
233361a74c5cSJeff Roberson 
233435e6168fSJeff Roberson 	/*
2335e7d50326SJeff Roberson 	 * If we slept for more than a tick update our interactivity and
2336e7d50326SJeff Roberson 	 * priority.
233735e6168fSJeff Roberson 	 */
233854b0e65fSJeff Roberson 	slptick = td->td_slptick;
233954b0e65fSJeff Roberson 	td->td_slptick = 0;
2340ae7a6b38SJeff Roberson 	if (slptick && slptick != ticks) {
23417295465eSAlexander Motin 		ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT;
23428460a577SJohn Birrell 		sched_interact_update(td);
23437295465eSAlexander Motin 		sched_pctcpu_update(ts, 0);
2344f1e8dc4aSJeff Roberson 	}
23455e5c3873SJeff Roberson 	/*
23465e5c3873SJeff Roberson 	 * Reset the slice value since we slept and advanced the round-robin.
23475e5c3873SJeff Roberson 	 */
23485e5c3873SJeff Roberson 	ts->ts_slice = 0;
234961a74c5cSJeff Roberson 	sched_add(td, SRQ_BORING | srqflags);
235035e6168fSJeff Roberson }
235135e6168fSJeff Roberson 
235235e6168fSJeff Roberson /*
235335e6168fSJeff Roberson  * Penalize the parent for creating a new child and initialize the child's
235435e6168fSJeff Roberson  * priority.
235535e6168fSJeff Roberson  */
235635e6168fSJeff Roberson void
23578460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
235815dc847eSJeff Roberson {
23597b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
236093ccd6bfSKonstantin Belousov 	sched_pctcpu_update(td_get_sched(td), 1);
2361ad1e7d28SJulian Elischer 	sched_fork_thread(td, child);
2362e7d50326SJeff Roberson 	/*
2363e7d50326SJeff Roberson 	 * Penalize the parent and child for forking.
2364e7d50326SJeff Roberson 	 */
2365e7d50326SJeff Roberson 	sched_interact_fork(child);
2366e7d50326SJeff Roberson 	sched_priority(child);
236793ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += tickincr;
2368e7d50326SJeff Roberson 	sched_interact_update(td);
2369e7d50326SJeff Roberson 	sched_priority(td);
2370ad1e7d28SJulian Elischer }
2371ad1e7d28SJulian Elischer 
2372ae7a6b38SJeff Roberson /*
2373ae7a6b38SJeff Roberson  * Fork a new thread, may be within the same process.
2374ae7a6b38SJeff Roberson  */
2375ad1e7d28SJulian Elischer void
2376ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
2377ad1e7d28SJulian Elischer {
2378ad1e7d28SJulian Elischer 	struct td_sched *ts;
2379ad1e7d28SJulian Elischer 	struct td_sched *ts2;
23805e5c3873SJeff Roberson 	struct tdq *tdq;
23818460a577SJohn Birrell 
23825e5c3873SJeff Roberson 	tdq = TDQ_SELF();
23838b16c208SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2384e7d50326SJeff Roberson 	/*
2385e7d50326SJeff Roberson 	 * Initialize child.
2386e7d50326SJeff Roberson 	 */
238793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
238893ccd6bfSKonstantin Belousov 	ts2 = td_get_sched(child);
238992de34dfSJohn Baldwin 	child->td_oncpu = NOCPU;
239092de34dfSJohn Baldwin 	child->td_lastcpu = NOCPU;
23915e5c3873SJeff Roberson 	child->td_lock = TDQ_LOCKPTR(tdq);
23928b16c208SJeff Roberson 	child->td_cpuset = cpuset_ref(td->td_cpuset);
23933f289c3fSJeff Roberson 	child->td_domain.dr_policy = td->td_cpuset->cs_domain;
2394ad1e7d28SJulian Elischer 	ts2->ts_cpu = ts->ts_cpu;
23958b16c208SJeff Roberson 	ts2->ts_flags = 0;
2396e7d50326SJeff Roberson 	/*
239722d19207SJohn Baldwin 	 * Grab our parent's cpu estimation information.
2398e7d50326SJeff Roberson 	 */
2399ad1e7d28SJulian Elischer 	ts2->ts_ticks = ts->ts_ticks;
2400ad1e7d28SJulian Elischer 	ts2->ts_ltick = ts->ts_ltick;
2401ad1e7d28SJulian Elischer 	ts2->ts_ftick = ts->ts_ftick;
240222d19207SJohn Baldwin 	/*
240322d19207SJohn Baldwin 	 * Do not inherit any borrowed priority from the parent.
240422d19207SJohn Baldwin 	 */
240522d19207SJohn Baldwin 	child->td_priority = child->td_base_pri;
2406e7d50326SJeff Roberson 	/*
2407e7d50326SJeff Roberson 	 * And update interactivity score.
2408e7d50326SJeff Roberson 	 */
2409ae7a6b38SJeff Roberson 	ts2->ts_slptime = ts->ts_slptime;
2410ae7a6b38SJeff Roberson 	ts2->ts_runtime = ts->ts_runtime;
24115e5c3873SJeff Roberson 	/* Attempt to quickly learn interactivity. */
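	/* The child starts with only sched_slice_min ticks left in its slice. */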
24125e5c3873SJeff Roberson 	ts2->ts_slice = tdq_slice(tdq) - sched_slice_min;
24138f51ad55SJeff Roberson #ifdef KTR
24148f51ad55SJeff Roberson 	bzero(ts2->ts_name, sizeof(ts2->ts_name));
24158f51ad55SJeff Roberson #endif
241615dc847eSJeff Roberson }
241715dc847eSJeff Roberson 
2418ae7a6b38SJeff Roberson /*
2419ae7a6b38SJeff Roberson  * Adjust the priority class of a thread.
2420ae7a6b38SJeff Roberson  */
242115dc847eSJeff Roberson void
24228460a577SJohn Birrell sched_class(struct thread *td, int class)
242315dc847eSJeff Roberson {
242415dc847eSJeff Roberson 
24257b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
24268460a577SJohn Birrell 	if (td->td_pri_class == class)
242715dc847eSJeff Roberson 		return;
24288460a577SJohn Birrell 	td->td_pri_class = class;
242935e6168fSJeff Roberson }
243035e6168fSJeff Roberson 
243135e6168fSJeff Roberson /*
243235e6168fSJeff Roberson  * Return some of the child's priority and interactivity to the parent.
243335e6168fSJeff Roberson  */
243435e6168fSJeff Roberson void
2435fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
243635e6168fSJeff Roberson {
2437e7d50326SJeff Roberson 	struct thread *td;
2438141ad61cSJeff Roberson 
24398f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
2440cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2441374ae2a3SJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
2442e7d50326SJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
2443e7d50326SJeff Roberson 	sched_exit_thread(td, child);
2444ad1e7d28SJulian Elischer }
2445ad1e7d28SJulian Elischer 
2446ae7a6b38SJeff Roberson /*
2447ae7a6b38SJeff Roberson  * Penalize another thread for the time spent on this one.  This helps to
2448ae7a6b38SJeff Roberson  * worsen the priority and interactivity of processes which schedule batch
2449ae7a6b38SJeff Roberson  * jobs such as make.  This has little effect on the make process itself but
2450ae7a6b38SJeff Roberson  * causes new processes spawned by it to receive worse scores immediately.
2451ae7a6b38SJeff Roberson  */
2452ad1e7d28SJulian Elischer void
2453fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child)
2454ad1e7d28SJulian Elischer {
2455fc6c30f6SJulian Elischer 
24568f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
2457cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2458e7d50326SJeff Roberson 	/*
2459e7d50326SJeff Roberson 	 * Give the child's runtime to the parent without returning the
2460e7d50326SJeff Roberson 	 * sleep time as a penalty to the parent.  This causes shells that
2461e7d50326SJeff Roberson 	 * launch expensive things to mark their children as expensive.
2462e7d50326SJeff Roberson 	 */
24637b20fb19SJeff Roberson 	thread_lock(td);
246493ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime;
2465fc6c30f6SJulian Elischer 	sched_interact_update(td);
2466e7d50326SJeff Roberson 	sched_priority(td);
24677b20fb19SJeff Roberson 	thread_unlock(td);
2468ad1e7d28SJulian Elischer }
2469ad1e7d28SJulian Elischer 
2470ff256d9cSJeff Roberson void
2471ff256d9cSJeff Roberson sched_preempt(struct thread *td)
2472ff256d9cSJeff Roberson {
2473ff256d9cSJeff Roberson 	struct tdq *tdq;
2474686bcb5cSJeff Roberson 	int flags;
2475ff256d9cSJeff Roberson 
2476b3e9e682SRyan Stone 	SDT_PROBE2(sched, , , surrender, td, td->td_proc);
2477b3e9e682SRyan Stone 
2478ff256d9cSJeff Roberson 	thread_lock(td);
2479ff256d9cSJeff Roberson 	tdq = TDQ_SELF();
2480ff256d9cSJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2481ff256d9cSJeff Roberson 	if (td->td_priority > tdq->tdq_lowpri) {
2482686bcb5cSJeff Roberson 		if (td->td_critnest == 1) {
24838df78c41SJeff Roberson 			flags = SW_INVOL | SW_PREEMPT;
2484686bcb5cSJeff Roberson 			flags |= TD_IS_IDLETHREAD(td) ? SWT_REMOTEWAKEIDLE :
2485686bcb5cSJeff Roberson 			    SWT_REMOTEPREEMPT;
2486686bcb5cSJeff Roberson 			mi_switch(flags);
2487686bcb5cSJeff Roberson 			/* Switch dropped thread lock. */
2488686bcb5cSJeff Roberson 			return;
2489686bcb5cSJeff Roberson 		}
2490ff256d9cSJeff Roberson 		td->td_owepreempt = 1;
24917789ab32SMark Johnston 	} else {
24927789ab32SMark Johnston 		tdq->tdq_owepreempt = 0;
2493ff256d9cSJeff Roberson 	}
2494ff256d9cSJeff Roberson 	thread_unlock(td);
2495ff256d9cSJeff Roberson }
2496ff256d9cSJeff Roberson 
2497ae7a6b38SJeff Roberson /*
2498ae7a6b38SJeff Roberson  * Fix priorities on return to user-space.  Priorities may be elevated due
2499ae7a6b38SJeff Roberson  * to static priorities in msleep() or similar.
2500ae7a6b38SJeff Roberson  */
2501ad1e7d28SJulian Elischer void
250228240885SMateusz Guzik sched_userret_slowpath(struct thread *td)
2503ad1e7d28SJulian Elischer {
250428240885SMateusz Guzik 
25057b20fb19SJeff Roberson 	thread_lock(td);
2506ad1e7d28SJulian Elischer 	td->td_priority = td->td_user_pri;
2507ad1e7d28SJulian Elischer 	td->td_base_pri = td->td_user_pri;
250862fa74d9SJeff Roberson 	tdq_setlowpri(TDQ_SELF(), td);
25097b20fb19SJeff Roberson 	thread_unlock(td);
2510ad1e7d28SJulian Elischer }
251135e6168fSJeff Roberson 
2512ae7a6b38SJeff Roberson /*
2513ae7a6b38SJeff Roberson  * Handle a stathz tick.  This is really only relevant for timeshare
2514ae7a6b38SJeff Roberson  * threads.
2515ae7a6b38SJeff Roberson  */
251635e6168fSJeff Roberson void
2517c3cccf95SJeff Roberson sched_clock(struct thread *td, int cnt)
251835e6168fSJeff Roberson {
2519ad1e7d28SJulian Elischer 	struct tdq *tdq;
2520ad1e7d28SJulian Elischer 	struct td_sched *ts;
252135e6168fSJeff Roberson 
2522ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
25233f872f85SJeff Roberson 	tdq = TDQ_SELF();
25247fcf154aSJeff Roberson #ifdef SMP
25257fcf154aSJeff Roberson 	/*
25267fcf154aSJeff Roberson 	 * We run the long term load balancer infrequently on the first cpu.
25277fcf154aSJeff Roberson 	 */
2528c3cccf95SJeff Roberson 	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 &&
2529c3cccf95SJeff Roberson 	    balance_ticks != 0) {
2530c3cccf95SJeff Roberson 		balance_ticks -= cnt;
2531c3cccf95SJeff Roberson 		if (balance_ticks <= 0)
25327fcf154aSJeff Roberson 			sched_balance();
25337fcf154aSJeff Roberson 	}
25347fcf154aSJeff Roberson #endif
25353f872f85SJeff Roberson 	/*
25361690c6c1SJeff Roberson 	 * Save the old switch count so we have a record of the last tick's
25371690c6c1SJeff Roberson 	 * activity.  Initialize the new switch count based on our load.
25381690c6c1SJeff Roberson 	 * If there is some activity seed it to reflect that.
25391690c6c1SJeff Roberson 	 */
25401690c6c1SJeff Roberson 	tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
25416c47aaaeSJeff Roberson 	tdq->tdq_switchcnt = tdq->tdq_load;
25421690c6c1SJeff Roberson 	/*
25433f872f85SJeff Roberson 	 * Advance the insert index once for each tick to ensure that all
25443f872f85SJeff Roberson 	 * threads get a chance to run.
25453f872f85SJeff Roberson 	 */
25463f872f85SJeff Roberson 	if (tdq->tdq_idx == tdq->tdq_ridx) {
25473f872f85SJeff Roberson 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
25483f872f85SJeff Roberson 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
25493f872f85SJeff Roberson 			tdq->tdq_ridx = tdq->tdq_idx;
25503f872f85SJeff Roberson 	}
255193ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
25527295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
2553c3cccf95SJeff Roberson 	if ((td->td_pri_class & PRI_FIFO_BIT) || TD_IS_IDLETHREAD(td))
2554a8949de2SJeff Roberson 		return;
2555c3cccf95SJeff Roberson 
2556c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
2557a8949de2SJeff Roberson 		/*
2558fd0b8c78SJeff Roberson 		 * We used a tick; charge it to the thread so
2559fd0b8c78SJeff Roberson 		 * that we can compute our interactivity.
256015dc847eSJeff Roberson 		 */
2561c3cccf95SJeff Roberson 		td_get_sched(td)->ts_runtime += tickincr * cnt;
25628460a577SJohn Birrell 		sched_interact_update(td);
256373daf66fSJeff Roberson 		sched_priority(td);
2564fd0b8c78SJeff Roberson 	}
2565579895dfSAlexander Motin 
256635e6168fSJeff Roberson 	/*
2567579895dfSAlexander Motin 	 * Force a context switch if the current thread has used up a full
2568579895dfSAlexander Motin 	 * time slice (default is 100ms).
256935e6168fSJeff Roberson 	 */
2570c3cccf95SJeff Roberson 	ts->ts_slice += cnt;
2571c3cccf95SJeff Roberson 	if (ts->ts_slice >= tdq_slice(tdq)) {
25725e5c3873SJeff Roberson 		ts->ts_slice = 0;
25733d7f4117SAlexander Motin 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
257435e6168fSJeff Roberson 	}
2575579895dfSAlexander Motin }
257635e6168fSJeff Roberson 
2577ccd0ec40SKonstantin Belousov u_int
2578ccd0ec40SKonstantin Belousov sched_estcpu(struct thread *td __unused)
2579ae7a6b38SJeff Roberson {
2580ae7a6b38SJeff Roberson 
2581ccd0ec40SKonstantin Belousov 	return (0);
2582ae7a6b38SJeff Roberson }
2583ae7a6b38SJeff Roberson 
2584ae7a6b38SJeff Roberson /*
2585ae7a6b38SJeff Roberson  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2586ae7a6b38SJeff Roberson  * cooperative idle threads.
2587ae7a6b38SJeff Roberson  */
258835e6168fSJeff Roberson int
258935e6168fSJeff Roberson sched_runnable(void)
259035e6168fSJeff Roberson {
2591ad1e7d28SJulian Elischer 	struct tdq *tdq;
2592b90816f1SJeff Roberson 	int load;
259335e6168fSJeff Roberson 
2594b90816f1SJeff Roberson 	load = 1;
2595b90816f1SJeff Roberson 
2596ad1e7d28SJulian Elischer 	tdq = TDQ_SELF();
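	/*
	 * When called from the idle thread any load at all means there is
	 * work to run; otherwise the current thread itself accounts for one
	 * unit of load and is not counted.
	 */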
25973f741ca1SJeff Roberson 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2598d2ad694cSJeff Roberson 		if (tdq->tdq_load > 0)
25993f741ca1SJeff Roberson 			goto out;
26003f741ca1SJeff Roberson 	} else
2601d2ad694cSJeff Roberson 		if (tdq->tdq_load - 1 > 0)
2602b90816f1SJeff Roberson 			goto out;
2603b90816f1SJeff Roberson 	load = 0;
2604b90816f1SJeff Roberson out:
2605b90816f1SJeff Roberson 	return (load);
260635e6168fSJeff Roberson }
260735e6168fSJeff Roberson 
2608ae7a6b38SJeff Roberson /*
2609ae7a6b38SJeff Roberson  * Choose the highest priority thread to run.  The thread is removed from
2610ef80894cSMark Johnston  * the run-queue while running; however, the load remains.
2611ae7a6b38SJeff Roberson  */
26127a5e5e2aSJeff Roberson struct thread *
2613c9f25d8fSJeff Roberson sched_choose(void)
2614c9f25d8fSJeff Roberson {
26159727e637SJeff Roberson 	struct thread *td;
2616ae7a6b38SJeff Roberson 	struct tdq *tdq;
2617ae7a6b38SJeff Roberson 
2618ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2619ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
26209727e637SJeff Roberson 	td = tdq_choose(tdq);
2621*6d3f74a1SMark Johnston 	if (td != NULL) {
26229727e637SJeff Roberson 		tdq_runq_rem(tdq, td);
26230502fe2eSJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
2624*6d3f74a1SMark Johnston 	} else {
26250502fe2eSJeff Roberson 		tdq->tdq_lowpri = PRI_MAX_IDLE;
2626*6d3f74a1SMark Johnston 		td = PCPU_GET(idlethread);
2627*6d3f74a1SMark Johnston 	}
2628*6d3f74a1SMark Johnston 	tdq->tdq_curthread = td;
2629*6d3f74a1SMark Johnston 	return (td);
26307a5e5e2aSJeff Roberson }
26317a5e5e2aSJeff Roberson 
2632ae7a6b38SJeff Roberson /*
2633ae7a6b38SJeff Roberson  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2634ae7a6b38SJeff Roberson  * we always request it once we exit a critical section.
2635ae7a6b38SJeff Roberson  */
2636ae7a6b38SJeff Roberson static inline void
2637ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
26387a5e5e2aSJeff Roberson {
26397a5e5e2aSJeff Roberson 	struct thread *ctd;
26407a5e5e2aSJeff Roberson 	int cpri;
26417a5e5e2aSJeff Roberson 	int pri;
26427a5e5e2aSJeff Roberson 
2643ff256d9cSJeff Roberson 	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2644ff256d9cSJeff Roberson 
26457a5e5e2aSJeff Roberson 	ctd = curthread;
26467a5e5e2aSJeff Roberson 	pri = td->td_priority;
26477a5e5e2aSJeff Roberson 	cpri = ctd->td_priority;
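	/*
	 * Lower numeric values are better priorities.  Always request a
	 * reschedule when the new thread beats curthread, but only set
	 * owepreempt when sched_shouldpreempt() agrees an immediate
	 * preemption is warranted.
	 */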
2648ff256d9cSJeff Roberson 	if (pri < cpri)
2649ff256d9cSJeff Roberson 		ctd->td_flags |= TDF_NEEDRESCHED;
2650879e0604SMateusz Guzik 	if (KERNEL_PANICKED() || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2651ae7a6b38SJeff Roberson 		return;
2652ff256d9cSJeff Roberson 	if (!sched_shouldpreempt(pri, cpri, 0))
2653ae7a6b38SJeff Roberson 		return;
26547a5e5e2aSJeff Roberson 	ctd->td_owepreempt = 1;
265535e6168fSJeff Roberson }
265635e6168fSJeff Roberson 
2657ae7a6b38SJeff Roberson /*
265873daf66fSJeff Roberson  * Add a thread to a thread queue.  Select the appropriate runq and add the
265973daf66fSJeff Roberson  * thread to it.  This is the internal function called when the tdq is
266073daf66fSJeff Roberson  * predetermined.
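 * The lowest priority that was on the queue before the addition is returned
 * so that callers can pass it to tdq_notify().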
2661ae7a6b38SJeff Roberson  */
2662*6d3f74a1SMark Johnston static int
2663ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
266435e6168fSJeff Roberson {
2665*6d3f74a1SMark Johnston 	int lowpri;
2666c9f25d8fSJeff Roberson 
2667ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
266861a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
26697a5e5e2aSJeff Roberson 	KASSERT((td->td_inhibitors == 0),
26707a5e5e2aSJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
26717a5e5e2aSJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
26727a5e5e2aSJeff Roberson 	    ("sched_add: bad thread state"));
2673b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
2674b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
2675ae7a6b38SJeff Roberson 
2676*6d3f74a1SMark Johnston 	lowpri = tdq->tdq_lowpri;
2677*6d3f74a1SMark Johnston 	if (td->td_priority < lowpri)
2678ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
26799727e637SJeff Roberson 	tdq_runq_add(tdq, td, flags);
26809727e637SJeff Roberson 	tdq_load_add(tdq, td);
2681*6d3f74a1SMark Johnston 	return (lowpri);
2682ae7a6b38SJeff Roberson }
2683ae7a6b38SJeff Roberson 
2684ae7a6b38SJeff Roberson /*
2685ae7a6b38SJeff Roberson  * Select the target thread queue and add a thread to it.  Request
2686ae7a6b38SJeff Roberson  * preemption or IPI a remote processor if required.
268761a74c5cSJeff Roberson  *
268861a74c5cSJeff Roberson  * Requires the thread lock on entry, drops on exit.
2689ae7a6b38SJeff Roberson  */
2690ae7a6b38SJeff Roberson void
2691ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags)
2692ae7a6b38SJeff Roberson {
2693ae7a6b38SJeff Roberson 	struct tdq *tdq;
26947b8bfa0dSJeff Roberson #ifdef SMP
2695*6d3f74a1SMark Johnston 	int cpu, lowpri;
2696ae7a6b38SJeff Roberson #endif
26978f51ad55SJeff Roberson 
26988f51ad55SJeff Roberson 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
26998f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
27008f51ad55SJeff Roberson 	    sched_tdname(curthread));
27018f51ad55SJeff Roberson 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
27028f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(td));
2703b3e9e682SRyan Stone 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
2704b3e9e682SRyan Stone 	    flags & SRQ_PREEMPTED);
2705ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2706ae7a6b38SJeff Roberson 	/*
2707ae7a6b38SJeff Roberson 	 * Recalculate the priority before we select the target cpu or
2708ae7a6b38SJeff Roberson 	 * run-queue.
2709ae7a6b38SJeff Roberson 	 */
2710ae7a6b38SJeff Roberson 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2711ae7a6b38SJeff Roberson 		sched_priority(td);
2712ae7a6b38SJeff Roberson #ifdef SMP
2713ae7a6b38SJeff Roberson 	/*
2714ae7a6b38SJeff Roberson 	 * Pick the destination cpu and if it isn't ours transfer to the
2715ae7a6b38SJeff Roberson 	 * target cpu.
2716ae7a6b38SJeff Roberson 	 */
27179727e637SJeff Roberson 	cpu = sched_pickcpu(td, flags);
27189727e637SJeff Roberson 	tdq = sched_setcpu(td, cpu, flags);
2719*6d3f74a1SMark Johnston 	lowpri = tdq_add(tdq, td, flags);
272061a74c5cSJeff Roberson 	if (cpu != PCPU_GET(cpuid))
2721*6d3f74a1SMark Johnston 		tdq_notify(tdq, lowpri);
272261a74c5cSJeff Roberson 	else if (!(flags & SRQ_YIELDING))
272361a74c5cSJeff Roberson 		sched_setpreempt(td);
2724ae7a6b38SJeff Roberson #else
2725ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2726ae7a6b38SJeff Roberson 	/*
2727ae7a6b38SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
2728ae7a6b38SJeff Roberson 	 * to the scheduler's lock.
2729ae7a6b38SJeff Roberson 	 */
2730e4894505SMark Johnston 	if (td->td_lock != TDQ_LOCKPTR(tdq)) {
2731e4894505SMark Johnston 		TDQ_LOCK(tdq);
273261a74c5cSJeff Roberson 		if ((flags & SRQ_HOLD) != 0)
273361a74c5cSJeff Roberson 			td->td_lock = TDQ_LOCKPTR(tdq);
273461a74c5cSJeff Roberson 		else
2735ae7a6b38SJeff Roberson 			thread_lock_set(td, TDQ_LOCKPTR(tdq));
2736e4894505SMark Johnston 	}
2737*6d3f74a1SMark Johnston 	(void)tdq_add(tdq, td, flags);
2738ae7a6b38SJeff Roberson 	if (!(flags & SRQ_YIELDING))
2739ae7a6b38SJeff Roberson 		sched_setpreempt(td);
274061a74c5cSJeff Roberson #endif
274161a74c5cSJeff Roberson 	if (!(flags & SRQ_HOLDTD))
274261a74c5cSJeff Roberson 		thread_unlock(td);
274335e6168fSJeff Roberson }
274435e6168fSJeff Roberson 
2745ae7a6b38SJeff Roberson /*
2746ae7a6b38SJeff Roberson  * Remove a thread from a run-queue without running it.  This is used
2747ae7a6b38SJeff Roberson  * when we're stealing a thread from a remote queue.  Otherwise all threads
2748ae7a6b38SJeff Roberson  * exit by calling sched_exit_thread() and sched_throw() themselves.
2749ae7a6b38SJeff Roberson  */
275035e6168fSJeff Roberson void
27517cf90fb3SJeff Roberson sched_rem(struct thread *td)
275235e6168fSJeff Roberson {
2753ad1e7d28SJulian Elischer 	struct tdq *tdq;
27547cf90fb3SJeff Roberson 
27558f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
27568f51ad55SJeff Roberson 	    "prio:%d", td->td_priority);
2757b3e9e682SRyan Stone 	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
275893ccd6bfSKonstantin Belousov 	tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
2759ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2760ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
27617a5e5e2aSJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
2762ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
27639727e637SJeff Roberson 	tdq_runq_rem(tdq, td);
27649727e637SJeff Roberson 	tdq_load_rem(tdq, td);
27657a5e5e2aSJeff Roberson 	TD_SET_CAN_RUN(td);
276662fa74d9SJeff Roberson 	if (td->td_priority == tdq->tdq_lowpri)
276762fa74d9SJeff Roberson 		tdq_setlowpri(tdq, NULL);
276835e6168fSJeff Roberson }
276935e6168fSJeff Roberson 
2770ae7a6b38SJeff Roberson /*
2771ae7a6b38SJeff Roberson  * Fetch cpu utilization information.  Updates on demand.
2772ae7a6b38SJeff Roberson  */
277335e6168fSJeff Roberson fixpt_t
27747cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
277535e6168fSJeff Roberson {
277635e6168fSJeff Roberson 	fixpt_t pctcpu;
2777ad1e7d28SJulian Elischer 	struct td_sched *ts;
277835e6168fSJeff Roberson 
277935e6168fSJeff Roberson 	pctcpu = 0;
278093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
278135e6168fSJeff Roberson 
27823da35a0aSJohn Baldwin 	THREAD_LOCK_ASSERT(td, MA_OWNED);
27837295465eSAlexander Motin 	sched_pctcpu_update(ts, TD_IS_RUNNING(td));
2784ad1e7d28SJulian Elischer 	if (ts->ts_ticks) {
278535e6168fSJeff Roberson 		int rtick;
278635e6168fSJeff Roberson 
278735e6168fSJeff Roberson 		/* How many rtick per second ? */
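		/*
		 * pctcpu is rtick / hz expressed in FSCALE fixed point, i.e.
		 * the fraction of recent ticks that were charged to this
		 * thread.
		 */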
2788e7d50326SJeff Roberson 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2789e7d50326SJeff Roberson 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
279035e6168fSJeff Roberson 	}
279135e6168fSJeff Roberson 
279235e6168fSJeff Roberson 	return (pctcpu);
279335e6168fSJeff Roberson }
279435e6168fSJeff Roberson 
279562fa74d9SJeff Roberson /*
279662fa74d9SJeff Roberson  * Enforce affinity settings for a thread.  Called after adjustments to
279762fa74d9SJeff Roberson  * cpumask.
279862fa74d9SJeff Roberson  */
2799885d51a3SJeff Roberson void
2800885d51a3SJeff Roberson sched_affinity(struct thread *td)
2801885d51a3SJeff Roberson {
280262fa74d9SJeff Roberson #ifdef SMP
280362fa74d9SJeff Roberson 	struct td_sched *ts;
280462fa74d9SJeff Roberson 
280562fa74d9SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
280693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
280762fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
280862fa74d9SJeff Roberson 		return;
280953a6c8b3SJeff Roberson 	if (TD_ON_RUNQ(td)) {
281053a6c8b3SJeff Roberson 		sched_rem(td);
2811d8d5f036SJeff Roberson 		sched_add(td, SRQ_BORING | SRQ_HOLDTD);
281253a6c8b3SJeff Roberson 		return;
281353a6c8b3SJeff Roberson 	}
281462fa74d9SJeff Roberson 	if (!TD_IS_RUNNING(td))
281562fa74d9SJeff Roberson 		return;
281662fa74d9SJeff Roberson 	/*
28170f7a0ebdSMatthew D Fleming 	 * Force a switch before returning to userspace.  If the
28180f7a0ebdSMatthew D Fleming 	 * target thread is not running locally, send an ipi to force
28190f7a0ebdSMatthew D Fleming 	 * the issue.
282062fa74d9SJeff Roberson 	 */
2821a8103ae8SJohn Baldwin 	td->td_flags |= TDF_NEEDRESCHED;
28220f7a0ebdSMatthew D Fleming 	if (td != curthread)
28230f7a0ebdSMatthew D Fleming 		ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
282462fa74d9SJeff Roberson #endif
2825885d51a3SJeff Roberson }
2826885d51a3SJeff Roberson 
2827ae7a6b38SJeff Roberson /*
2828ae7a6b38SJeff Roberson  * Bind a thread to a target cpu.
2829ae7a6b38SJeff Roberson  */
28309bacd788SJeff Roberson void
28319bacd788SJeff Roberson sched_bind(struct thread *td, int cpu)
28329bacd788SJeff Roberson {
2833ad1e7d28SJulian Elischer 	struct td_sched *ts;
28349bacd788SJeff Roberson 
2835c47f202bSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
28361d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
283793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
28386b2f763fSJeff Roberson 	if (ts->ts_flags & TSF_BOUND)
2839c95d2db2SJeff Roberson 		sched_unbind(td);
28400f7a0ebdSMatthew D Fleming 	KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
2841ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
28426b2f763fSJeff Roberson 	sched_pin();
284380f86c9fSJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
28449bacd788SJeff Roberson 		return;
28456b2f763fSJeff Roberson 	ts->ts_cpu = cpu;
28469bacd788SJeff Roberson 	/* When we return from mi_switch we'll be on the correct cpu. */
2847686bcb5cSJeff Roberson 	mi_switch(SW_VOL);
2848686bcb5cSJeff Roberson 	thread_lock(td);
28499bacd788SJeff Roberson }
28509bacd788SJeff Roberson 
2851ae7a6b38SJeff Roberson /*
2852ae7a6b38SJeff Roberson  * Release a bound thread.
2853ae7a6b38SJeff Roberson  */
28549bacd788SJeff Roberson void
28559bacd788SJeff Roberson sched_unbind(struct thread *td)
28569bacd788SJeff Roberson {
2857e7d50326SJeff Roberson 	struct td_sched *ts;
2858e7d50326SJeff Roberson 
28597b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
28601d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
286193ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
28626b2f763fSJeff Roberson 	if ((ts->ts_flags & TSF_BOUND) == 0)
28636b2f763fSJeff Roberson 		return;
2864e7d50326SJeff Roberson 	ts->ts_flags &= ~TSF_BOUND;
2865e7d50326SJeff Roberson 	sched_unpin();
28669bacd788SJeff Roberson }
28679bacd788SJeff Roberson 
286835e6168fSJeff Roberson int
2869ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
2870ebccf1e3SJoseph Koshy {
28717b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
287293ccd6bfSKonstantin Belousov 	return (td_get_sched(td)->ts_flags & TSF_BOUND);
2873ebccf1e3SJoseph Koshy }
2874ebccf1e3SJoseph Koshy 
2875ae7a6b38SJeff Roberson /*
2876ae7a6b38SJeff Roberson  * Basic yield call.
2877ae7a6b38SJeff Roberson  */
287836ec198bSDavid Xu void
287936ec198bSDavid Xu sched_relinquish(struct thread *td)
288036ec198bSDavid Xu {
28817b20fb19SJeff Roberson 	thread_lock(td);
2882686bcb5cSJeff Roberson 	mi_switch(SW_VOL | SWT_RELINQUISH);
288336ec198bSDavid Xu }
288436ec198bSDavid Xu 
2885ae7a6b38SJeff Roberson /*
2886ae7a6b38SJeff Roberson  * Return the total system load.
2887ae7a6b38SJeff Roberson  */
2888ebccf1e3SJoseph Koshy int
288933916c36SJeff Roberson sched_load(void)
289033916c36SJeff Roberson {
289133916c36SJeff Roberson #ifdef SMP
289233916c36SJeff Roberson 	int total;
289333916c36SJeff Roberson 	int i;
289433916c36SJeff Roberson 
289533916c36SJeff Roberson 	total = 0;
28963aa6d94eSJohn Baldwin 	CPU_FOREACH(i)
289762fa74d9SJeff Roberson 		total += TDQ_CPU(i)->tdq_sysload;
289833916c36SJeff Roberson 	return (total);
289933916c36SJeff Roberson #else
2900d2ad694cSJeff Roberson 	return (TDQ_SELF()->tdq_sysload);
290133916c36SJeff Roberson #endif
290233916c36SJeff Roberson }
290333916c36SJeff Roberson 
290433916c36SJeff Roberson int
290535e6168fSJeff Roberson sched_sizeof_proc(void)
290635e6168fSJeff Roberson {
290735e6168fSJeff Roberson 	return (sizeof(struct proc));
290835e6168fSJeff Roberson }
290935e6168fSJeff Roberson 
291035e6168fSJeff Roberson int
291135e6168fSJeff Roberson sched_sizeof_thread(void)
291235e6168fSJeff Roberson {
291335e6168fSJeff Roberson 	return (sizeof(struct thread) + sizeof(struct td_sched));
291435e6168fSJeff Roberson }
2915b41f1452SDavid Xu 
291609c8a4ccSJeff Roberson #ifdef SMP
291709c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)						\
291809c8a4ccSJeff Roberson     ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0)
291909c8a4ccSJeff Roberson #else
292009c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)	1
292109c8a4ccSJeff Roberson #endif
292209c8a4ccSJeff Roberson 
29237a5e5e2aSJeff Roberson /*
29247a5e5e2aSJeff Roberson  * The actual idle process.
29257a5e5e2aSJeff Roberson  */
29267a5e5e2aSJeff Roberson void
29277a5e5e2aSJeff Roberson sched_idletd(void *dummy)
29287a5e5e2aSJeff Roberson {
29297a5e5e2aSJeff Roberson 	struct thread *td;
2930ae7a6b38SJeff Roberson 	struct tdq *tdq;
29312c27cb3aSAlexander Motin 	int oldswitchcnt, switchcnt;
29321690c6c1SJeff Roberson 	int i;
29337a5e5e2aSJeff Roberson 
29347b55ab05SJeff Roberson 	mtx_assert(&Giant, MA_NOTOWNED);
29357a5e5e2aSJeff Roberson 	td = curthread;
2936ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2937ba96d2d8SJohn Baldwin 	THREAD_NO_SLEEPING();
29382c27cb3aSAlexander Motin 	oldswitchcnt = -1;
2939ae7a6b38SJeff Roberson 	for (;;) {
29402c27cb3aSAlexander Motin 		if (tdq->tdq_load) {
29412c27cb3aSAlexander Motin 			thread_lock(td);
2942686bcb5cSJeff Roberson 			mi_switch(SW_VOL | SWT_IDLE);
29432c27cb3aSAlexander Motin 		}
29442c27cb3aSAlexander Motin 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
2945ae7a6b38SJeff Roberson #ifdef SMP
294697e9382dSDon Lewis 		if (always_steal || switchcnt != oldswitchcnt) {
29472c27cb3aSAlexander Motin 			oldswitchcnt = switchcnt;
29481690c6c1SJeff Roberson 			if (tdq_idled(tdq) == 0)
29491690c6c1SJeff Roberson 				continue;
29502c27cb3aSAlexander Motin 		}
29511690c6c1SJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
29522fd4047fSAlexander Motin #else
29532fd4047fSAlexander Motin 		oldswitchcnt = switchcnt;
29542fd4047fSAlexander Motin #endif
29551690c6c1SJeff Roberson 		/*
29561690c6c1SJeff Roberson 		 * If we're switching very frequently, spin while checking
29571690c6c1SJeff Roberson 		 * for load rather than entering a low power state that
29587b55ab05SJeff Roberson 		 * may require an IPI.  However, don't do any busy
29597b55ab05SJeff Roberson 		 * loops while on SMT machines as this simply steals
29607b55ab05SJeff Roberson 		 * cycles from cores doing useful work.
29611690c6c1SJeff Roberson 		 */
296209c8a4ccSJeff Roberson 		if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) {
29631690c6c1SJeff Roberson 			for (i = 0; i < sched_idlespins; i++) {
29641690c6c1SJeff Roberson 				if (tdq->tdq_load)
29651690c6c1SJeff Roberson 					break;
29661690c6c1SJeff Roberson 				cpu_spinwait();
29671690c6c1SJeff Roberson 			}
29681690c6c1SJeff Roberson 		}
29692c27cb3aSAlexander Motin 
29702c27cb3aSAlexander Motin 		/* If there was context switch during spin, restart it. */
29716c47aaaeSJeff Roberson 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
29722c27cb3aSAlexander Motin 		if (tdq->tdq_load != 0 || switchcnt != oldswitchcnt)
29732c27cb3aSAlexander Motin 			continue;
29742c27cb3aSAlexander Motin 
29752c27cb3aSAlexander Motin 		/* Run main MD idle handler. */
29769f9ad565SAlexander Motin 		tdq->tdq_cpu_idle = 1;
297779654969SAlexander Motin 		/*
2978*6d3f74a1SMark Johnston 		 * Make sure that the tdq_cpu_idle update is globally visible
2979*6d3f74a1SMark Johnston 		 * before cpu_idle() reads tdq_load.  The order is important
2980*6d3f74a1SMark Johnston 		 * to avoid races with tdq_notify().
298179654969SAlexander Motin 		 */
2982e8677f38SKonstantin Belousov 		atomic_thread_fence_seq_cst();
298397e9382dSDon Lewis 		/*
298497e9382dSDon Lewis 		 * Checking again after the fence picks up assigned
298597e9382dSDon Lewis 		 * threads often enough to make it worthwhile to do so in
298697e9382dSDon Lewis 		 * order to avoid calling cpu_idle().
298797e9382dSDon Lewis 		 */
298897e9382dSDon Lewis 		if (tdq->tdq_load != 0) {
298997e9382dSDon Lewis 			tdq->tdq_cpu_idle = 0;
299097e9382dSDon Lewis 			continue;
299197e9382dSDon Lewis 		}
29922c27cb3aSAlexander Motin 		cpu_idle(switchcnt * 4 > sched_idlespinthresh);
29939f9ad565SAlexander Motin 		tdq->tdq_cpu_idle = 0;
29942c27cb3aSAlexander Motin 
29952c27cb3aSAlexander Motin 		/*
29962c27cb3aSAlexander Motin 		 * Account thread-less hardware interrupts and
29972c27cb3aSAlexander Motin 		 * other wakeup reasons equal to context switches.
29982c27cb3aSAlexander Motin 		 */
29992c27cb3aSAlexander Motin 		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
30002c27cb3aSAlexander Motin 		if (switchcnt != oldswitchcnt)
30012c27cb3aSAlexander Motin 			continue;
30022c27cb3aSAlexander Motin 		tdq->tdq_switchcnt++;
30032c27cb3aSAlexander Motin 		oldswitchcnt++;
3004ae7a6b38SJeff Roberson 	}
3005b41f1452SDavid Xu }
3006e7d50326SJeff Roberson 
30077b20fb19SJeff Roberson /*
30086a8ea6d1SKyle Evans  * sched_throw_grab() chooses a thread from the queue to switch to
30096a8ea6d1SKyle Evans  * next.  It returns with the tdq lock dropped in a spinlock section to
30106a8ea6d1SKyle Evans  * keep interrupts disabled until the CPU is running in a proper threaded
30116a8ea6d1SKyle Evans  * context.
30126a8ea6d1SKyle Evans  */
30136a8ea6d1SKyle Evans static struct thread *
30146a8ea6d1SKyle Evans sched_throw_grab(struct tdq *tdq)
30156a8ea6d1SKyle Evans {
30166a8ea6d1SKyle Evans 	struct thread *newtd;
30176a8ea6d1SKyle Evans 
30186a8ea6d1SKyle Evans 	newtd = choosethread();
30196a8ea6d1SKyle Evans 	spinlock_enter();
30206a8ea6d1SKyle Evans 	TDQ_UNLOCK(tdq);
30216a8ea6d1SKyle Evans 	KASSERT(curthread->td_md.md_spinlock_count == 1,
30226a8ea6d1SKyle Evans 	    ("invalid count %d", curthread->td_md.md_spinlock_count));
30236a8ea6d1SKyle Evans 	return (newtd);
30246a8ea6d1SKyle Evans }
30256a8ea6d1SKyle Evans 
30266a8ea6d1SKyle Evans /*
30276a8ea6d1SKyle Evans  * A CPU is entering for the first time.
30286a8ea6d1SKyle Evans  */
30296a8ea6d1SKyle Evans void
30306a8ea6d1SKyle Evans sched_ap_entry(void)
30316a8ea6d1SKyle Evans {
30326a8ea6d1SKyle Evans 	struct thread *newtd;
30336a8ea6d1SKyle Evans 	struct tdq *tdq;
30346a8ea6d1SKyle Evans 
30356a8ea6d1SKyle Evans 	tdq = TDQ_SELF();
30366a8ea6d1SKyle Evans 
30376a8ea6d1SKyle Evans 	/* This should have been setup in schedinit_ap(). */
30386a8ea6d1SKyle Evans 	THREAD_LOCKPTR_ASSERT(curthread, TDQ_LOCKPTR(tdq));
30396a8ea6d1SKyle Evans 
30406a8ea6d1SKyle Evans 	TDQ_LOCK(tdq);
30416a8ea6d1SKyle Evans 	/* Correct spinlock nesting. */
30426a8ea6d1SKyle Evans 	spinlock_exit();
30436a8ea6d1SKyle Evans 	PCPU_SET(switchtime, cpu_ticks());
30446a8ea6d1SKyle Evans 	PCPU_SET(switchticks, ticks);
30456a8ea6d1SKyle Evans 
30466a8ea6d1SKyle Evans 	newtd = sched_throw_grab(tdq);
30476a8ea6d1SKyle Evans 
30486a8ea6d1SKyle Evans 	/* doesn't return */
30496a8ea6d1SKyle Evans 	cpu_throw(NULL, newtd);
30506a8ea6d1SKyle Evans }
30516a8ea6d1SKyle Evans 
30526a8ea6d1SKyle Evans /*
30536a8ea6d1SKyle Evans  * A thread is exiting.
30547b20fb19SJeff Roberson  */
30557b20fb19SJeff Roberson void
30567b20fb19SJeff Roberson sched_throw(struct thread *td)
30577b20fb19SJeff Roberson {
305859c68134SJeff Roberson 	struct thread *newtd;
3059ae7a6b38SJeff Roberson 	struct tdq *tdq;
3060ae7a6b38SJeff Roberson 
3061018ff686SJeff Roberson 	tdq = TDQ_SELF();
30626a8ea6d1SKyle Evans 
30636a8ea6d1SKyle Evans 	MPASS(td != NULL);
3064686bcb5cSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
3065686bcb5cSJeff Roberson 	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
30666a8ea6d1SKyle Evans 
30679727e637SJeff Roberson 	tdq_load_rem(tdq, td);
306892de34dfSJohn Baldwin 	td->td_lastcpu = td->td_oncpu;
306992de34dfSJohn Baldwin 	td->td_oncpu = NOCPU;
30701eb13fceSJeff Roberson 	thread_lock_block(td);
30716a8ea6d1SKyle Evans 
30726a8ea6d1SKyle Evans 	newtd = sched_throw_grab(tdq);
30736a8ea6d1SKyle Evans 
30741eb13fceSJeff Roberson 	/* doesn't return */
30751eb13fceSJeff Roberson 	cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
30767b20fb19SJeff Roberson }
30777b20fb19SJeff Roberson 
3078ae7a6b38SJeff Roberson /*
3079ae7a6b38SJeff Roberson  * This is called from fork_exit().  Just acquire the correct locks and
3080ae7a6b38SJeff Roberson  * let fork do the rest of the work.
3081ae7a6b38SJeff Roberson  */
30827b20fb19SJeff Roberson void
3083fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
30847b20fb19SJeff Roberson {
3085ae7a6b38SJeff Roberson 	struct tdq *tdq;
3086ae7a6b38SJeff Roberson 	int cpuid;
30877b20fb19SJeff Roberson 
30887b20fb19SJeff Roberson 	/*
30897b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
3090ae7a6b38SJeff Roberson 	 * non-nested critical section with the scheduler lock held.
30917b20fb19SJeff Roberson 	 */
3092686bcb5cSJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1,
3093686bcb5cSJeff Roberson 	    ("invalid count %d", curthread->td_md.md_spinlock_count));
3094ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
3095018ff686SJeff Roberson 	tdq = TDQ_SELF();
3096686bcb5cSJeff Roberson 	TDQ_LOCK(tdq);
3097686bcb5cSJeff Roberson 	spinlock_exit();
3098ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
3099ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
310028ef18b8SAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
310128ef18b8SAndriy Gapon 	    "prio:%d", td->td_priority);
310228ef18b8SAndriy Gapon 	SDT_PROBE0(sched, , , on__cpu);
31037b20fb19SJeff Roberson }
31047b20fb19SJeff Roberson 
31058f51ad55SJeff Roberson /*
310615b5c347SGordon Bergling  * Create on first use to catch odd startup conditions.
31078f51ad55SJeff Roberson  */
31088f51ad55SJeff Roberson char *
31098f51ad55SJeff Roberson sched_tdname(struct thread *td)
31108f51ad55SJeff Roberson {
31118f51ad55SJeff Roberson #ifdef KTR
31128f51ad55SJeff Roberson 	struct td_sched *ts;
31138f51ad55SJeff Roberson 
311493ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
31158f51ad55SJeff Roberson 	if (ts->ts_name[0] == '\0')
31168f51ad55SJeff Roberson 		snprintf(ts->ts_name, sizeof(ts->ts_name),
31178f51ad55SJeff Roberson 		    "%s tid %d", td->td_name, td->td_tid);
31188f51ad55SJeff Roberson 	return (ts->ts_name);
31198f51ad55SJeff Roberson #else
31208f51ad55SJeff Roberson 	return (td->td_name);
31218f51ad55SJeff Roberson #endif
31228f51ad55SJeff Roberson }
31238f51ad55SJeff Roberson 
312444ad5475SJohn Baldwin #ifdef KTR
312544ad5475SJohn Baldwin void
312644ad5475SJohn Baldwin sched_clear_tdname(struct thread *td)
312744ad5475SJohn Baldwin {
312844ad5475SJohn Baldwin 	struct td_sched *ts;
312944ad5475SJohn Baldwin 
313093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
313144ad5475SJohn Baldwin 	ts->ts_name[0] = '\0';
313244ad5475SJohn Baldwin }
313344ad5475SJohn Baldwin #endif
313444ad5475SJohn Baldwin 
313507095abfSIvan Voras #ifdef SMP
313607095abfSIvan Voras 
313707095abfSIvan Voras /*
313807095abfSIvan Voras  * Build the CPU topology dump string.  It is called recursively to collect
313907095abfSIvan Voras  * the topology tree.
314007095abfSIvan Voras  */
314107095abfSIvan Voras static int
314207095abfSIvan Voras sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
314307095abfSIvan Voras     int indent)
314407095abfSIvan Voras {
314571a19bdcSAttilio Rao 	char cpusetbuf[CPUSETBUFSIZ];
314607095abfSIvan Voras 	int i, first;
314707095abfSIvan Voras 
314807095abfSIvan Voras 	sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
314919b8a6dbSAndriy Gapon 	    "", 1 + indent / 2, cg->cg_level);
315071a19bdcSAttilio Rao 	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
315171a19bdcSAttilio Rao 	    cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
315207095abfSIvan Voras 	first = TRUE;
3153aefe0a8cSAlexander Motin 	for (i = cg->cg_first; i <= cg->cg_last; i++) {
315471a19bdcSAttilio Rao 		if (CPU_ISSET(i, &cg->cg_mask)) {
315507095abfSIvan Voras 			if (!first)
315607095abfSIvan Voras 				sbuf_printf(sb, ", ");
315707095abfSIvan Voras 			else
315807095abfSIvan Voras 				first = FALSE;
315907095abfSIvan Voras 			sbuf_printf(sb, "%d", i);
316007095abfSIvan Voras 		}
316107095abfSIvan Voras 	}
316207095abfSIvan Voras 	sbuf_printf(sb, "</cpu>\n");
316307095abfSIvan Voras 
316407095abfSIvan Voras 	if (cg->cg_flags != 0) {
3165611daf7eSIvan Voras 		sbuf_printf(sb, "%*s <flags>", indent, "");
316607095abfSIvan Voras 		if ((cg->cg_flags & CG_FLAG_HTT) != 0)
31675368befbSIvan Voras 			sbuf_printf(sb, "<flag name=\"HTT\">HTT group</flag>");
3168a401f2d0SIvan Voras 		if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
3169a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"THREAD\">THREAD group</flag>");
31707b55ab05SJeff Roberson 		if ((cg->cg_flags & CG_FLAG_SMT) != 0)
3171a401f2d0SIvan Voras 			sbuf_printf(sb, "<flag name=\"SMT\">SMT group</flag>");
3172ef50d5fbSAlexander Motin 		if ((cg->cg_flags & CG_FLAG_NODE) != 0)
3173ef50d5fbSAlexander Motin 			sbuf_printf(sb, "<flag name=\"NODE\">NUMA node</flag>");
317407095abfSIvan Voras 		sbuf_printf(sb, "</flags>\n");
3175611daf7eSIvan Voras 	}
317607095abfSIvan Voras 
317707095abfSIvan Voras 	if (cg->cg_children > 0) {
317807095abfSIvan Voras 		sbuf_printf(sb, "%*s <children>\n", indent, "");
317907095abfSIvan Voras 		for (i = 0; i < cg->cg_children; i++)
318007095abfSIvan Voras 			sysctl_kern_sched_topology_spec_internal(sb,
318107095abfSIvan Voras 			    &cg->cg_child[i], indent+2);
318207095abfSIvan Voras 		sbuf_printf(sb, "%*s </children>\n", indent, "");
318307095abfSIvan Voras 	}
318407095abfSIvan Voras 	sbuf_printf(sb, "%*s</group>\n", indent, "");
318507095abfSIvan Voras 	return (0);
318607095abfSIvan Voras }
318707095abfSIvan Voras 
318807095abfSIvan Voras /*
318907095abfSIvan Voras  * Sysctl handler for retrieving topology dump. It's a wrapper for
319007095abfSIvan Voras  * the recursive sysctl_kern_sched_topology_spec_internal().
319107095abfSIvan Voras  */
319207095abfSIvan Voras static int
319307095abfSIvan Voras sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
319407095abfSIvan Voras {
319507095abfSIvan Voras 	struct sbuf *topo;
319607095abfSIvan Voras 	int err;
319707095abfSIvan Voras 
319807095abfSIvan Voras 	KASSERT(cpu_top != NULL, ("cpu_top isn't initialized"));
319907095abfSIvan Voras 
3200b97fa22cSIan Lepore 	topo = sbuf_new_for_sysctl(NULL, NULL, 512, req);
320107095abfSIvan Voras 	if (topo == NULL)
320207095abfSIvan Voras 		return (ENOMEM);
320307095abfSIvan Voras 
320407095abfSIvan Voras 	sbuf_printf(topo, "<groups>\n");
320507095abfSIvan Voras 	err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1);
320607095abfSIvan Voras 	sbuf_printf(topo, "</groups>\n");
320707095abfSIvan Voras 
320807095abfSIvan Voras 	if (err == 0) {
3209b97fa22cSIan Lepore 		err = sbuf_finish(topo);
321007095abfSIvan Voras 	}
321107095abfSIvan Voras 	sbuf_delete(topo);
321207095abfSIvan Voras 	return (err);
321307095abfSIvan Voras }
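/*
 * Usage sketch: this handler backs the read-only string OID
 * kern.sched.topology_spec registered below, so the dump can be read with
 * "sysctl -n kern.sched.topology_spec" or, from a hypothetical userland
 * consumer built with the usual <sys/types.h>, <sys/sysctl.h> and <stdio.h>
 * includes, with sysctlbyname(3):
 *
 *	char buf[4096];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctlbyname("kern.sched.topology_spec", buf, &len, NULL, 0) == 0)
 *		printf("%s", buf);
 *
 * The 4096-byte buffer is an arbitrary example size; a careful consumer
 * would query the required length first.
 */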
3214b67cc292SDavid Xu 
321507095abfSIvan Voras #endif
321607095abfSIvan Voras 
3217579895dfSAlexander Motin static int
3218579895dfSAlexander Motin sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
3219579895dfSAlexander Motin {
3220579895dfSAlexander Motin 	int error, new_val, period;
3221579895dfSAlexander Motin 
3222579895dfSAlexander Motin 	period = 1000000 / realstathz;
3223579895dfSAlexander Motin 	new_val = period * sched_slice;
3224579895dfSAlexander Motin 	error = sysctl_handle_int(oidp, &new_val, 0, req);
3225579895dfSAlexander Motin 	if (error != 0 || req->newptr == NULL)
3226579895dfSAlexander Motin 		return (error);
3227579895dfSAlexander Motin 	if (new_val <= 0)
3228579895dfSAlexander Motin 		return (EINVAL);
322937f4e025SAlexander Motin 	sched_slice = imax(1, (new_val + period / 2) / period);
32305e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
323137f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
323237f4e025SAlexander Motin 	    realstathz);
3233579895dfSAlexander Motin 	return (0);
3234579895dfSAlexander Motin }
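/*
 * Worked example with assumed values (realstathz = 127, sched_slice = 12):
 * period = 1000000 / 127 = 7874 microseconds per stathz tick, so reading the
 * OID reports a quantum of 7874 * 12 = 94488 microseconds.  Writing
 * "sysctl kern.sched.quantum=50000" then sets sched_slice to
 * imax(1, (50000 + 3937) / 7874) = 6 stathz ticks.
 */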
3235579895dfSAlexander Motin 
32367029da5cSPawel Biernacki SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
32377029da5cSPawel Biernacki     "Scheduler");
3238ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
3239e7d50326SJeff Roberson     "Scheduler name");
32407029da5cSPawel Biernacki SYSCTL_PROC(_kern_sched, OID_AUTO, quantum,
32417029da5cSPawel Biernacki     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
32427029da5cSPawel Biernacki     sysctl_kern_quantum, "I",
324337f4e025SAlexander Motin     "Quantum for timeshare threads in microseconds");
3244ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
324537f4e025SAlexander Motin     "Quantum for timeshare threads in stathz ticks");
32461c119e17SAlexander Motin SYSCTL_UINT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
3247ae7a6b38SJeff Roberson     "Interactivity score threshold");
324837f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW,
324937f4e025SAlexander Motin     &preempt_thresh, 0,
325037f4e025SAlexander Motin     "Maximal (lowest) priority for preemption");
325137f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0,
325237f4e025SAlexander Motin     "Assign static kernel priorities to sleeping threads");
325337f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0,
325437f4e025SAlexander Motin     "Number of times idle thread will spin waiting for new work");
325537f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW,
325637f4e025SAlexander Motin     &sched_idlespinthresh, 0,
325737f4e025SAlexander Motin     "Threshold before we will permit idle thread spinning");
32587b8bfa0dSJeff Roberson #ifdef SMP
3259ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
3260ae7a6b38SJeff Roberson     "Number of hz ticks to keep thread affinity for");
3261ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
3262ae7a6b38SJeff Roberson     "Enables the long-term load balancer");
32637fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
32647fcf154aSJeff Roberson     &balance_interval, 0,
3265579895dfSAlexander Motin     "Average period in stathz ticks to run the long-term balancer");
3266ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
3267ae7a6b38SJeff Roberson     "Attempts to steal work from other cores before idling");
326828994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
326937f4e025SAlexander Motin     "Minimum load on remote CPU before we'll steal");
327097e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, trysteal_limit, CTLFLAG_RW, &trysteal_limit,
327197e9382dSDon Lewis     0, "Topological distance limit for stealing threads in sched_switch()");
327297e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, always_steal, CTLFLAG_RW, &always_steal, 0,
327397e9382dSDon Lewis     "Always run the stealer from the idle thread");
327407095abfSIvan Voras SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
3275c69a1a50SMateusz Guzik     CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
327607095abfSIvan Voras     "XML dump of detected CPU topology");
32777b8bfa0dSJeff Roberson #endif
3278e7d50326SJeff Roberson 
327954b0e65fSJeff Roberson /* ps(1) compatibility.  All CPU percentages from ULE are already weighted. */
3280a5423ea3SJeff Roberson static int ccpu = 0;
3281b05ca429SPawel Biernacki SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0,
3282b05ca429SPawel Biernacki     "Decay factor used for updating %CPU in 4BSD scheduler");
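/*
 * Example: on a kernel running ULE, "sysctl -n kern.ccpu" simply reports 0;
 * the OID exists only so that tools expecting the 4BSD decay factor (the
 * ps(1) compatibility case noted above) keep working.
 */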
3283