xref: /freebsd/sys/kern/sched_ule.c (revision e24a65528388f4debfb12e936f314f85ba6ac407)
135e6168fSJeff Roberson /*-
24d846d26SWarner Losh  * SPDX-License-Identifier: BSD-2-Clause
38a36da99SPedro F. Giffuni  *
4e7d50326SJeff Roberson  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
535e6168fSJeff Roberson  * All rights reserved.
635e6168fSJeff Roberson  *
735e6168fSJeff Roberson  * Redistribution and use in source and binary forms, with or without
835e6168fSJeff Roberson  * modification, are permitted provided that the following conditions
935e6168fSJeff Roberson  * are met:
1035e6168fSJeff Roberson  * 1. Redistributions of source code must retain the above copyright
1135e6168fSJeff Roberson  *    notice unmodified, this list of conditions, and the following
1235e6168fSJeff Roberson  *    disclaimer.
1335e6168fSJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
1435e6168fSJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
1535e6168fSJeff Roberson  *    documentation and/or other materials provided with the distribution.
1635e6168fSJeff Roberson  *
1735e6168fSJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1835e6168fSJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1935e6168fSJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2035e6168fSJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2135e6168fSJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2235e6168fSJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2335e6168fSJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2435e6168fSJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2535e6168fSJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2635e6168fSJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2735e6168fSJeff Roberson  */
2835e6168fSJeff Roberson 
29ae7a6b38SJeff Roberson /*
30ae7a6b38SJeff Roberson  * This file implements the ULE scheduler.  ULE supports independent CPU
31ae7a6b38SJeff Roberson  * run queues and fine grain locking.  It has superior interactive
32ae7a6b38SJeff Roberson  * performance under load even on uni-processor systems.
33ae7a6b38SJeff Roberson  *
34ae7a6b38SJeff Roberson  * etymology:
35a5423ea3SJeff Roberson  *   ULE is the last three letters in schedule.  It owes its name to a
36ae7a6b38SJeff Roberson  * generic user created for a scheduling system by Paul Mikesell at
37ae7a6b38SJeff Roberson  * Isilon Systems and a general lack of creativity on the part of the author.
38ae7a6b38SJeff Roberson  */
39ae7a6b38SJeff Roberson 
40677b542eSDavid E. O'Brien #include <sys/cdefs.h>
414da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
424da0d332SPeter Wemm #include "opt_sched.h"
439923b511SScott Long 
4435e6168fSJeff Roberson #include <sys/param.h>
4535e6168fSJeff Roberson #include <sys/systm.h>
462c3490b1SMarcel Moolenaar #include <sys/kdb.h>
4735e6168fSJeff Roberson #include <sys/kernel.h>
4835e6168fSJeff Roberson #include <sys/ktr.h>
49c149e542SAttilio Rao #include <sys/limits.h>
5035e6168fSJeff Roberson #include <sys/lock.h>
5135e6168fSJeff Roberson #include <sys/mutex.h>
5235e6168fSJeff Roberson #include <sys/proc.h>
53245f3abfSJeff Roberson #include <sys/resource.h>
549bacd788SJeff Roberson #include <sys/resourcevar.h>
5535e6168fSJeff Roberson #include <sys/sched.h>
56b3e9e682SRyan Stone #include <sys/sdt.h>
5735e6168fSJeff Roberson #include <sys/smp.h>
5835e6168fSJeff Roberson #include <sys/sx.h>
5935e6168fSJeff Roberson #include <sys/sysctl.h>
6035e6168fSJeff Roberson #include <sys/sysproto.h>
61f5c157d9SJohn Baldwin #include <sys/turnstile.h>
62af29f399SDmitry Chagin #include <sys/umtxvar.h>
6335e6168fSJeff Roberson #include <sys/vmmeter.h>
6462fa74d9SJeff Roberson #include <sys/cpuset.h>
6507095abfSIvan Voras #include <sys/sbuf.h>
6635e6168fSJeff Roberson 
67ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS
68ebccf1e3SJoseph Koshy #include <sys/pmckern.h>
69ebccf1e3SJoseph Koshy #endif
70ebccf1e3SJoseph Koshy 
716f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
726f5f25e5SJohn Birrell #include <sys/dtrace_bsd.h>
7361322a0aSAlexander Motin int __read_mostly		dtrace_vtime_active;
746f5f25e5SJohn Birrell dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
756f5f25e5SJohn Birrell #endif
766f5f25e5SJohn Birrell 
7735e6168fSJeff Roberson #include <machine/cpu.h>
7822bf7d9aSJeff Roberson #include <machine/smp.h>
7935e6168fSJeff Roberson 
80ae7a6b38SJeff Roberson #define	KTR_ULE	0
8114618990SJeff Roberson 
820d2cf837SJeff Roberson #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
830d2cf837SJeff Roberson #define	TDQ_NAME_LEN	(sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU)))
846338c579SAttilio Rao #define	TDQ_LOADNAME_LEN	(sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load"))
858f51ad55SJeff Roberson 
866b2f763fSJeff Roberson /*
87ae7a6b38SJeff Roberson  * Thread scheduler specific section.  All fields are protected
88ae7a6b38SJeff Roberson  * by the thread lock.
89ed062c8dSJulian Elischer  */
90ad1e7d28SJulian Elischer struct td_sched {
91ae7a6b38SJeff Roberson 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
92ae7a6b38SJeff Roberson 	short		ts_flags;	/* TSF_* flags. */
93e77f9fedSAdrian Chadd 	int		ts_cpu;		/* CPU that we have affinity for. */
9473daf66fSJeff Roberson 	int		ts_rltick;	/* Real last tick, for affinity. */
95ae7a6b38SJeff Roberson 	int		ts_slice;	/* Ticks of slice remaining. */
96ae7a6b38SJeff Roberson 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
97ae7a6b38SJeff Roberson 	u_int		ts_runtime;	/* Number of ticks we were running */
98ad1e7d28SJulian Elischer 	int		ts_ltick;	/* Last tick that we were running on */
99ad1e7d28SJulian Elischer 	int		ts_ftick;	/* First tick that we were running on */
100ad1e7d28SJulian Elischer 	int		ts_ticks;	/* Tick count */
1018f51ad55SJeff Roberson #ifdef KTR
1028f51ad55SJeff Roberson 	char		ts_name[TS_NAME_LEN];
1038f51ad55SJeff Roberson #endif
104ed062c8dSJulian Elischer };
105ad1e7d28SJulian Elischer /* flags kept in ts_flags */
1067b8bfa0dSJeff Roberson #define	TSF_BOUND	0x0001		/* Thread can not migrate. */
1077b8bfa0dSJeff Roberson #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
10835e6168fSJeff Roberson 
10962fa74d9SJeff Roberson #define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
11062fa74d9SJeff Roberson #define	THREAD_CAN_SCHED(td, cpu)	\
11162fa74d9SJeff Roberson     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
11262fa74d9SJeff Roberson 
11393ccd6bfSKonstantin Belousov _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
11493ccd6bfSKonstantin Belousov     sizeof(struct thread0_storage),
11593ccd6bfSKonstantin Belousov     "increase struct thread0_storage.t0st_sched size");
11693ccd6bfSKonstantin Belousov 
11735e6168fSJeff Roberson /*
11812d56c0fSJohn Baldwin  * Priority ranges used for interactive and non-interactive timeshare
1192dc29adbSJohn Baldwin  * threads.  The timeshare priorities are split up into four ranges.
1202dc29adbSJohn Baldwin  * The first range handles interactive threads.  The last three ranges
1212dc29adbSJohn Baldwin  * (NHALF, x, and NHALF) handle non-interactive threads with the outer
1222dc29adbSJohn Baldwin  * ranges supporting nice values.
12312d56c0fSJohn Baldwin  */
1242dc29adbSJohn Baldwin #define	PRI_TIMESHARE_RANGE	(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
1252dc29adbSJohn Baldwin #define	PRI_INTERACT_RANGE	((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2)
12616705791SAndriy Gapon #define	PRI_BATCH_RANGE		(PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE)
1272dc29adbSJohn Baldwin 
1282dc29adbSJohn Baldwin #define	PRI_MIN_INTERACT	PRI_MIN_TIMESHARE
1292dc29adbSJohn Baldwin #define	PRI_MAX_INTERACT	(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1)
1302dc29adbSJohn Baldwin #define	PRI_MIN_BATCH		(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE)
13112d56c0fSJohn Baldwin #define	PRI_MAX_BATCH		PRI_MAX_TIMESHARE
13212d56c0fSJohn Baldwin 
13312d56c0fSJohn Baldwin /*
134e7d50326SJeff Roberson  * Cpu percentage computation macros and defines.
135e1f89c22SJeff Roberson  *
136e7d50326SJeff Roberson  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
137e7d50326SJeff Roberson  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
1388ab80cf0SJeff Roberson  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
139e7d50326SJeff Roberson  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
140e7d50326SJeff Roberson  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
141e7d50326SJeff Roberson  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
14235e6168fSJeff Roberson  */
143e7d50326SJeff Roberson #define	SCHED_TICK_SECS		10
144e7d50326SJeff Roberson #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
1458ab80cf0SJeff Roberson #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
146e7d50326SJeff Roberson #define	SCHED_TICK_SHIFT	10
147e7d50326SJeff Roberson #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
148eddb4efaSJeff Roberson #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
14935e6168fSJeff Roberson 
15035e6168fSJeff Roberson /*
151e7d50326SJeff Roberson  * These macros determine priorities for non-interactive threads.  They are
152e7d50326SJeff Roberson  * assigned a priority based on their recent cpu utilization as expressed
153e7d50326SJeff Roberson  * by the ratio of ticks to the tick total.  NHALF priorities at the start
154e7d50326SJeff Roberson  * and end of the MIN to MAX timeshare range are only reachable with negative
155e7d50326SJeff Roberson  * or positive nice respectively.
156e7d50326SJeff Roberson  *
157e7d50326SJeff Roberson  * PRI_RANGE:	Priority range for utilization dependent priorities.
158e7d50326SJeff Roberson  * PRI_NRESV:	Number of nice values.
159e7d50326SJeff Roberson  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
160e7d50326SJeff Roberson  * PRI_NICE:	Determines the part of the priority inherited from nice.
161e7d50326SJeff Roberson  */
162e7d50326SJeff Roberson #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
163e7d50326SJeff Roberson #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
16412d56c0fSJohn Baldwin #define	SCHED_PRI_MIN		(PRI_MIN_BATCH + SCHED_PRI_NHALF)
16512d56c0fSJohn Baldwin #define	SCHED_PRI_MAX		(PRI_MAX_BATCH - SCHED_PRI_NHALF)
16678920008SJohn Baldwin #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
167e7d50326SJeff Roberson #define	SCHED_PRI_TICKS(ts)						\
168e7d50326SJeff Roberson     (SCHED_TICK_HZ((ts)) /						\
1691e516cf5SJeff Roberson     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
170e7d50326SJeff Roberson #define	SCHED_PRI_NICE(nice)	(nice)
171e7d50326SJeff Roberson 
172e7d50326SJeff Roberson /*
173e7d50326SJeff Roberson  * These determine the interactivity of a process.  Interactivity differs from
174e7d50326SJeff Roberson  * cpu utilization in that it expresses the voluntary time slept vs time ran
175e7d50326SJeff Roberson  * while cpu utilization includes all time not running.  This more accurately
176e7d50326SJeff Roberson  * models the intent of the thread.
17735e6168fSJeff Roberson  *
178407b0157SJeff Roberson  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
179407b0157SJeff Roberson  *		before throttling back.
180d322132cSJeff Roberson  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
181210491d3SJeff Roberson  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
1829f518f20SAttilio Rao  * INTERACT_THRESH:	Threshold for placement on the current runq.
18335e6168fSJeff Roberson  */
184e7d50326SJeff Roberson #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
185e7d50326SJeff Roberson #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
186210491d3SJeff Roberson #define	SCHED_INTERACT_MAX	(100)
187210491d3SJeff Roberson #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
1884c9612c6SJeff Roberson #define	SCHED_INTERACT_THRESH	(30)
189e1f89c22SJeff Roberson 
1905e5c3873SJeff Roberson /*
1915e5c3873SJeff Roberson  * These parameters determine the slice behavior for batch work.
1925e5c3873SJeff Roberson  */
1935e5c3873SJeff Roberson #define	SCHED_SLICE_DEFAULT_DIVISOR	10	/* ~94 ms, 12 stathz ticks. */
1945e5c3873SJeff Roberson #define	SCHED_SLICE_MIN_DIVISOR		6	/* DEFAULT/MIN = ~16 ms. */
1955e5c3873SJeff Roberson 
1963d7f4117SAlexander Motin /* Flags kept in td_flags. */
197e745d729SAlexander Motin #define	TDF_PICKCPU	TDF_SCHED0	/* Thread should pick new CPU. */
1983d7f4117SAlexander Motin #define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
1993d7f4117SAlexander Motin 
20035e6168fSJeff Roberson /*
201e7d50326SJeff Roberson  * tickincr:		Converts a stathz tick into a hz domain scaled by
202e7d50326SJeff Roberson  *			the shift factor.  Without the shift the error rate
203e7d50326SJeff Roberson  *			due to rounding would be unacceptably high.
204e7d50326SJeff Roberson  * realstathz:		stathz is sometimes 0 and run off of hz.
205e7d50326SJeff Roberson  * sched_slice:		Runtime of each thread before rescheduling.
206ae7a6b38SJeff Roberson  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
20735e6168fSJeff Roberson  */
2081c119e17SAlexander Motin static u_int __read_mostly sched_interact = SCHED_INTERACT_THRESH;
20961322a0aSAlexander Motin static int __read_mostly tickincr = 8 << SCHED_TICK_SHIFT;
21061322a0aSAlexander Motin static int __read_mostly realstathz = 127;	/* reset during boot. */
21161322a0aSAlexander Motin static int __read_mostly sched_slice = 10;	/* reset during boot. */
21261322a0aSAlexander Motin static int __read_mostly sched_slice_min = 1;	/* reset during boot. */
21302e2d6b4SJeff Roberson #ifdef PREEMPTION
21402e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION
21561322a0aSAlexander Motin static int __read_mostly preempt_thresh = PRI_MAX_IDLE;
21602e2d6b4SJeff Roberson #else
21761322a0aSAlexander Motin static int __read_mostly preempt_thresh = PRI_MIN_KERN;
21802e2d6b4SJeff Roberson #endif
21902e2d6b4SJeff Roberson #else
22061322a0aSAlexander Motin static int __read_mostly preempt_thresh = 0;
22102e2d6b4SJeff Roberson #endif
22261322a0aSAlexander Motin static int __read_mostly static_boost = PRI_MIN_BATCH;
22361322a0aSAlexander Motin static int __read_mostly sched_idlespins = 10000;
22461322a0aSAlexander Motin static int __read_mostly sched_idlespinthresh = -1;
225ae7a6b38SJeff Roberson 
22635e6168fSJeff Roberson /*
22711484ad8SMark Johnston  * tdq - per processor runqs and statistics.  A mutex synchronizes access to
22811484ad8SMark Johnston  * most fields.  Some fields are loaded or modified without the mutex.
22911484ad8SMark Johnston  *
23011484ad8SMark Johnston  * Locking protocols:
23111484ad8SMark Johnston  * (c)  constant after initialization
23211484ad8SMark Johnston  * (f)  flag, set with the tdq lock held, cleared on local CPU
23311484ad8SMark Johnston  * (l)  all accesses are CPU-local
23411484ad8SMark Johnston  * (ls) stores are performed by the local CPU, loads may be lockless
23511484ad8SMark Johnston  * (t)  all accesses are protected by the tdq mutex
23611484ad8SMark Johnston  * (ts) stores are serialized by the tdq mutex, loads may be lockless
23735e6168fSJeff Roberson  */
238ad1e7d28SJulian Elischer struct tdq {
23939f819e2SJim Harris 	/*
24039f819e2SJim Harris 	 * Ordered to improve efficiency of cpu_search() and switch().
24139f819e2SJim Harris 	 * tdq_lock is padded to avoid false sharing with tdq_load and
24239f819e2SJim Harris 	 * tdq_cpu_idle.
24339f819e2SJim Harris 	 */
2444ceaf45dSAttilio Rao 	struct mtx_padalign tdq_lock;	/* run queue lock. */
24511484ad8SMark Johnston 	struct cpu_group *tdq_cg;	/* (c) Pointer to cpu topology. */
24611484ad8SMark Johnston 	struct thread	*tdq_curthread;	/* (t) Current executing thread. */
24711484ad8SMark Johnston 	int		tdq_load;	/* (ts) Aggregate load. */
24811484ad8SMark Johnston 	int		tdq_sysload;	/* (ts) For loadavg, !ITHD load. */
24911484ad8SMark Johnston 	int		tdq_cpu_idle;	/* (ls) cpu_idle() is active. */
25011484ad8SMark Johnston 	int		tdq_transferable; /* (ts) Transferable thread count. */
25111484ad8SMark Johnston 	short		tdq_switchcnt;	/* (l) Switches this tick. */
25211484ad8SMark Johnston 	short		tdq_oldswitchcnt; /* (l) Switches last tick. */
25311484ad8SMark Johnston 	u_char		tdq_lowpri;	/* (ts) Lowest priority thread. */
25411484ad8SMark Johnston 	u_char		tdq_owepreempt;	/* (f) Remote preemption pending. */
25511484ad8SMark Johnston 	u_char		tdq_idx;	/* (t) Current insert index. */
25611484ad8SMark Johnston 	u_char		tdq_ridx;	/* (t) Current removal index. */
25711484ad8SMark Johnston 	int		tdq_id;		/* (c) cpuid. */
25811484ad8SMark Johnston 	struct runq	tdq_realtime;	/* (t) real-time run queue. */
25911484ad8SMark Johnston 	struct runq	tdq_timeshare;	/* (t) timeshare run queue. */
26011484ad8SMark Johnston 	struct runq	tdq_idle;	/* (t) Queue of IDLE threads. */
2618f51ad55SJeff Roberson 	char		tdq_name[TDQ_NAME_LEN];
2628f51ad55SJeff Roberson #ifdef KTR
2638f51ad55SJeff Roberson 	char		tdq_loadname[TDQ_LOADNAME_LEN];
2648f51ad55SJeff Roberson #endif
26511484ad8SMark Johnston };
26635e6168fSJeff Roberson 
2671690c6c1SJeff Roberson /* Idle thread states and config. */
2681690c6c1SJeff Roberson #define	TDQ_RUNNING	1
2691690c6c1SJeff Roberson #define	TDQ_IDLE	2
2707b8bfa0dSJeff Roberson 
27111484ad8SMark Johnston /* Lockless accessors. */
27211484ad8SMark Johnston #define	TDQ_LOAD(tdq)		atomic_load_int(&(tdq)->tdq_load)
27311484ad8SMark Johnston #define	TDQ_TRANSFERABLE(tdq)	atomic_load_int(&(tdq)->tdq_transferable)
27411484ad8SMark Johnston #define	TDQ_SWITCHCNT(tdq)	(atomic_load_short(&(tdq)->tdq_switchcnt) + \
27511484ad8SMark Johnston 				 atomic_load_short(&(tdq)->tdq_oldswitchcnt))
27611484ad8SMark Johnston #define	TDQ_SWITCHCNT_INC(tdq)	(atomic_store_short(&(tdq)->tdq_switchcnt, \
27711484ad8SMark Johnston 				 atomic_load_short(&(tdq)->tdq_switchcnt) + 1))
27811484ad8SMark Johnston 
27980f86c9fSJeff Roberson #ifdef SMP
28061322a0aSAlexander Motin struct cpu_group __read_mostly *cpu_top;		/* CPU topology */
2817b8bfa0dSJeff Roberson 
28262fa74d9SJeff Roberson #define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
28362fa74d9SJeff Roberson #define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))
2847b8bfa0dSJeff Roberson 
2857b8bfa0dSJeff Roberson /*
2867b8bfa0dSJeff Roberson  * Run-time tunables.
2877b8bfa0dSJeff Roberson  */
28828994a58SJeff Roberson static int rebalance = 1;
2897fcf154aSJeff Roberson static int balance_interval = 128;	/* Default set in sched_initticks(). */
29061322a0aSAlexander Motin static int __read_mostly affinity;
29161322a0aSAlexander Motin static int __read_mostly steal_idle = 1;
29261322a0aSAlexander Motin static int __read_mostly steal_thresh = 2;
29361322a0aSAlexander Motin static int __read_mostly always_steal = 0;
29461322a0aSAlexander Motin static int __read_mostly trysteal_limit = 2;
29580f86c9fSJeff Roberson 
29635e6168fSJeff Roberson /*
297d2ad694cSJeff Roberson  * One thread queue per processor.
29835e6168fSJeff Roberson  */
29961322a0aSAlexander Motin static struct tdq __read_mostly *balance_tdq;
3007fcf154aSJeff Roberson static int balance_ticks;
301018ff686SJeff Roberson DPCPU_DEFINE_STATIC(struct tdq, tdq);
3022bf95012SAndrew Turner DPCPU_DEFINE_STATIC(uint32_t, randomval);
303dc03363dSJeff Roberson 
304018ff686SJeff Roberson #define	TDQ_SELF()	((struct tdq *)PCPU_GET(sched))
305018ff686SJeff Roberson #define	TDQ_CPU(x)	(DPCPU_ID_PTR((x), tdq))
306018ff686SJeff Roberson #define	TDQ_ID(x)	((x)->tdq_id)
30780f86c9fSJeff Roberson #else	/* !SMP */
308ad1e7d28SJulian Elischer static struct tdq	tdq_cpu;
309dc03363dSJeff Roberson 
31036b36916SJeff Roberson #define	TDQ_ID(x)	(0)
311ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu)
312ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu)
3130a016a05SJeff Roberson #endif
31435e6168fSJeff Roberson 
315ae7a6b38SJeff Roberson #define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
316ae7a6b38SJeff Roberson #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
317ae7a6b38SJeff Roberson #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
3188bb173fbSAlexander Motin #define	TDQ_TRYLOCK(t)		mtx_trylock_spin(TDQ_LOCKPTR((t)))
3198bb173fbSAlexander Motin #define	TDQ_TRYLOCK_FLAGS(t, f)	mtx_trylock_spin_flags(TDQ_LOCKPTR((t)), (f))
320ae7a6b38SJeff Roberson #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
3214ceaf45dSAttilio Rao #define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
322ae7a6b38SJeff Roberson 
3230927ff78SMark Johnston static void sched_setpreempt(int);
3248460a577SJohn Birrell static void sched_priority(struct thread *);
32521381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char);
3268460a577SJohn Birrell static int sched_interact_score(struct thread *);
3278460a577SJohn Birrell static void sched_interact_update(struct thread *);
3288460a577SJohn Birrell static void sched_interact_fork(struct thread *);
3297295465eSAlexander Motin static void sched_pctcpu_update(struct td_sched *, int);
33035e6168fSJeff Roberson 
3315d7ef00cSJeff Roberson /* Operations on per processor queues */
3329727e637SJeff Roberson static struct thread *tdq_choose(struct tdq *);
333018ff686SJeff Roberson static void tdq_setup(struct tdq *, int i);
3349727e637SJeff Roberson static void tdq_load_add(struct tdq *, struct thread *);
3359727e637SJeff Roberson static void tdq_load_rem(struct tdq *, struct thread *);
3369727e637SJeff Roberson static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
3379727e637SJeff Roberson static __inline void tdq_runq_rem(struct tdq *, struct thread *);
338ff256d9cSJeff Roberson static inline int sched_shouldpreempt(int, int, int);
33911484ad8SMark Johnston static void tdq_print(int cpu);
340e7d50326SJeff Roberson static void runq_print(struct runq *rq);
3416d3f74a1SMark Johnston static int tdq_add(struct tdq *, struct thread *, int);
3425d7ef00cSJeff Roberson #ifdef SMP
3436d3f74a1SMark Johnston static int tdq_move(struct tdq *, struct tdq *);
344ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *);
3456d3f74a1SMark Johnston static void tdq_notify(struct tdq *, int lowpri);
3469727e637SJeff Roberson static struct thread *tdq_steal(struct tdq *, int);
3479727e637SJeff Roberson static struct thread *runq_steal(struct runq *, int);
3489727e637SJeff Roberson static int sched_pickcpu(struct thread *, int);
3497fcf154aSJeff Roberson static void sched_balance(void);
3506d3f74a1SMark Johnston static bool sched_balance_pair(struct tdq *, struct tdq *);
3519727e637SJeff Roberson static inline struct tdq *sched_setcpu(struct thread *, int, int);
352ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *);
35307095abfSIvan Voras static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
35407095abfSIvan Voras static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
35507095abfSIvan Voras     struct cpu_group *cg, int indent);
3565d7ef00cSJeff Roberson #endif
3575d7ef00cSJeff Roberson 
358e7d50326SJeff Roberson static void sched_setup(void *dummy);
359237fdd78SRobert Watson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
360e7d50326SJeff Roberson 
361e7d50326SJeff Roberson static void sched_initticks(void *dummy);
362237fdd78SRobert Watson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
363237fdd78SRobert Watson     NULL);
364e7d50326SJeff Roberson 
365b3e9e682SRyan Stone SDT_PROVIDER_DEFINE(sched);
366b3e9e682SRyan Stone 
367d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
368b3e9e682SRyan Stone     "struct proc *", "uint8_t");
369d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
370b3e9e682SRyan Stone     "struct proc *", "void *");
371d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
372b3e9e682SRyan Stone     "struct proc *", "void *", "int");
373d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
374b3e9e682SRyan Stone     "struct proc *", "uint8_t", "struct thread *");
375d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
376d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
377b3e9e682SRyan Stone     "struct proc *");
378d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , on__cpu);
379d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , remain__cpu);
380d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
381b3e9e682SRyan Stone     "struct proc *");
382b3e9e682SRyan Stone 
3830567b6ccSWarner Losh /*
384ae7a6b38SJeff Roberson  * Print the threads waiting on a run-queue.
385ae7a6b38SJeff Roberson  */
386e7d50326SJeff Roberson static void
387e7d50326SJeff Roberson runq_print(struct runq *rq)
388e7d50326SJeff Roberson {
389e7d50326SJeff Roberson 	struct rqhead *rqh;
3909727e637SJeff Roberson 	struct thread *td;
391e7d50326SJeff Roberson 	int pri;
392e7d50326SJeff Roberson 	int j;
393e7d50326SJeff Roberson 	int i;
394e7d50326SJeff Roberson 
395e7d50326SJeff Roberson 	for (i = 0; i < RQB_LEN; i++) {
396e7d50326SJeff Roberson 		printf("\t\trunq bits %d 0x%zx\n",
397e7d50326SJeff Roberson 		    i, rq->rq_status.rqb_bits[i]);
398e7d50326SJeff Roberson 		for (j = 0; j < RQB_BPW; j++)
399e7d50326SJeff Roberson 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
400e7d50326SJeff Roberson 				pri = j + (i << RQB_L2BPW);
401e7d50326SJeff Roberson 				rqh = &rq->rq_queues[pri];
4029727e637SJeff Roberson 				TAILQ_FOREACH(td, rqh, td_runq) {
403e7d50326SJeff Roberson 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
4049727e637SJeff Roberson 					    td, td->td_name, td->td_priority,
4059727e637SJeff Roberson 					    td->td_rqindex, pri);
406e7d50326SJeff Roberson 				}
407e7d50326SJeff Roberson 			}
408e7d50326SJeff Roberson 	}
409e7d50326SJeff Roberson }
410e7d50326SJeff Roberson 
411ae7a6b38SJeff Roberson /*
412ae7a6b38SJeff Roberson  * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
413ae7a6b38SJeff Roberson  */
41411484ad8SMark Johnston static void __unused
415ad1e7d28SJulian Elischer tdq_print(int cpu)
41615dc847eSJeff Roberson {
417ad1e7d28SJulian Elischer 	struct tdq *tdq;
41815dc847eSJeff Roberson 
419ad1e7d28SJulian Elischer 	tdq = TDQ_CPU(cpu);
42015dc847eSJeff Roberson 
421c47f202bSJeff Roberson 	printf("tdq %d:\n", TDQ_ID(tdq));
42262fa74d9SJeff Roberson 	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
42362fa74d9SJeff Roberson 	printf("\tLock name:      %s\n", tdq->tdq_name);
424d2ad694cSJeff Roberson 	printf("\tload:           %d\n", tdq->tdq_load);
4251690c6c1SJeff Roberson 	printf("\tswitch cnt:     %d\n", tdq->tdq_switchcnt);
4261690c6c1SJeff Roberson 	printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
427e7d50326SJeff Roberson 	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
4283f872f85SJeff Roberson 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
4291690c6c1SJeff Roberson 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
4301690c6c1SJeff Roberson 	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
431e7d50326SJeff Roberson 	printf("\trealtime runq:\n");
432e7d50326SJeff Roberson 	runq_print(&tdq->tdq_realtime);
433e7d50326SJeff Roberson 	printf("\ttimeshare runq:\n");
434e7d50326SJeff Roberson 	runq_print(&tdq->tdq_timeshare);
435e7d50326SJeff Roberson 	printf("\tidle runq:\n");
436e7d50326SJeff Roberson 	runq_print(&tdq->tdq_idle);
43715dc847eSJeff Roberson }
43815dc847eSJeff Roberson 
439ff256d9cSJeff Roberson static inline int
440ff256d9cSJeff Roberson sched_shouldpreempt(int pri, int cpri, int remote)
441ff256d9cSJeff Roberson {
442ff256d9cSJeff Roberson 	/*
443ff256d9cSJeff Roberson 	 * If the new priority is not better than the current priority there is
444ff256d9cSJeff Roberson 	 * nothing to do.
445ff256d9cSJeff Roberson 	 */
446ff256d9cSJeff Roberson 	if (pri >= cpri)
447ff256d9cSJeff Roberson 		return (0);
448ff256d9cSJeff Roberson 	/*
449ff256d9cSJeff Roberson 	 * Always preempt idle.
450ff256d9cSJeff Roberson 	 */
451ff256d9cSJeff Roberson 	if (cpri >= PRI_MIN_IDLE)
452ff256d9cSJeff Roberson 		return (1);
453ff256d9cSJeff Roberson 	/*
454ff256d9cSJeff Roberson 	 * If preemption is disabled don't preempt others.
455ff256d9cSJeff Roberson 	 */
456ff256d9cSJeff Roberson 	if (preempt_thresh == 0)
457ff256d9cSJeff Roberson 		return (0);
458ff256d9cSJeff Roberson 	/*
459ff256d9cSJeff Roberson 	 * Preempt if we exceed the threshold.
460ff256d9cSJeff Roberson 	 */
461ff256d9cSJeff Roberson 	if (pri <= preempt_thresh)
462ff256d9cSJeff Roberson 		return (1);
463ff256d9cSJeff Roberson 	/*
46412d56c0fSJohn Baldwin 	 * If we're interactive or better and there is non-interactive
46512d56c0fSJohn Baldwin 	 * or worse running preempt only remote processors.
466ff256d9cSJeff Roberson 	 */
46712d56c0fSJohn Baldwin 	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
468ff256d9cSJeff Roberson 		return (1);
469ff256d9cSJeff Roberson 	return (0);
470ff256d9cSJeff Roberson }
471ff256d9cSJeff Roberson 
472ae7a6b38SJeff Roberson /*
473ae7a6b38SJeff Roberson  * Add a thread to the actual run-queue.  Keeps transferable counts up to
474ae7a6b38SJeff Roberson  * date with what is actually on the run-queue.  Selects the correct
475ae7a6b38SJeff Roberson  * queue position for timeshare threads.
476ae7a6b38SJeff Roberson  */
477155b9987SJeff Roberson static __inline void
4789727e637SJeff Roberson tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
479155b9987SJeff Roberson {
4809727e637SJeff Roberson 	struct td_sched *ts;
481c143ac21SJeff Roberson 	u_char pri;
482c143ac21SJeff Roberson 
483ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
48461a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
48573daf66fSJeff Roberson 
4869727e637SJeff Roberson 	pri = td->td_priority;
48793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
4889727e637SJeff Roberson 	TD_SET_RUNQ(td);
4899727e637SJeff Roberson 	if (THREAD_CAN_MIGRATE(td)) {
490d2ad694cSJeff Roberson 		tdq->tdq_transferable++;
491ad1e7d28SJulian Elischer 		ts->ts_flags |= TSF_XFERABLE;
49280f86c9fSJeff Roberson 	}
49312d56c0fSJohn Baldwin 	if (pri < PRI_MIN_BATCH) {
494c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_realtime;
49512d56c0fSJohn Baldwin 	} else if (pri <= PRI_MAX_BATCH) {
496c143ac21SJeff Roberson 		ts->ts_runq = &tdq->tdq_timeshare;
49712d56c0fSJohn Baldwin 		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
498e7d50326SJeff Roberson 			("Invalid priority %d on timeshare runq", pri));
499e7d50326SJeff Roberson 		/*
500e7d50326SJeff Roberson 		 * This queue contains only priorities between MIN and MAX
501ba71333fSMark Johnston 		 * batch.  Use the whole queue to represent these values.
502e7d50326SJeff Roberson 		 */
503c47f202bSJeff Roberson 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
50416705791SAndriy Gapon 			pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE;
505e7d50326SJeff Roberson 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
5063f872f85SJeff Roberson 			/*
5073f872f85SJeff Roberson 			 * This effectively shortens the queue by one so we
5083f872f85SJeff Roberson 			 * can have a one slot difference between idx and
5093f872f85SJeff Roberson 			 * ridx while we wait for threads to drain.
5103f872f85SJeff Roberson 			 */
5113f872f85SJeff Roberson 			if (tdq->tdq_ridx != tdq->tdq_idx &&
5123f872f85SJeff Roberson 			    pri == tdq->tdq_ridx)
5134499aff6SJeff Roberson 				pri = (unsigned char)(pri - 1) % RQ_NQS;
514e7d50326SJeff Roberson 		} else
5153f872f85SJeff Roberson 			pri = tdq->tdq_ridx;
5169727e637SJeff Roberson 		runq_add_pri(ts->ts_runq, td, pri, flags);
517c143ac21SJeff Roberson 		return;
518e7d50326SJeff Roberson 	} else
51973daf66fSJeff Roberson 		ts->ts_runq = &tdq->tdq_idle;
5209727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
52173daf66fSJeff Roberson }
52273daf66fSJeff Roberson 
52373daf66fSJeff Roberson /*
524ae7a6b38SJeff Roberson  * Remove a thread from a run-queue.  This typically happens when a thread
525ae7a6b38SJeff Roberson  * is selected to run.  Running threads are not on the queue and the
526ae7a6b38SJeff Roberson  * transferable count does not reflect them.
527ae7a6b38SJeff Roberson  */
528155b9987SJeff Roberson static __inline void
5299727e637SJeff Roberson tdq_runq_rem(struct tdq *tdq, struct thread *td)
530155b9987SJeff Roberson {
5319727e637SJeff Roberson 	struct td_sched *ts;
5329727e637SJeff Roberson 
53393ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
534ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
53561a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
536ae7a6b38SJeff Roberson 	KASSERT(ts->ts_runq != NULL,
5379727e637SJeff Roberson 	    ("tdq_runq_remove: thread %p null ts_runq", td));
538ad1e7d28SJulian Elischer 	if (ts->ts_flags & TSF_XFERABLE) {
539d2ad694cSJeff Roberson 		tdq->tdq_transferable--;
540ad1e7d28SJulian Elischer 		ts->ts_flags &= ~TSF_XFERABLE;
54180f86c9fSJeff Roberson 	}
5423f872f85SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
5433f872f85SJeff Roberson 		if (tdq->tdq_idx != tdq->tdq_ridx)
5449727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
545e7d50326SJeff Roberson 		else
5469727e637SJeff Roberson 			runq_remove_idx(ts->ts_runq, td, NULL);
5473f872f85SJeff Roberson 	} else
5489727e637SJeff Roberson 		runq_remove(ts->ts_runq, td);
549155b9987SJeff Roberson }
550155b9987SJeff Roberson 
551ae7a6b38SJeff Roberson /*
552ae7a6b38SJeff Roberson  * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
553ae7a6b38SJeff Roberson  * for this thread to the referenced thread queue.
554ae7a6b38SJeff Roberson  */
555a8949de2SJeff Roberson static void
5569727e637SJeff Roberson tdq_load_add(struct tdq *tdq, struct thread *td)
5575d7ef00cSJeff Roberson {
558ae7a6b38SJeff Roberson 
559ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
56061a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
56103d17db7SJeff Roberson 
562d2ad694cSJeff Roberson 	tdq->tdq_load++;
5631b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
564d2ad694cSJeff Roberson 		tdq->tdq_sysload++;
5658f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
566d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
5675d7ef00cSJeff Roberson }
56815dc847eSJeff Roberson 
569ae7a6b38SJeff Roberson /*
570ae7a6b38SJeff Roberson  * Remove the load from a thread that is transitioning to a sleep state or
571ae7a6b38SJeff Roberson  * exiting.
572ae7a6b38SJeff Roberson  */
573a8949de2SJeff Roberson static void
5749727e637SJeff Roberson tdq_load_rem(struct tdq *tdq, struct thread *td)
5755d7ef00cSJeff Roberson {
576ae7a6b38SJeff Roberson 
577ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
57861a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
579ae7a6b38SJeff Roberson 	KASSERT(tdq->tdq_load != 0,
580c47f202bSJeff Roberson 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
58103d17db7SJeff Roberson 
582d2ad694cSJeff Roberson 	tdq->tdq_load--;
5831b9d701fSAttilio Rao 	if ((td->td_flags & TDF_NOLOAD) == 0)
58403d17db7SJeff Roberson 		tdq->tdq_sysload--;
5858f51ad55SJeff Roberson 	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
586d9fae5abSAndriy Gapon 	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
58715dc847eSJeff Roberson }
58815dc847eSJeff Roberson 
589356500a3SJeff Roberson /*
5905e5c3873SJeff Roberson  * Bound timeshare latency by decreasing slice size as load increases.  We
5915e5c3873SJeff Roberson  * consider the maximum latency as the sum of the threads waiting to run
5925e5c3873SJeff Roberson  * aside from curthread and target no more than sched_slice latency but
5935e5c3873SJeff Roberson  * no less than sched_slice_min runtime.
5945e5c3873SJeff Roberson  */
5955e5c3873SJeff Roberson static inline int
5965e5c3873SJeff Roberson tdq_slice(struct tdq *tdq)
5975e5c3873SJeff Roberson {
5985e5c3873SJeff Roberson 	int load;
5995e5c3873SJeff Roberson 
6005e5c3873SJeff Roberson 	/*
6015e5c3873SJeff Roberson 	 * It is safe to use sys_load here because this is called from
6025e5c3873SJeff Roberson 	 * contexts where timeshare threads are running and so there
6035e5c3873SJeff Roberson 	 * cannot be higher priority load in the system.
6045e5c3873SJeff Roberson 	 */
6055e5c3873SJeff Roberson 	load = tdq->tdq_sysload - 1;
6065e5c3873SJeff Roberson 	if (load >= SCHED_SLICE_MIN_DIVISOR)
6075e5c3873SJeff Roberson 		return (sched_slice_min);
6085e5c3873SJeff Roberson 	if (load <= 1)
6095e5c3873SJeff Roberson 		return (sched_slice);
6105e5c3873SJeff Roberson 	return (sched_slice / load);
6115e5c3873SJeff Roberson }
6125e5c3873SJeff Roberson 
6135e5c3873SJeff Roberson /*
61462fa74d9SJeff Roberson  * Set lowpri to its exact value by searching the run-queue and
61562fa74d9SJeff Roberson  * evaluating curthread.  curthread may be passed as an optimization.
616356500a3SJeff Roberson  */
61722bf7d9aSJeff Roberson static void
61862fa74d9SJeff Roberson tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
61962fa74d9SJeff Roberson {
62062fa74d9SJeff Roberson 	struct thread *td;
62162fa74d9SJeff Roberson 
62262fa74d9SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
62362fa74d9SJeff Roberson 	if (ctd == NULL)
62411484ad8SMark Johnston 		ctd = tdq->tdq_curthread;
6259727e637SJeff Roberson 	td = tdq_choose(tdq);
6269727e637SJeff Roberson 	if (td == NULL || td->td_priority > ctd->td_priority)
62762fa74d9SJeff Roberson 		tdq->tdq_lowpri = ctd->td_priority;
62862fa74d9SJeff Roberson 	else
62962fa74d9SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
63062fa74d9SJeff Roberson }
63162fa74d9SJeff Roberson 
63262fa74d9SJeff Roberson #ifdef SMP
6339129dd59SPedro F. Giffuni /*
6349129dd59SPedro F. Giffuni  * We need some randomness. Implement a classic Linear Congruential
6359129dd59SPedro F. Giffuni  * Generator X_{n+1}=(aX_n+c) mod m. These values are optimized for
6369129dd59SPedro F. Giffuni  * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits
6379129dd59SPedro F. Giffuni  * of the random state (in the low bits of our answer) to keep
6389129dd59SPedro F. Giffuni  * the maximum randomness.
6399129dd59SPedro F. Giffuni  */
6409129dd59SPedro F. Giffuni static uint32_t
6419129dd59SPedro F. Giffuni sched_random(void)
6429129dd59SPedro F. Giffuni {
6439129dd59SPedro F. Giffuni 	uint32_t *rndptr;
6449129dd59SPedro F. Giffuni 
6459129dd59SPedro F. Giffuni 	rndptr = DPCPU_PTR(randomval);
6469129dd59SPedro F. Giffuni 	*rndptr = *rndptr * 69069 + 5;
6479129dd59SPedro F. Giffuni 
6489129dd59SPedro F. Giffuni 	return (*rndptr >> 16);
6499129dd59SPedro F. Giffuni }
6509129dd59SPedro F. Giffuni 
65162fa74d9SJeff Roberson struct cpu_search {
652e745d729SAlexander Motin 	cpuset_t *cs_mask;	/* The mask of allowed CPUs to choose from. */
653e745d729SAlexander Motin 	int	cs_prefer;	/* Prefer this CPU and groups including it. */
654e745d729SAlexander Motin 	int	cs_running;	/* The thread is now running at cs_prefer. */
65536acfc65SAlexander Motin 	int	cs_pri;		/* Min priority for low. */
65608063e9fSAlexander Motin 	int	cs_load;	/* Max load for low, min load for high. */
65708063e9fSAlexander Motin 	int	cs_trans;	/* Min transferable load for high. */
658aefe0a8cSAlexander Motin };
659aefe0a8cSAlexander Motin 
660aefe0a8cSAlexander Motin struct cpu_search_res {
66108063e9fSAlexander Motin 	int	csr_cpu;	/* The best CPU found. */
66208063e9fSAlexander Motin 	int	csr_load;	/* The load of cs_cpu. */
66362fa74d9SJeff Roberson };
66462fa74d9SJeff Roberson 
66562fa74d9SJeff Roberson /*
666aefe0a8cSAlexander Motin  * Search the tree of cpu_groups for the lowest or highest loaded CPU.
667aefe0a8cSAlexander Motin  * These routines actually compare the load on all paths through the tree
668aefe0a8cSAlexander Motin  * and find the least loaded cpu on the least loaded path, which may differ
669aefe0a8cSAlexander Motin  * from the least loaded cpu in the system.  This balances work among caches
670aefe0a8cSAlexander Motin  * and buses.
67162fa74d9SJeff Roberson  */
672aefe0a8cSAlexander Motin static int
673aefe0a8cSAlexander Motin cpu_search_lowest(const struct cpu_group *cg, const struct cpu_search *s,
674aefe0a8cSAlexander Motin     struct cpu_search_res *r)
67562fa74d9SJeff Roberson {
676aefe0a8cSAlexander Motin 	struct cpu_search_res lr;
67736acfc65SAlexander Motin 	struct tdq *tdq;
678e745d729SAlexander Motin 	int c, bload, l, load, p, total;
67962fa74d9SJeff Roberson 
68036acfc65SAlexander Motin 	total = 0;
681aefe0a8cSAlexander Motin 	bload = INT_MAX;
68208063e9fSAlexander Motin 	r->csr_cpu = -1;
68336acfc65SAlexander Motin 
684aefe0a8cSAlexander Motin 	/* Loop through children CPU groups if there are any. */
685aefe0a8cSAlexander Motin 	if (cg->cg_children > 0) {
686aefe0a8cSAlexander Motin 		for (c = cg->cg_children - 1; c >= 0; c--) {
687aefe0a8cSAlexander Motin 			load = cpu_search_lowest(&cg->cg_child[c], s, &lr);
68836acfc65SAlexander Motin 			total += load;
689e745d729SAlexander Motin 
690e745d729SAlexander Motin 			/*
691e745d729SAlexander Motin 			 * When balancing do not prefer SMT groups with load >1.
692e745d729SAlexander Motin 			 * It allows round-robin between SMT groups with equal
693e745d729SAlexander Motin 			 * load within parent group for more fair scheduling.
694e745d729SAlexander Motin 			 */
695e745d729SAlexander Motin 			if (__predict_false(s->cs_running) &&
696e745d729SAlexander Motin 			    (cg->cg_child[c].cg_flags & CG_FLAG_THREAD) &&
697e745d729SAlexander Motin 			    load >= 128 && (load & 128) != 0)
698e745d729SAlexander Motin 				load += 128;
699e745d729SAlexander Motin 
70008063e9fSAlexander Motin 			if (lr.csr_cpu >= 0 && (load < bload ||
70108063e9fSAlexander Motin 			    (load == bload && lr.csr_load < r->csr_load))) {
702aefe0a8cSAlexander Motin 				bload = load;
70308063e9fSAlexander Motin 				r->csr_cpu = lr.csr_cpu;
70408063e9fSAlexander Motin 				r->csr_load = lr.csr_load;
70536acfc65SAlexander Motin 			}
70636acfc65SAlexander Motin 		}
70762fa74d9SJeff Roberson 		return (total);
70862fa74d9SJeff Roberson 	}
70962fa74d9SJeff Roberson 
710aefe0a8cSAlexander Motin 	/* Loop through children CPUs otherwise. */
711aefe0a8cSAlexander Motin 	for (c = cg->cg_last; c >= cg->cg_first; c--) {
712aefe0a8cSAlexander Motin 		if (!CPU_ISSET(c, &cg->cg_mask))
713aefe0a8cSAlexander Motin 			continue;
714aefe0a8cSAlexander Motin 		tdq = TDQ_CPU(c);
71511484ad8SMark Johnston 		l = TDQ_LOAD(tdq);
716e745d729SAlexander Motin 		if (c == s->cs_prefer) {
717e745d729SAlexander Motin 			if (__predict_false(s->cs_running))
718e745d729SAlexander Motin 				l--;
719e745d729SAlexander Motin 			p = 128;
720e745d729SAlexander Motin 		} else
721e745d729SAlexander Motin 			p = 0;
722aefe0a8cSAlexander Motin 		load = l * 256;
723e745d729SAlexander Motin 		total += load - p;
724e745d729SAlexander Motin 
725e745d729SAlexander Motin 		/*
726e745d729SAlexander Motin 		 * Check this CPU is acceptable.
727e745d729SAlexander Motin 		 * If the threads is already on the CPU, don't look on the TDQ
728e745d729SAlexander Motin 		 * priority, since it can be the priority of the thread itself.
729e745d729SAlexander Motin 		 */
73011484ad8SMark Johnston 		if (l > s->cs_load ||
73111484ad8SMark Johnston 		    (atomic_load_char(&tdq->tdq_lowpri) <= s->cs_pri &&
732e745d729SAlexander Motin 		     (!s->cs_running || c != s->cs_prefer)) ||
733aefe0a8cSAlexander Motin 		    !CPU_ISSET(c, s->cs_mask))
734aefe0a8cSAlexander Motin 			continue;
735e745d729SAlexander Motin 
736e745d729SAlexander Motin 		/*
737e745d729SAlexander Motin 		 * When balancing do not prefer CPUs with load > 1.
738e745d729SAlexander Motin 		 * It allows round-robin between CPUs with equal load
739e745d729SAlexander Motin 		 * within the CPU group for more fair scheduling.
740e745d729SAlexander Motin 		 */
741e745d729SAlexander Motin 		if (__predict_false(s->cs_running) && l > 0)
742e745d729SAlexander Motin 			p = 0;
743e745d729SAlexander Motin 
744aefe0a8cSAlexander Motin 		load -= sched_random() % 128;
745e745d729SAlexander Motin 		if (bload > load - p) {
746e745d729SAlexander Motin 			bload = load - p;
74708063e9fSAlexander Motin 			r->csr_cpu = c;
74808063e9fSAlexander Motin 			r->csr_load = load;
749aefe0a8cSAlexander Motin 		}
750aefe0a8cSAlexander Motin 	}
751aefe0a8cSAlexander Motin 	return (total);
75262fa74d9SJeff Roberson }
75362fa74d9SJeff Roberson 
754aefe0a8cSAlexander Motin static int
755aefe0a8cSAlexander Motin cpu_search_highest(const struct cpu_group *cg, const struct cpu_search *s,
756aefe0a8cSAlexander Motin     struct cpu_search_res *r)
75762fa74d9SJeff Roberson {
758aefe0a8cSAlexander Motin 	struct cpu_search_res lr;
759aefe0a8cSAlexander Motin 	struct tdq *tdq;
760aefe0a8cSAlexander Motin 	int c, bload, l, load, total;
761aefe0a8cSAlexander Motin 
762aefe0a8cSAlexander Motin 	total = 0;
763aefe0a8cSAlexander Motin 	bload = INT_MIN;
76408063e9fSAlexander Motin 	r->csr_cpu = -1;
765aefe0a8cSAlexander Motin 
766aefe0a8cSAlexander Motin 	/* Loop through children CPU groups if there are any. */
767aefe0a8cSAlexander Motin 	if (cg->cg_children > 0) {
768aefe0a8cSAlexander Motin 		for (c = cg->cg_children - 1; c >= 0; c--) {
769aefe0a8cSAlexander Motin 			load = cpu_search_highest(&cg->cg_child[c], s, &lr);
770aefe0a8cSAlexander Motin 			total += load;
77108063e9fSAlexander Motin 			if (lr.csr_cpu >= 0 && (load > bload ||
77208063e9fSAlexander Motin 			    (load == bload && lr.csr_load > r->csr_load))) {
773aefe0a8cSAlexander Motin 				bload = load;
77408063e9fSAlexander Motin 				r->csr_cpu = lr.csr_cpu;
77508063e9fSAlexander Motin 				r->csr_load = lr.csr_load;
776aefe0a8cSAlexander Motin 			}
777aefe0a8cSAlexander Motin 		}
778aefe0a8cSAlexander Motin 		return (total);
77962fa74d9SJeff Roberson 	}
78062fa74d9SJeff Roberson 
781aefe0a8cSAlexander Motin 	/* Loop through children CPUs otherwise. */
782aefe0a8cSAlexander Motin 	for (c = cg->cg_last; c >= cg->cg_first; c--) {
783aefe0a8cSAlexander Motin 		if (!CPU_ISSET(c, &cg->cg_mask))
784aefe0a8cSAlexander Motin 			continue;
785aefe0a8cSAlexander Motin 		tdq = TDQ_CPU(c);
78611484ad8SMark Johnston 		l = TDQ_LOAD(tdq);
787aefe0a8cSAlexander Motin 		load = l * 256;
788aefe0a8cSAlexander Motin 		total += load;
789e745d729SAlexander Motin 
790e745d729SAlexander Motin 		/*
791e745d729SAlexander Motin 		 * Check this CPU is acceptable.
792e745d729SAlexander Motin 		 */
79311484ad8SMark Johnston 		if (l < s->cs_load || TDQ_TRANSFERABLE(tdq) < s->cs_trans ||
794aefe0a8cSAlexander Motin 		    !CPU_ISSET(c, s->cs_mask))
795aefe0a8cSAlexander Motin 			continue;
796e745d729SAlexander Motin 
797ca34553bSAlexander Motin 		load -= sched_random() % 256;
798aefe0a8cSAlexander Motin 		if (load > bload) {
799aefe0a8cSAlexander Motin 			bload = load;
80008063e9fSAlexander Motin 			r->csr_cpu = c;
801aefe0a8cSAlexander Motin 		}
802aefe0a8cSAlexander Motin 	}
80308063e9fSAlexander Motin 	r->csr_load = bload;
804aefe0a8cSAlexander Motin 	return (total);
80562fa74d9SJeff Roberson }
80662fa74d9SJeff Roberson 
80762fa74d9SJeff Roberson /*
80862fa74d9SJeff Roberson  * Find the cpu with the least load via the least loaded path that has a
80962fa74d9SJeff Roberson  * lowpri greater than pri  pri.  A pri of -1 indicates any priority is
81062fa74d9SJeff Roberson  * acceptable.
81162fa74d9SJeff Roberson  */
81262fa74d9SJeff Roberson static inline int
813aefe0a8cSAlexander Motin sched_lowest(const struct cpu_group *cg, cpuset_t *mask, int pri, int maxload,
814e745d729SAlexander Motin     int prefer, int running)
81562fa74d9SJeff Roberson {
816aefe0a8cSAlexander Motin 	struct cpu_search s;
817aefe0a8cSAlexander Motin 	struct cpu_search_res r;
81862fa74d9SJeff Roberson 
819aefe0a8cSAlexander Motin 	s.cs_prefer = prefer;
820e745d729SAlexander Motin 	s.cs_running = running;
821aefe0a8cSAlexander Motin 	s.cs_mask = mask;
822aefe0a8cSAlexander Motin 	s.cs_pri = pri;
82308063e9fSAlexander Motin 	s.cs_load = maxload;
824aefe0a8cSAlexander Motin 	cpu_search_lowest(cg, &s, &r);
82508063e9fSAlexander Motin 	return (r.csr_cpu);
82662fa74d9SJeff Roberson }
82762fa74d9SJeff Roberson 
82862fa74d9SJeff Roberson /*
82962fa74d9SJeff Roberson  * Find the cpu with the highest load via the highest loaded path.
83062fa74d9SJeff Roberson  */
83162fa74d9SJeff Roberson static inline int
83208063e9fSAlexander Motin sched_highest(const struct cpu_group *cg, cpuset_t *mask, int minload,
83308063e9fSAlexander Motin     int mintrans)
83462fa74d9SJeff Roberson {
835aefe0a8cSAlexander Motin 	struct cpu_search s;
836aefe0a8cSAlexander Motin 	struct cpu_search_res r;
83762fa74d9SJeff Roberson 
838aefe0a8cSAlexander Motin 	s.cs_mask = mask;
83908063e9fSAlexander Motin 	s.cs_load = minload;
84008063e9fSAlexander Motin 	s.cs_trans = mintrans;
841aefe0a8cSAlexander Motin 	cpu_search_highest(cg, &s, &r);
84208063e9fSAlexander Motin 	return (r.csr_cpu);
84362fa74d9SJeff Roberson }
84462fa74d9SJeff Roberson 
84562fa74d9SJeff Roberson static void
84662fa74d9SJeff Roberson sched_balance_group(struct cpu_group *cg)
84762fa74d9SJeff Roberson {
848018ff686SJeff Roberson 	struct tdq *tdq;
849e745d729SAlexander Motin 	struct thread *td;
85036acfc65SAlexander Motin 	cpuset_t hmask, lmask;
85136acfc65SAlexander Motin 	int high, low, anylow;
85262fa74d9SJeff Roberson 
85336acfc65SAlexander Motin 	CPU_FILL(&hmask);
85462fa74d9SJeff Roberson 	for (;;) {
85508063e9fSAlexander Motin 		high = sched_highest(cg, &hmask, 1, 0);
85636acfc65SAlexander Motin 		/* Stop if there is no more CPU with transferrable threads. */
85736acfc65SAlexander Motin 		if (high == -1)
85862fa74d9SJeff Roberson 			break;
85936acfc65SAlexander Motin 		CPU_CLR(high, &hmask);
86036acfc65SAlexander Motin 		CPU_COPY(&hmask, &lmask);
86136acfc65SAlexander Motin 		/* Stop if there is no more CPU left for low. */
86236acfc65SAlexander Motin 		if (CPU_EMPTY(&lmask))
86362fa74d9SJeff Roberson 			break;
864018ff686SJeff Roberson 		tdq = TDQ_CPU(high);
86511484ad8SMark Johnston 		if (TDQ_LOAD(tdq) == 1) {
866e745d729SAlexander Motin 			/*
867e745d729SAlexander Motin 			 * There is only one running thread.  We can't move
868e745d729SAlexander Motin 			 * it from here, so tell it to pick new CPU by itself.
869e745d729SAlexander Motin 			 */
870e745d729SAlexander Motin 			TDQ_LOCK(tdq);
87111484ad8SMark Johnston 			td = tdq->tdq_curthread;
872bd980ca8SMark Johnston 			if (td->td_lock == TDQ_LOCKPTR(tdq) &&
873bd980ca8SMark Johnston 			    (td->td_flags & TDF_IDLETD) == 0 &&
874e745d729SAlexander Motin 			    THREAD_CAN_MIGRATE(td)) {
875c6d31b83SKonstantin Belousov 				td->td_flags |= TDF_PICKCPU;
876c6d31b83SKonstantin Belousov 				ast_sched_locked(td, TDA_SCHED);
877e745d729SAlexander Motin 				if (high != curcpu)
878e745d729SAlexander Motin 					ipi_cpu(high, IPI_AST);
879e745d729SAlexander Motin 			}
880e745d729SAlexander Motin 			TDQ_UNLOCK(tdq);
881e745d729SAlexander Motin 			break;
882e745d729SAlexander Motin 		}
883e745d729SAlexander Motin 		anylow = 1;
88436acfc65SAlexander Motin nextlow:
88511484ad8SMark Johnston 		if (TDQ_TRANSFERABLE(tdq) == 0)
886e745d729SAlexander Motin 			continue;
88711484ad8SMark Johnston 		low = sched_lowest(cg, &lmask, -1, TDQ_LOAD(tdq) - 1, high, 1);
88836acfc65SAlexander Motin 		/* Stop if we looked well and found no less loaded CPU. */
88936acfc65SAlexander Motin 		if (anylow && low == -1)
89036acfc65SAlexander Motin 			break;
89136acfc65SAlexander Motin 		/* Go to next high if we found no less loaded CPU. */
89236acfc65SAlexander Motin 		if (low == -1)
89336acfc65SAlexander Motin 			continue;
89436acfc65SAlexander Motin 		/* Transfer thread from high to low. */
895018ff686SJeff Roberson 		if (sched_balance_pair(tdq, TDQ_CPU(low))) {
89636acfc65SAlexander Motin 			/* CPU that got thread can no longer be a donor. */
89736acfc65SAlexander Motin 			CPU_CLR(low, &hmask);
89836acfc65SAlexander Motin 		} else {
89962fa74d9SJeff Roberson 			/*
90036acfc65SAlexander Motin 			 * If failed, then there is no threads on high
90136acfc65SAlexander Motin 			 * that can run on this low. Drop low from low
90236acfc65SAlexander Motin 			 * mask and look for different one.
90362fa74d9SJeff Roberson 			 */
90436acfc65SAlexander Motin 			CPU_CLR(low, &lmask);
90536acfc65SAlexander Motin 			anylow = 0;
90636acfc65SAlexander Motin 			goto nextlow;
90762fa74d9SJeff Roberson 		}
90836acfc65SAlexander Motin 	}
90962fa74d9SJeff Roberson }
91062fa74d9SJeff Roberson 
91162fa74d9SJeff Roberson static void
91262375ca8SEd Schouten sched_balance(void)
913356500a3SJeff Roberson {
9147fcf154aSJeff Roberson 	struct tdq *tdq;
915356500a3SJeff Roberson 
9160567b6ccSWarner Losh 	balance_ticks = max(balance_interval / 2, 1) +
917b250ad34SWarner Losh 	    (sched_random() % balance_interval);
9187fcf154aSJeff Roberson 	tdq = TDQ_SELF();
9197fcf154aSJeff Roberson 	TDQ_UNLOCK(tdq);
92062fa74d9SJeff Roberson 	sched_balance_group(cpu_top);
9217fcf154aSJeff Roberson 	TDQ_LOCK(tdq);
922cac77d04SJeff Roberson }
92386f8ae96SJeff Roberson 
924ae7a6b38SJeff Roberson /*
925ae7a6b38SJeff Roberson  * Lock two thread queues using their address to maintain lock order.
926ae7a6b38SJeff Roberson  */
927ae7a6b38SJeff Roberson static void
928ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two)
929ae7a6b38SJeff Roberson {
930ae7a6b38SJeff Roberson 	if (one < two) {
931ae7a6b38SJeff Roberson 		TDQ_LOCK(one);
932ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
933ae7a6b38SJeff Roberson 	} else {
934ae7a6b38SJeff Roberson 		TDQ_LOCK(two);
935ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
936ae7a6b38SJeff Roberson 	}
937ae7a6b38SJeff Roberson }
938ae7a6b38SJeff Roberson 
939ae7a6b38SJeff Roberson /*
9407fcf154aSJeff Roberson  * Unlock two thread queues.  Order is not important here.
9417fcf154aSJeff Roberson  */
9427fcf154aSJeff Roberson static void
9437fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two)
9447fcf154aSJeff Roberson {
9457fcf154aSJeff Roberson 	TDQ_UNLOCK(one);
9467fcf154aSJeff Roberson 	TDQ_UNLOCK(two);
9477fcf154aSJeff Roberson }
9487fcf154aSJeff Roberson 
9497fcf154aSJeff Roberson /*
9506d3f74a1SMark Johnston  * Transfer load between two imbalanced thread queues.  Returns true if a thread
9516d3f74a1SMark Johnston  * was moved between the queues, and false otherwise.
952ae7a6b38SJeff Roberson  */
9536d3f74a1SMark Johnston static bool
954ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low)
955cac77d04SJeff Roberson {
9566d3f74a1SMark Johnston 	int cpu, lowpri;
9576d3f74a1SMark Johnston 	bool ret;
958cac77d04SJeff Roberson 
9596d3f74a1SMark Johnston 	ret = false;
960ae7a6b38SJeff Roberson 	tdq_lock_pair(high, low);
9616d3f74a1SMark Johnston 
962155b9987SJeff Roberson 	/*
96397e9382dSDon Lewis 	 * Transfer a thread from high to low.
964155b9987SJeff Roberson 	 */
9656d3f74a1SMark Johnston 	if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load) {
9666d3f74a1SMark Johnston 		lowpri = tdq_move(high, low);
9676d3f74a1SMark Johnston 		if (lowpri != -1) {
968a5423ea3SJeff Roberson 			/*
9690927ff78SMark Johnston 			 * In case the target isn't the current CPU notify it of
9706d3f74a1SMark Johnston 			 * the new load, possibly sending an IPI to force it to
9710927ff78SMark Johnston 			 * reschedule.  Otherwise maybe schedule a preemption.
972a5423ea3SJeff Roberson 			 */
973880bf8b9SMarius Strobl 			cpu = TDQ_ID(low);
974880bf8b9SMarius Strobl 			if (cpu != PCPU_GET(cpuid))
9756d3f74a1SMark Johnston 				tdq_notify(low, lowpri);
9760927ff78SMark Johnston 			else
9770927ff78SMark Johnston 				sched_setpreempt(low->tdq_lowpri);
9786d3f74a1SMark Johnston 			ret = true;
9796d3f74a1SMark Johnston 		}
980ae7a6b38SJeff Roberson 	}
9817fcf154aSJeff Roberson 	tdq_unlock_pair(high, low);
9826d3f74a1SMark Johnston 	return (ret);
983356500a3SJeff Roberson }
984356500a3SJeff Roberson 
985ae7a6b38SJeff Roberson /*
9866d3f74a1SMark Johnston  * Move a thread from one thread queue to another.  Returns -1 if the source
9876d3f74a1SMark Johnston  * queue was empty, else returns the maximum priority of all threads in
9886d3f74a1SMark Johnston  * the destination queue prior to the addition of the new thread.  In the latter
9896d3f74a1SMark Johnston  * case, this priority can be used to determine whether an IPI needs to be
9906d3f74a1SMark Johnston  * delivered.
991ae7a6b38SJeff Roberson  */
9926d3f74a1SMark Johnston static int
993ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to)
994356500a3SJeff Roberson {
995ae7a6b38SJeff Roberson 	struct thread *td;
996ae7a6b38SJeff Roberson 	int cpu;
997356500a3SJeff Roberson 
9987fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(from, MA_OWNED);
9997fcf154aSJeff Roberson 	TDQ_LOCK_ASSERT(to, MA_OWNED);
10007fcf154aSJeff Roberson 
1001ae7a6b38SJeff Roberson 	cpu = TDQ_ID(to);
100235dd6d6cSMark Johnston 	td = tdq_steal(from, cpu);
10039727e637SJeff Roberson 	if (td == NULL)
10046d3f74a1SMark Johnston 		return (-1);
100561a74c5cSJeff Roberson 
1006ae7a6b38SJeff Roberson 	/*
100761a74c5cSJeff Roberson 	 * Although the run queue is locked the thread may be
100861a74c5cSJeff Roberson 	 * blocked.  We can not set the lock until it is unblocked.
1009ae7a6b38SJeff Roberson 	 */
101061a74c5cSJeff Roberson 	thread_lock_block_wait(td);
1011ae7a6b38SJeff Roberson 	sched_rem(td);
101261a74c5cSJeff Roberson 	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(from));
1013ae7a6b38SJeff Roberson 	td->td_lock = TDQ_LOCKPTR(to);
101461a74c5cSJeff Roberson 	td_get_sched(td)->ts_cpu = cpu;
10156d3f74a1SMark Johnston 	return (tdq_add(to, td, SRQ_YIELDING));
1016356500a3SJeff Roberson }
101722bf7d9aSJeff Roberson 
1018ae7a6b38SJeff Roberson /*
1019ae7a6b38SJeff Roberson  * This tdq has idled.  Try to steal a thread from another cpu and switch
1020ae7a6b38SJeff Roberson  * to it.
1021ae7a6b38SJeff Roberson  */
102280f86c9fSJeff Roberson static int
1023ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq)
102422bf7d9aSJeff Roberson {
10252668bb2aSAlexander Motin 	struct cpu_group *cg, *parent;
1026ad1e7d28SJulian Elischer 	struct tdq *steal;
1027c76ee827SJeff Roberson 	cpuset_t mask;
10282668bb2aSAlexander Motin 	int cpu, switchcnt, goup;
102980f86c9fSJeff Roberson 
103097e9382dSDon Lewis 	if (smp_started == 0 || steal_idle == 0 || tdq->tdq_cg == NULL)
103188f530ccSJeff Roberson 		return (1);
1032c76ee827SJeff Roberson 	CPU_FILL(&mask);
1033c76ee827SJeff Roberson 	CPU_CLR(PCPU_GET(cpuid), &mask);
103497e9382dSDon Lewis restart:
103511484ad8SMark Johnston 	switchcnt = TDQ_SWITCHCNT(tdq);
10362668bb2aSAlexander Motin 	for (cg = tdq->tdq_cg, goup = 0; ; ) {
103708063e9fSAlexander Motin 		cpu = sched_highest(cg, &mask, steal_thresh, 1);
103897e9382dSDon Lewis 		/*
103997e9382dSDon Lewis 		 * We were assigned a thread but not preempted.  Returning
104097e9382dSDon Lewis 		 * 0 here will cause our caller to switch to it.
104197e9382dSDon Lewis 		 */
104211484ad8SMark Johnston 		if (TDQ_LOAD(tdq))
104397e9382dSDon Lewis 			return (0);
10442668bb2aSAlexander Motin 
10452668bb2aSAlexander Motin 		/*
10462668bb2aSAlexander Motin 		 * We found no CPU to steal from in this group.  Escalate to
10472668bb2aSAlexander Motin 		 * the parent and repeat.  But if parent has only two children
10482668bb2aSAlexander Motin 		 * groups we can avoid searching this group again by searching
10492668bb2aSAlexander Motin 		 * the other one specifically and then escalating two levels.
10502668bb2aSAlexander Motin 		 */
105162fa74d9SJeff Roberson 		if (cpu == -1) {
10522668bb2aSAlexander Motin 			if (goup) {
105362fa74d9SJeff Roberson 				cg = cg->cg_parent;
10542668bb2aSAlexander Motin 				goup = 0;
10552668bb2aSAlexander Motin 			}
10562668bb2aSAlexander Motin 			parent = cg->cg_parent;
10572668bb2aSAlexander Motin 			if (parent == NULL)
105897e9382dSDon Lewis 				return (1);
10592668bb2aSAlexander Motin 			if (parent->cg_children == 2) {
10602668bb2aSAlexander Motin 				if (cg == &parent->cg_child[0])
10612668bb2aSAlexander Motin 					cg = &parent->cg_child[1];
10622668bb2aSAlexander Motin 				else
10632668bb2aSAlexander Motin 					cg = &parent->cg_child[0];
10642668bb2aSAlexander Motin 				goup = 1;
10652668bb2aSAlexander Motin 			} else
10662668bb2aSAlexander Motin 				cg = parent;
106780f86c9fSJeff Roberson 			continue;
10687b8bfa0dSJeff Roberson 		}
10697b8bfa0dSJeff Roberson 		steal = TDQ_CPU(cpu);
107097e9382dSDon Lewis 		/*
107197e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
107297e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
107397e9382dSDon Lewis 		 *
107497e9382dSDon Lewis 		 * Testing this ahead of tdq_lock_pair() only catches
107597e9382dSDon Lewis 		 * this situation about 20% of the time on an 8 core
107697e9382dSDon Lewis 		 * 16 thread Ryzen 7, but it still helps performance.
107797e9382dSDon Lewis 		 */
107811484ad8SMark Johnston 		if (TDQ_LOAD(steal) < steal_thresh ||
107911484ad8SMark Johnston 		    TDQ_TRANSFERABLE(steal) == 0)
108097e9382dSDon Lewis 			goto restart;
108197e9382dSDon Lewis 		/*
10828bb173fbSAlexander Motin 		 * Try to lock both queues. If we are assigned a thread while
10838bb173fbSAlexander Motin 		 * waited for the lock, switch to it now instead of stealing.
10848bb173fbSAlexander Motin 		 * If we can't get the lock, then somebody likely got there
10858bb173fbSAlexander Motin 		 * first so continue searching.
108697e9382dSDon Lewis 		 */
10878bb173fbSAlexander Motin 		TDQ_LOCK(tdq);
10888bb173fbSAlexander Motin 		if (tdq->tdq_load > 0) {
10898bb173fbSAlexander Motin 			mi_switch(SW_VOL | SWT_IDLE);
10908bb173fbSAlexander Motin 			return (0);
10918bb173fbSAlexander Motin 		}
10928bb173fbSAlexander Motin 		if (TDQ_TRYLOCK_FLAGS(steal, MTX_DUPOK) == 0) {
10938bb173fbSAlexander Motin 			TDQ_UNLOCK(tdq);
10948bb173fbSAlexander Motin 			CPU_CLR(cpu, &mask);
10958bb173fbSAlexander Motin 			continue;
10968bb173fbSAlexander Motin 		}
109797e9382dSDon Lewis 		/*
109897e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
109997e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread, or
110097e9382dSDon Lewis 		 * we were preempted and the CPU loading info may be out
110197e9382dSDon Lewis 		 * of date.  The latter is rare.  In either case restart
110297e9382dSDon Lewis 		 * the search.
110397e9382dSDon Lewis 		 */
110411484ad8SMark Johnston 		if (TDQ_LOAD(steal) < steal_thresh ||
110511484ad8SMark Johnston 		    TDQ_TRANSFERABLE(steal) == 0 ||
110611484ad8SMark Johnston 		    switchcnt != TDQ_SWITCHCNT(tdq)) {
11077fcf154aSJeff Roberson 			tdq_unlock_pair(tdq, steal);
110897e9382dSDon Lewis 			goto restart;
110962fa74d9SJeff Roberson 		}
111062fa74d9SJeff Roberson 		/*
111197e9382dSDon Lewis 		 * Steal the thread and switch to it.
111262fa74d9SJeff Roberson 		 */
11136d3f74a1SMark Johnston 		if (tdq_move(steal, tdq) != -1)
111497e9382dSDon Lewis 			break;
111597e9382dSDon Lewis 		/*
111697e9382dSDon Lewis 		 * We failed to acquire a thread even though it looked
111797e9382dSDon Lewis 		 * like one was available.  This could be due to affinity
111897e9382dSDon Lewis 		 * restrictions or for other reasons.  Loop again after
111997e9382dSDon Lewis 		 * removing this CPU from the set.  The restart logic
112097e9382dSDon Lewis 		 * above does not restore this CPU to the set due to the
112197e9382dSDon Lewis 		 * likelyhood of failing here again.
112297e9382dSDon Lewis 		 */
112397e9382dSDon Lewis 		CPU_CLR(cpu, &mask);
112462fa74d9SJeff Roberson 		tdq_unlock_pair(tdq, steal);
112580f86c9fSJeff Roberson 	}
1126ae7a6b38SJeff Roberson 	TDQ_UNLOCK(steal);
1127686bcb5cSJeff Roberson 	mi_switch(SW_VOL | SWT_IDLE);
11287b8bfa0dSJeff Roberson 	return (0);
112922bf7d9aSJeff Roberson }
113022bf7d9aSJeff Roberson 
1131ae7a6b38SJeff Roberson /*
1132ae7a6b38SJeff Roberson  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
11336d3f74a1SMark Johnston  *
11346d3f74a1SMark Johnston  * "lowpri" is the minimum scheduling priority among all threads on
11356d3f74a1SMark Johnston  * the queue prior to the addition of the new thread.
1136ae7a6b38SJeff Roberson  */
113722bf7d9aSJeff Roberson static void
11386d3f74a1SMark Johnston tdq_notify(struct tdq *tdq, int lowpri)
113922bf7d9aSJeff Roberson {
11407b8bfa0dSJeff Roberson 	int cpu;
114122bf7d9aSJeff Roberson 
11426d3f74a1SMark Johnston 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
11436d3f74a1SMark Johnston 	KASSERT(tdq->tdq_lowpri <= lowpri,
11446d3f74a1SMark Johnston 	    ("tdq_notify: lowpri %d > tdq_lowpri %d", lowpri, tdq->tdq_lowpri));
11456d3f74a1SMark Johnston 
11467789ab32SMark Johnston 	if (tdq->tdq_owepreempt)
1147ff256d9cSJeff Roberson 		return;
11486d3f74a1SMark Johnston 
11496d3f74a1SMark Johnston 	/*
11506d3f74a1SMark Johnston 	 * Check to see if the newly added thread should preempt the one
11516d3f74a1SMark Johnston 	 * currently running.
11526d3f74a1SMark Johnston 	 */
11536d3f74a1SMark Johnston 	if (!sched_shouldpreempt(tdq->tdq_lowpri, lowpri, 1))
11546b2f763fSJeff Roberson 		return;
115579654969SAlexander Motin 
115679654969SAlexander Motin 	/*
1157ae9e9b4fSAlexander Motin 	 * Make sure that our caller's earlier update to tdq_load is
1158ae9e9b4fSAlexander Motin 	 * globally visible before we read tdq_cpu_idle.  Idle thread
115979654969SAlexander Motin 	 * accesses both of them without locks, and the order is important.
116079654969SAlexander Motin 	 */
1161e8677f38SKonstantin Belousov 	atomic_thread_fence_seq_cst();
116279654969SAlexander Motin 
11631690c6c1SJeff Roberson 	/*
11646d3f74a1SMark Johnston 	 * Try to figure out if we can signal the idle thread instead of sending
11656d3f74a1SMark Johnston 	 * an IPI.  This check is racy; at worst, we will deliever an IPI
11666d3f74a1SMark Johnston 	 * unnecessarily.
11676c47aaaeSJeff Roberson 	 */
11686d3f74a1SMark Johnston 	cpu = TDQ_ID(tdq);
11696d3f74a1SMark Johnston 	if (TD_IS_IDLETHREAD(tdq->tdq_curthread) &&
117011484ad8SMark Johnston 	    (atomic_load_int(&tdq->tdq_cpu_idle) == 0 || cpu_idle_wakeup(cpu)))
11716c47aaaeSJeff Roberson 		return;
11727789ab32SMark Johnston 
11737789ab32SMark Johnston 	/*
11747789ab32SMark Johnston 	 * The run queues have been updated, so any switch on the remote CPU
11757789ab32SMark Johnston 	 * will satisfy the preemption request.
11767789ab32SMark Johnston 	 */
11777789ab32SMark Johnston 	tdq->tdq_owepreempt = 1;
1178d9d8d144SJohn Baldwin 	ipi_cpu(cpu, IPI_PREEMPT);
117922bf7d9aSJeff Roberson }
118022bf7d9aSJeff Roberson 
1181ae7a6b38SJeff Roberson /*
1182ae7a6b38SJeff Roberson  * Steals load from a timeshare queue.  Honors the rotating queue head
1183ae7a6b38SJeff Roberson  * index.
1184ae7a6b38SJeff Roberson  */
11859727e637SJeff Roberson static struct thread *
118662fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start)
1187ae7a6b38SJeff Roberson {
1188ae7a6b38SJeff Roberson 	struct rqbits *rqb;
1189ae7a6b38SJeff Roberson 	struct rqhead *rqh;
119036acfc65SAlexander Motin 	struct thread *td, *first;
1191ae7a6b38SJeff Roberson 	int bit;
1192ae7a6b38SJeff Roberson 	int i;
1193ae7a6b38SJeff Roberson 
1194ae7a6b38SJeff Roberson 	rqb = &rq->rq_status;
1195ae7a6b38SJeff Roberson 	bit = start & (RQB_BPW -1);
119636acfc65SAlexander Motin 	first = NULL;
1197ae7a6b38SJeff Roberson again:
1198ae7a6b38SJeff Roberson 	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
1199ae7a6b38SJeff Roberson 		if (rqb->rqb_bits[i] == 0)
1200ae7a6b38SJeff Roberson 			continue;
12018bc713f6SJeff Roberson 		if (bit == 0)
12028bc713f6SJeff Roberson 			bit = RQB_FFS(rqb->rqb_bits[i]);
12038bc713f6SJeff Roberson 		for (; bit < RQB_BPW; bit++) {
12048bc713f6SJeff Roberson 			if ((rqb->rqb_bits[i] & (1ul << bit)) == 0)
1205ae7a6b38SJeff Roberson 				continue;
12068bc713f6SJeff Roberson 			rqh = &rq->rq_queues[bit + (i << RQB_L2BPW)];
12079727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq) {
1208bd84094aSAlexander Motin 				if (first) {
1209bd84094aSAlexander Motin 					if (THREAD_CAN_MIGRATE(td) &&
12109727e637SJeff Roberson 					    THREAD_CAN_SCHED(td, cpu))
12119727e637SJeff Roberson 						return (td);
1212bd84094aSAlexander Motin 				} else
121336acfc65SAlexander Motin 					first = td;
1214ae7a6b38SJeff Roberson 			}
1215ae7a6b38SJeff Roberson 		}
12168bc713f6SJeff Roberson 	}
1217ae7a6b38SJeff Roberson 	if (start != 0) {
1218ae7a6b38SJeff Roberson 		start = 0;
1219ae7a6b38SJeff Roberson 		goto again;
1220ae7a6b38SJeff Roberson 	}
1221ae7a6b38SJeff Roberson 
122236acfc65SAlexander Motin 	if (first && THREAD_CAN_MIGRATE(first) &&
122336acfc65SAlexander Motin 	    THREAD_CAN_SCHED(first, cpu))
122436acfc65SAlexander Motin 		return (first);
1225ae7a6b38SJeff Roberson 	return (NULL);
1226ae7a6b38SJeff Roberson }
1227ae7a6b38SJeff Roberson 
1228ae7a6b38SJeff Roberson /*
1229ae7a6b38SJeff Roberson  * Steals load from a standard linear queue.
1230ae7a6b38SJeff Roberson  */
12319727e637SJeff Roberson static struct thread *
123262fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu)
123322bf7d9aSJeff Roberson {
123422bf7d9aSJeff Roberson 	struct rqhead *rqh;
123522bf7d9aSJeff Roberson 	struct rqbits *rqb;
12369727e637SJeff Roberson 	struct thread *td;
123722bf7d9aSJeff Roberson 	int word;
123822bf7d9aSJeff Roberson 	int bit;
123922bf7d9aSJeff Roberson 
124022bf7d9aSJeff Roberson 	rqb = &rq->rq_status;
124122bf7d9aSJeff Roberson 	for (word = 0; word < RQB_LEN; word++) {
124222bf7d9aSJeff Roberson 		if (rqb->rqb_bits[word] == 0)
124322bf7d9aSJeff Roberson 			continue;
124422bf7d9aSJeff Roberson 		for (bit = 0; bit < RQB_BPW; bit++) {
1245a2640c9bSPeter Wemm 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
124622bf7d9aSJeff Roberson 				continue;
124722bf7d9aSJeff Roberson 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
12489727e637SJeff Roberson 			TAILQ_FOREACH(td, rqh, td_runq)
12499727e637SJeff Roberson 				if (THREAD_CAN_MIGRATE(td) &&
12509727e637SJeff Roberson 				    THREAD_CAN_SCHED(td, cpu))
12519727e637SJeff Roberson 					return (td);
125222bf7d9aSJeff Roberson 		}
125322bf7d9aSJeff Roberson 	}
125422bf7d9aSJeff Roberson 	return (NULL);
125522bf7d9aSJeff Roberson }
125622bf7d9aSJeff Roberson 
1257ae7a6b38SJeff Roberson /*
1258ae7a6b38SJeff Roberson  * Attempt to steal a thread in priority order from a thread queue.
1259ae7a6b38SJeff Roberson  */
12609727e637SJeff Roberson static struct thread *
126162fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu)
126222bf7d9aSJeff Roberson {
12639727e637SJeff Roberson 	struct thread *td;
126422bf7d9aSJeff Roberson 
1265ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
12669727e637SJeff Roberson 	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
12679727e637SJeff Roberson 		return (td);
12689727e637SJeff Roberson 	if ((td = runq_steal_from(&tdq->tdq_timeshare,
12699727e637SJeff Roberson 	    cpu, tdq->tdq_ridx)) != NULL)
12709727e637SJeff Roberson 		return (td);
127162fa74d9SJeff Roberson 	return (runq_steal(&tdq->tdq_idle, cpu));
127222bf7d9aSJeff Roberson }
127380f86c9fSJeff Roberson 
1274ae7a6b38SJeff Roberson /*
1275ae7a6b38SJeff Roberson  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
12767fcf154aSJeff Roberson  * current lock and returns with the assigned queue locked.
1277ae7a6b38SJeff Roberson  */
1278ae7a6b38SJeff Roberson static inline struct tdq *
12799727e637SJeff Roberson sched_setcpu(struct thread *td, int cpu, int flags)
128080f86c9fSJeff Roberson {
12819727e637SJeff Roberson 
1282ae7a6b38SJeff Roberson 	struct tdq *tdq;
128361a74c5cSJeff Roberson 	struct mtx *mtx;
128480f86c9fSJeff Roberson 
12859727e637SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1286ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
128793ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_cpu = cpu;
12889727e637SJeff Roberson 	/*
12899727e637SJeff Roberson 	 * If the lock matches just return the queue.
12909727e637SJeff Roberson 	 */
129161a74c5cSJeff Roberson 	if (td->td_lock == TDQ_LOCKPTR(tdq)) {
129261a74c5cSJeff Roberson 		KASSERT((flags & SRQ_HOLD) == 0,
129361a74c5cSJeff Roberson 		    ("sched_setcpu: Invalid lock for SRQ_HOLD"));
1294ae7a6b38SJeff Roberson 		return (tdq);
1295ae7a6b38SJeff Roberson 	}
129661a74c5cSJeff Roberson 
129780f86c9fSJeff Roberson 	/*
1298ae7a6b38SJeff Roberson 	 * The hard case, migration, we need to block the thread first to
1299ae7a6b38SJeff Roberson 	 * prevent order reversals with other cpus locks.
13007b8bfa0dSJeff Roberson 	 */
1301b0b9dee5SAttilio Rao 	spinlock_enter();
130261a74c5cSJeff Roberson 	mtx = thread_lock_block(td);
130361a74c5cSJeff Roberson 	if ((flags & SRQ_HOLD) == 0)
130461a74c5cSJeff Roberson 		mtx_unlock_spin(mtx);
1305ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1306ae7a6b38SJeff Roberson 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1307b0b9dee5SAttilio Rao 	spinlock_exit();
1308ae7a6b38SJeff Roberson 	return (tdq);
130980f86c9fSJeff Roberson }
13102454aaf5SJeff Roberson 
13118df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding");
13128df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity");
13138df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity");
13148df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load");
13158df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu");
13168df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration");
13178df78c41SJeff Roberson 
1318ae7a6b38SJeff Roberson static int
13199727e637SJeff Roberson sched_pickcpu(struct thread *td, int flags)
1320ae7a6b38SJeff Roberson {
132136acfc65SAlexander Motin 	struct cpu_group *cg, *ccg;
13229727e637SJeff Roberson 	struct td_sched *ts;
1323ae7a6b38SJeff Roberson 	struct tdq *tdq;
1324aefe0a8cSAlexander Motin 	cpuset_t *mask;
1325e745d729SAlexander Motin 	int cpu, pri, r, self, intr;
13267b8bfa0dSJeff Roberson 
132762fa74d9SJeff Roberson 	self = PCPU_GET(cpuid);
132893ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1329efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on "
1330efe67753SNathan Whitehorn 	    "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name));
13317b8bfa0dSJeff Roberson 	if (smp_started == 0)
13327b8bfa0dSJeff Roberson 		return (self);
133328994a58SJeff Roberson 	/*
133428994a58SJeff Roberson 	 * Don't migrate a running thread from sched_switch().
133528994a58SJeff Roberson 	 */
133662fa74d9SJeff Roberson 	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
133762fa74d9SJeff Roberson 		return (ts->ts_cpu);
13387b8bfa0dSJeff Roberson 	/*
133962fa74d9SJeff Roberson 	 * Prefer to run interrupt threads on the processors that generate
134062fa74d9SJeff Roberson 	 * the interrupt.
13417b8bfa0dSJeff Roberson 	 */
134262fa74d9SJeff Roberson 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
1343c9205e35SAlexander Motin 	    curthread->td_intr_nesting_level) {
1344c55dc51cSAlexander Motin 		tdq = TDQ_SELF();
1345c55dc51cSAlexander Motin 		if (tdq->tdq_lowpri >= PRI_MIN_IDLE) {
1346c55dc51cSAlexander Motin 			SCHED_STAT_INC(pickcpu_idle_affinity);
1347c55dc51cSAlexander Motin 			return (self);
1348c55dc51cSAlexander Motin 		}
134962fa74d9SJeff Roberson 		ts->ts_cpu = self;
1350c9205e35SAlexander Motin 		intr = 1;
1351c55dc51cSAlexander Motin 		cg = tdq->tdq_cg;
1352c55dc51cSAlexander Motin 		goto llc;
1353c55dc51cSAlexander Motin 	} else {
1354c9205e35SAlexander Motin 		intr = 0;
1355c55dc51cSAlexander Motin 		tdq = TDQ_CPU(ts->ts_cpu);
1356c55dc51cSAlexander Motin 		cg = tdq->tdq_cg;
1357c55dc51cSAlexander Motin 	}
13587b8bfa0dSJeff Roberson 	/*
135936acfc65SAlexander Motin 	 * If the thread can run on the last cpu and the affinity has not
13600127914cSEric van Gyzen 	 * expired and it is idle, run it there.
13617b8bfa0dSJeff Roberson 	 */
136236acfc65SAlexander Motin 	if (THREAD_CAN_SCHED(td, ts->ts_cpu) &&
13636cbc4cebSMark Johnston 	    atomic_load_char(&tdq->tdq_lowpri) >= PRI_MIN_IDLE &&
136436acfc65SAlexander Motin 	    SCHED_AFFINITY(ts, CG_SHARE_L2)) {
1365c55dc51cSAlexander Motin 		if (cg->cg_flags & CG_FLAG_THREAD) {
1366176dd236SAlexander Motin 			/* Check all SMT threads for being idle. */
1367aefe0a8cSAlexander Motin 			for (cpu = cg->cg_first; cpu <= cg->cg_last; cpu++) {
136811484ad8SMark Johnston 				pri =
136911484ad8SMark Johnston 				    atomic_load_char(&TDQ_CPU(cpu)->tdq_lowpri);
1370176dd236SAlexander Motin 				if (CPU_ISSET(cpu, &cg->cg_mask) &&
137111484ad8SMark Johnston 				    pri < PRI_MIN_IDLE)
137262fa74d9SJeff Roberson 					break;
1373aefe0a8cSAlexander Motin 			}
1374aefe0a8cSAlexander Motin 			if (cpu > cg->cg_last) {
1375176dd236SAlexander Motin 				SCHED_STAT_INC(pickcpu_idle_affinity);
1376176dd236SAlexander Motin 				return (ts->ts_cpu);
137736acfc65SAlexander Motin 			}
1378176dd236SAlexander Motin 		} else {
137936acfc65SAlexander Motin 			SCHED_STAT_INC(pickcpu_idle_affinity);
138036acfc65SAlexander Motin 			return (ts->ts_cpu);
138136acfc65SAlexander Motin 		}
138236acfc65SAlexander Motin 	}
1383c55dc51cSAlexander Motin llc:
138436acfc65SAlexander Motin 	/*
138536acfc65SAlexander Motin 	 * Search for the last level cache CPU group in the tree.
1386c9205e35SAlexander Motin 	 * Skip SMT, identical groups and caches with expired affinity.
1387c9205e35SAlexander Motin 	 * Interrupt threads affinity is explicit and never expires.
138836acfc65SAlexander Motin 	 */
138936acfc65SAlexander Motin 	for (ccg = NULL; cg != NULL; cg = cg->cg_parent) {
139036acfc65SAlexander Motin 		if (cg->cg_flags & CG_FLAG_THREAD)
139136acfc65SAlexander Motin 			continue;
1392c9205e35SAlexander Motin 		if (cg->cg_children == 1 || cg->cg_count == 1)
1393c9205e35SAlexander Motin 			continue;
1394c9205e35SAlexander Motin 		if (cg->cg_level == CG_SHARE_NONE ||
1395c9205e35SAlexander Motin 		    (!intr && !SCHED_AFFINITY(ts, cg->cg_level)))
139636acfc65SAlexander Motin 			continue;
139736acfc65SAlexander Motin 		ccg = cg;
139836acfc65SAlexander Motin 	}
1399c9205e35SAlexander Motin 	/* Found LLC shared by all CPUs, so do a global search. */
1400c9205e35SAlexander Motin 	if (ccg == cpu_top)
1401c9205e35SAlexander Motin 		ccg = NULL;
140262fa74d9SJeff Roberson 	cpu = -1;
1403aefe0a8cSAlexander Motin 	mask = &td->td_cpuset->cs_mask;
1404c9205e35SAlexander Motin 	pri = td->td_priority;
1405e745d729SAlexander Motin 	r = TD_IS_RUNNING(td);
1406c9205e35SAlexander Motin 	/*
1407c9205e35SAlexander Motin 	 * Try hard to keep interrupts within found LLC.  Search the LLC for
1408c9205e35SAlexander Motin 	 * the least loaded CPU we can run now.  For NUMA systems it should
1409c9205e35SAlexander Motin 	 * be within target domain, and it also reduces scheduling overhead.
1410c9205e35SAlexander Motin 	 */
1411c9205e35SAlexander Motin 	if (ccg != NULL && intr) {
1412e745d729SAlexander Motin 		cpu = sched_lowest(ccg, mask, pri, INT_MAX, ts->ts_cpu, r);
1413c9205e35SAlexander Motin 		if (cpu >= 0)
1414c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_intrbind);
1415c9205e35SAlexander Motin 	} else
1416c9205e35SAlexander Motin 	/* Search the LLC for the least loaded idle CPU we can run now. */
1417c9205e35SAlexander Motin 	if (ccg != NULL) {
1418c9205e35SAlexander Motin 		cpu = sched_lowest(ccg, mask, max(pri, PRI_MAX_TIMESHARE),
1419e745d729SAlexander Motin 		    INT_MAX, ts->ts_cpu, r);
1420c9205e35SAlexander Motin 		if (cpu >= 0)
1421c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_affinity);
1422c9205e35SAlexander Motin 	}
1423c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU we can run now. */
1424c9205e35SAlexander Motin 	if (cpu < 0) {
1425e745d729SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, pri, INT_MAX, ts->ts_cpu, r);
1426c9205e35SAlexander Motin 		if (cpu >= 0)
1427c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1428c9205e35SAlexander Motin 	}
1429c9205e35SAlexander Motin 	/* Search globally for the least loaded CPU. */
1430c9205e35SAlexander Motin 	if (cpu < 0) {
1431e745d729SAlexander Motin 		cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu, r);
1432c9205e35SAlexander Motin 		if (cpu >= 0)
1433c9205e35SAlexander Motin 			SCHED_STAT_INC(pickcpu_lowest);
1434c9205e35SAlexander Motin 	}
1435bb3dfc6aSAlexander Motin 	KASSERT(cpu >= 0, ("sched_pickcpu: Failed to find a cpu."));
1436efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu));
143762fa74d9SJeff Roberson 	/*
143862fa74d9SJeff Roberson 	 * Compare the lowest loaded cpu to current cpu.
143962fa74d9SJeff Roberson 	 */
1440018ff686SJeff Roberson 	tdq = TDQ_CPU(cpu);
1441018ff686SJeff Roberson 	if (THREAD_CAN_SCHED(td, self) && TDQ_SELF()->tdq_lowpri > pri &&
144211484ad8SMark Johnston 	    atomic_load_char(&tdq->tdq_lowpri) < PRI_MIN_IDLE &&
144311484ad8SMark Johnston 	    TDQ_LOAD(TDQ_SELF()) <= TDQ_LOAD(tdq) + 1) {
14448df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_local);
144562fa74d9SJeff Roberson 		cpu = self;
1446c9205e35SAlexander Motin 	}
14478df78c41SJeff Roberson 	if (cpu != ts->ts_cpu)
14488df78c41SJeff Roberson 		SCHED_STAT_INC(pickcpu_migration);
1449ae7a6b38SJeff Roberson 	return (cpu);
145080f86c9fSJeff Roberson }
145162fa74d9SJeff Roberson #endif
145222bf7d9aSJeff Roberson 
145322bf7d9aSJeff Roberson /*
145422bf7d9aSJeff Roberson  * Pick the highest priority task we have and return it.
14550c0a98b2SJeff Roberson  */
14569727e637SJeff Roberson static struct thread *
1457ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq)
14585d7ef00cSJeff Roberson {
14599727e637SJeff Roberson 	struct thread *td;
14605d7ef00cSJeff Roberson 
1461ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
14629727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_realtime);
14639727e637SJeff Roberson 	if (td != NULL)
14649727e637SJeff Roberson 		return (td);
14659727e637SJeff Roberson 	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
14669727e637SJeff Roberson 	if (td != NULL) {
146712d56c0fSJohn Baldwin 		KASSERT(td->td_priority >= PRI_MIN_BATCH,
1468e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on timeshare queue %d",
14699727e637SJeff Roberson 		    td->td_priority));
14709727e637SJeff Roberson 		return (td);
147115dc847eSJeff Roberson 	}
14729727e637SJeff Roberson 	td = runq_choose(&tdq->tdq_idle);
14739727e637SJeff Roberson 	if (td != NULL) {
14749727e637SJeff Roberson 		KASSERT(td->td_priority >= PRI_MIN_IDLE,
1475e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on idle queue %d",
14769727e637SJeff Roberson 		    td->td_priority));
14779727e637SJeff Roberson 		return (td);
1478e7d50326SJeff Roberson 	}
1479e7d50326SJeff Roberson 
1480e7d50326SJeff Roberson 	return (NULL);
1481245f3abfSJeff Roberson }
14820a016a05SJeff Roberson 
1483ae7a6b38SJeff Roberson /*
1484ae7a6b38SJeff Roberson  * Initialize a thread queue.
1485ae7a6b38SJeff Roberson  */
14860a016a05SJeff Roberson static void
1487018ff686SJeff Roberson tdq_setup(struct tdq *tdq, int id)
14880a016a05SJeff Roberson {
1489ae7a6b38SJeff Roberson 
1490c47f202bSJeff Roberson 	if (bootverbose)
1491018ff686SJeff Roberson 		printf("ULE: setup cpu %d\n", id);
1492e7d50326SJeff Roberson 	runq_init(&tdq->tdq_realtime);
1493e7d50326SJeff Roberson 	runq_init(&tdq->tdq_timeshare);
1494d2ad694cSJeff Roberson 	runq_init(&tdq->tdq_idle);
1495018ff686SJeff Roberson 	tdq->tdq_id = id;
149662fa74d9SJeff Roberson 	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
149762fa74d9SJeff Roberson 	    "sched lock %d", (int)TDQ_ID(tdq));
149861a74c5cSJeff Roberson 	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", MTX_SPIN);
14998f51ad55SJeff Roberson #ifdef KTR
15008f51ad55SJeff Roberson 	snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
15018f51ad55SJeff Roberson 	    "CPU %d load", (int)TDQ_ID(tdq));
15028f51ad55SJeff Roberson #endif
15030a016a05SJeff Roberson }
15040a016a05SJeff Roberson 
1505c47f202bSJeff Roberson #ifdef SMP
1506c47f202bSJeff Roberson static void
1507c47f202bSJeff Roberson sched_setup_smp(void)
1508c47f202bSJeff Roberson {
1509c47f202bSJeff Roberson 	struct tdq *tdq;
1510c47f202bSJeff Roberson 	int i;
1511c47f202bSJeff Roberson 
151262fa74d9SJeff Roberson 	cpu_top = smp_topo();
15133aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
1514018ff686SJeff Roberson 		tdq = DPCPU_ID_PTR(i, tdq);
1515018ff686SJeff Roberson 		tdq_setup(tdq, i);
151662fa74d9SJeff Roberson 		tdq->tdq_cg = smp_topo_find(cpu_top, i);
151762fa74d9SJeff Roberson 		if (tdq->tdq_cg == NULL)
151862fa74d9SJeff Roberson 			panic("Can't find cpu group for %d\n", i);
1519ca34553bSAlexander Motin 		DPCPU_ID_SET(i, randomval, i * 69069 + 5);
1520c47f202bSJeff Roberson 	}
1521018ff686SJeff Roberson 	PCPU_SET(sched, DPCPU_PTR(tdq));
152262fa74d9SJeff Roberson 	balance_tdq = TDQ_SELF();
1523c47f202bSJeff Roberson }
1524c47f202bSJeff Roberson #endif
1525c47f202bSJeff Roberson 
1526ae7a6b38SJeff Roberson /*
1527ae7a6b38SJeff Roberson  * Setup the thread queues and initialize the topology based on MD
1528ae7a6b38SJeff Roberson  * information.
1529ae7a6b38SJeff Roberson  */
153035e6168fSJeff Roberson static void
153135e6168fSJeff Roberson sched_setup(void *dummy)
153235e6168fSJeff Roberson {
1533ae7a6b38SJeff Roberson 	struct tdq *tdq;
1534c47f202bSJeff Roberson 
15350ec896fdSJeff Roberson #ifdef SMP
1536c47f202bSJeff Roberson 	sched_setup_smp();
1537749d01b0SJeff Roberson #else
1538018ff686SJeff Roberson 	tdq_setup(TDQ_SELF(), 0);
1539356500a3SJeff Roberson #endif
1540018ff686SJeff Roberson 	tdq = TDQ_SELF();
1541ae7a6b38SJeff Roberson 
1542ae7a6b38SJeff Roberson 	/* Add thread0's load since it's running. */
1543ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1544e1504695SJeff Roberson 	thread0.td_lock = TDQ_LOCKPTR(tdq);
15459727e637SJeff Roberson 	tdq_load_add(tdq, &thread0);
15466d3f74a1SMark Johnston 	tdq->tdq_curthread = &thread0;
154762fa74d9SJeff Roberson 	tdq->tdq_lowpri = thread0.td_priority;
1548ae7a6b38SJeff Roberson 	TDQ_UNLOCK(tdq);
154935e6168fSJeff Roberson }
155035e6168fSJeff Roberson 
1551ae7a6b38SJeff Roberson /*
1552579895dfSAlexander Motin  * This routine determines time constants after stathz and hz are setup.
1553ae7a6b38SJeff Roberson  */
1554a1d4fe69SDavid Xu /* ARGSUSED */
1555a1d4fe69SDavid Xu static void
1556a1d4fe69SDavid Xu sched_initticks(void *dummy)
1557a1d4fe69SDavid Xu {
1558ae7a6b38SJeff Roberson 	int incr;
1559ae7a6b38SJeff Roberson 
1560a1d4fe69SDavid Xu 	realstathz = stathz ? stathz : hz;
15615e5c3873SJeff Roberson 	sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;
15625e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
156337f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
156437f4e025SAlexander Motin 	    realstathz);
1565a1d4fe69SDavid Xu 
1566a1d4fe69SDavid Xu 	/*
1567e7d50326SJeff Roberson 	 * tickincr is shifted out by 10 to avoid rounding errors due to
15683f872f85SJeff Roberson 	 * hz not being evenly divisible by stathz on all platforms.
1569e7d50326SJeff Roberson 	 */
1570ae7a6b38SJeff Roberson 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1571e7d50326SJeff Roberson 	/*
1572e7d50326SJeff Roberson 	 * This does not work for values of stathz that are more than
1573e7d50326SJeff Roberson 	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
1574a1d4fe69SDavid Xu 	 */
1575ae7a6b38SJeff Roberson 	if (incr == 0)
1576ae7a6b38SJeff Roberson 		incr = 1;
1577ae7a6b38SJeff Roberson 	tickincr = incr;
15787b8bfa0dSJeff Roberson #ifdef SMP
15799862717aSJeff Roberson 	/*
15807fcf154aSJeff Roberson 	 * Set the default balance interval now that we know
15817fcf154aSJeff Roberson 	 * what realstathz is.
15827fcf154aSJeff Roberson 	 */
15837fcf154aSJeff Roberson 	balance_interval = realstathz;
1584290d9060SDon Lewis 	balance_ticks = balance_interval;
15857b8bfa0dSJeff Roberson 	affinity = SCHED_AFFINITY_DEFAULT;
15867b8bfa0dSJeff Roberson #endif
1587b3f40a41SAlexander Motin 	if (sched_idlespinthresh < 0)
15882c27cb3aSAlexander Motin 		sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz;
1589a1d4fe69SDavid Xu }
1590a1d4fe69SDavid Xu 
159135e6168fSJeff Roberson /*
1592ae7a6b38SJeff Roberson  * This is the core of the interactivity algorithm.  Determines a score based
1593ae7a6b38SJeff Roberson  * on past behavior.  It is the ratio of sleep time to run time scaled to
1594ae7a6b38SJeff Roberson  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1595ae7a6b38SJeff Roberson  * differs from the cpu usage because it does not account for time spent
1596ae7a6b38SJeff Roberson  * waiting on a run-queue.  Would be prettier if we had floating point.
159757031f79SGeorge V. Neville-Neil  *
159857031f79SGeorge V. Neville-Neil  * When a thread's sleep time is greater than its run time the
159957031f79SGeorge V. Neville-Neil  * calculation is:
160057031f79SGeorge V. Neville-Neil  *
160157031f79SGeorge V. Neville-Neil  *                           scaling factor
160257031f79SGeorge V. Neville-Neil  * interactivity score =  ---------------------
160357031f79SGeorge V. Neville-Neil  *                        sleep time / run time
160457031f79SGeorge V. Neville-Neil  *
160557031f79SGeorge V. Neville-Neil  *
160657031f79SGeorge V. Neville-Neil  * When a thread's run time is greater than its sleep time the
160757031f79SGeorge V. Neville-Neil  * calculation is:
160857031f79SGeorge V. Neville-Neil  *
160957031f79SGeorge V. Neville-Neil  *                                                 scaling factor
161043521b46Swiklam  * interactivity score = 2 * scaling factor  -  ---------------------
161157031f79SGeorge V. Neville-Neil  *                                              run time / sleep time
1612ae7a6b38SJeff Roberson  */
1613ae7a6b38SJeff Roberson static int
1614ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1615ae7a6b38SJeff Roberson {
1616ae7a6b38SJeff Roberson 	struct td_sched *ts;
1617ae7a6b38SJeff Roberson 	int div;
1618ae7a6b38SJeff Roberson 
161993ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1620ae7a6b38SJeff Roberson 	/*
1621ae7a6b38SJeff Roberson 	 * The score is only needed if this is likely to be an interactive
1622ae7a6b38SJeff Roberson 	 * task.  Don't go through the expense of computing it if there's
1623ae7a6b38SJeff Roberson 	 * no chance.
1624ae7a6b38SJeff Roberson 	 */
1625ae7a6b38SJeff Roberson 	if (sched_interact <= SCHED_INTERACT_HALF &&
1626ae7a6b38SJeff Roberson 		ts->ts_runtime >= ts->ts_slptime)
1627ae7a6b38SJeff Roberson 			return (SCHED_INTERACT_HALF);
1628ae7a6b38SJeff Roberson 
1629ae7a6b38SJeff Roberson 	if (ts->ts_runtime > ts->ts_slptime) {
1630ae7a6b38SJeff Roberson 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1631ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF +
1632ae7a6b38SJeff Roberson 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1633ae7a6b38SJeff Roberson 	}
1634ae7a6b38SJeff Roberson 	if (ts->ts_slptime > ts->ts_runtime) {
1635ae7a6b38SJeff Roberson 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1636ae7a6b38SJeff Roberson 		return (ts->ts_runtime / div);
1637ae7a6b38SJeff Roberson 	}
1638ae7a6b38SJeff Roberson 	/* runtime == slptime */
1639ae7a6b38SJeff Roberson 	if (ts->ts_runtime)
1640ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF);
1641ae7a6b38SJeff Roberson 
1642ae7a6b38SJeff Roberson 	/*
1643ae7a6b38SJeff Roberson 	 * This can happen if slptime and runtime are 0.
1644ae7a6b38SJeff Roberson 	 */
1645ae7a6b38SJeff Roberson 	return (0);
1646ae7a6b38SJeff Roberson 
1647ae7a6b38SJeff Roberson }
1648ae7a6b38SJeff Roberson 
1649ae7a6b38SJeff Roberson /*
165035e6168fSJeff Roberson  * Scale the scheduling priority according to the "interactivity" of this
165135e6168fSJeff Roberson  * process.
165235e6168fSJeff Roberson  */
165315dc847eSJeff Roberson static void
16548460a577SJohn Birrell sched_priority(struct thread *td)
165535e6168fSJeff Roberson {
16561c119e17SAlexander Motin 	u_int pri, score;
165735e6168fSJeff Roberson 
1658c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
165915dc847eSJeff Roberson 		return;
1660e7d50326SJeff Roberson 	/*
1661e7d50326SJeff Roberson 	 * If the score is interactive we place the thread in the realtime
1662e7d50326SJeff Roberson 	 * queue with a priority that is less than kernel and interrupt
1663e7d50326SJeff Roberson 	 * priorities.  These threads are not subject to nice restrictions.
1664e7d50326SJeff Roberson 	 *
1665ae7a6b38SJeff Roberson 	 * Scores greater than this are placed on the normal timeshare queue
1666e7d50326SJeff Roberson 	 * where the priority is partially decided by the most recent cpu
1667e7d50326SJeff Roberson 	 * utilization and the rest is decided by nice value.
1668a5423ea3SJeff Roberson 	 *
1669a5423ea3SJeff Roberson 	 * The nice value of the process has a linear effect on the calculated
1670a5423ea3SJeff Roberson 	 * score.  Negative nice values make it easier for a thread to be
1671a5423ea3SJeff Roberson 	 * considered interactive.
1672e7d50326SJeff Roberson 	 */
1673a0f15352SJohn Baldwin 	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1674e7d50326SJeff Roberson 	if (score < sched_interact) {
167512d56c0fSJohn Baldwin 		pri = PRI_MIN_INTERACT;
16761c119e17SAlexander Motin 		pri += (PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) * score /
16771c119e17SAlexander Motin 		    sched_interact;
167812d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
16791c119e17SAlexander Motin 		    ("sched_priority: invalid interactive priority %u score %u",
16809a93305aSJeff Roberson 		    pri, score));
1681e7d50326SJeff Roberson 	} else {
1682e7d50326SJeff Roberson 		pri = SCHED_PRI_MIN;
168393ccd6bfSKonstantin Belousov 		if (td_get_sched(td)->ts_ticks)
168493ccd6bfSKonstantin Belousov 			pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
16855457fa23SJohn Baldwin 			    SCHED_PRI_RANGE - 1);
1686e7d50326SJeff Roberson 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
168712d56c0fSJohn Baldwin 		KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
16881c119e17SAlexander Motin 		    ("sched_priority: invalid priority %u: nice %d, "
1689ae7a6b38SJeff Roberson 		    "ticks %d ftick %d ltick %d tick pri %d",
169093ccd6bfSKonstantin Belousov 		    pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks,
169193ccd6bfSKonstantin Belousov 		    td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick,
169293ccd6bfSKonstantin Belousov 		    SCHED_PRI_TICKS(td_get_sched(td))));
1693e7d50326SJeff Roberson 	}
16948460a577SJohn Birrell 	sched_user_prio(td, pri);
169535e6168fSJeff Roberson 
169615dc847eSJeff Roberson 	return;
169735e6168fSJeff Roberson }
169835e6168fSJeff Roberson 
169935e6168fSJeff Roberson /*
1700d322132cSJeff Roberson  * This routine enforces a maximum limit on the amount of scheduling history
1701ae7a6b38SJeff Roberson  * kept.  It is called after either the slptime or runtime is adjusted.  This
1702ae7a6b38SJeff Roberson  * function is ugly due to integer math.
1703d322132cSJeff Roberson  */
17044b60e324SJeff Roberson static void
17058460a577SJohn Birrell sched_interact_update(struct thread *td)
17064b60e324SJeff Roberson {
1707155b6ca1SJeff Roberson 	struct td_sched *ts;
17089a93305aSJeff Roberson 	u_int sum;
17093f741ca1SJeff Roberson 
171093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
1711ae7a6b38SJeff Roberson 	sum = ts->ts_runtime + ts->ts_slptime;
1712d322132cSJeff Roberson 	if (sum < SCHED_SLP_RUN_MAX)
1713d322132cSJeff Roberson 		return;
1714d322132cSJeff Roberson 	/*
1715155b6ca1SJeff Roberson 	 * This only happens from two places:
1716155b6ca1SJeff Roberson 	 * 1) We have added an unusual amount of run time from fork_exit.
1717155b6ca1SJeff Roberson 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1718155b6ca1SJeff Roberson 	 */
1719155b6ca1SJeff Roberson 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1720ae7a6b38SJeff Roberson 		if (ts->ts_runtime > ts->ts_slptime) {
1721ae7a6b38SJeff Roberson 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1722ae7a6b38SJeff Roberson 			ts->ts_slptime = 1;
1723155b6ca1SJeff Roberson 		} else {
1724ae7a6b38SJeff Roberson 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1725ae7a6b38SJeff Roberson 			ts->ts_runtime = 1;
1726155b6ca1SJeff Roberson 		}
1727155b6ca1SJeff Roberson 		return;
1728155b6ca1SJeff Roberson 	}
1729155b6ca1SJeff Roberson 	/*
1730d322132cSJeff Roberson 	 * If we have exceeded by more than 1/5th then the algorithm below
1731d322132cSJeff Roberson 	 * will not bring us back into range.  Dividing by two here forces
17322454aaf5SJeff Roberson 	 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX]
1733d322132cSJeff Roberson 	 */
173437a35e4aSJeff Roberson 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1735ae7a6b38SJeff Roberson 		ts->ts_runtime /= 2;
1736ae7a6b38SJeff Roberson 		ts->ts_slptime /= 2;
1737d322132cSJeff Roberson 		return;
1738d322132cSJeff Roberson 	}
1739ae7a6b38SJeff Roberson 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1740ae7a6b38SJeff Roberson 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1741d322132cSJeff Roberson }
1742d322132cSJeff Roberson 
1743ae7a6b38SJeff Roberson /*
1744ae7a6b38SJeff Roberson  * Scale back the interactivity history when a child thread is created.  The
1745ae7a6b38SJeff Roberson  * history is inherited from the parent but the thread may behave totally
1746ae7a6b38SJeff Roberson  * differently.  For example, a shell spawning a compiler process.  We want
1747ae7a6b38SJeff Roberson  * to learn that the compiler is behaving badly very quickly.
1748ae7a6b38SJeff Roberson  */
1749d322132cSJeff Roberson static void
17508460a577SJohn Birrell sched_interact_fork(struct thread *td)
1751d322132cSJeff Roberson {
175293ccd6bfSKonstantin Belousov 	struct td_sched *ts;
1753d322132cSJeff Roberson 	int ratio;
1754d322132cSJeff Roberson 	int sum;
1755d322132cSJeff Roberson 
175693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
175793ccd6bfSKonstantin Belousov 	sum = ts->ts_runtime + ts->ts_slptime;
1758d322132cSJeff Roberson 	if (sum > SCHED_SLP_RUN_FORK) {
1759d322132cSJeff Roberson 		ratio = sum / SCHED_SLP_RUN_FORK;
176093ccd6bfSKonstantin Belousov 		ts->ts_runtime /= ratio;
176193ccd6bfSKonstantin Belousov 		ts->ts_slptime /= ratio;
17624b60e324SJeff Roberson 	}
17634b60e324SJeff Roberson }
17644b60e324SJeff Roberson 
176515dc847eSJeff Roberson /*
1766ae7a6b38SJeff Roberson  * Called from proc0_init() to setup the scheduler fields.
1767ed062c8dSJulian Elischer  */
1768ed062c8dSJulian Elischer void
1769ed062c8dSJulian Elischer schedinit(void)
1770ed062c8dSJulian Elischer {
177193ccd6bfSKonstantin Belousov 	struct td_sched *ts0;
1772e7d50326SJeff Roberson 
1773ed062c8dSJulian Elischer 	/*
177493ccd6bfSKonstantin Belousov 	 * Set up the scheduler specific parts of thread0.
1775ed062c8dSJulian Elischer 	 */
177693ccd6bfSKonstantin Belousov 	ts0 = td_get_sched(&thread0);
177793ccd6bfSKonstantin Belousov 	ts0->ts_ltick = ticks;
177893ccd6bfSKonstantin Belousov 	ts0->ts_ftick = ticks;
177993ccd6bfSKonstantin Belousov 	ts0->ts_slice = 0;
17801408b84aSHans Petter Selasky 	ts0->ts_cpu = curcpu;	/* set valid CPU number */
1781ed062c8dSJulian Elischer }
1782ed062c8dSJulian Elischer 
1783ed062c8dSJulian Elischer /*
1784589aed00SKyle Evans  * schedinit_ap() is needed prior to calling sched_throw(NULL) to ensure that
1785589aed00SKyle Evans  * the pcpu requirements are met for any calls in the period between curthread
1786589aed00SKyle Evans  * initialization and sched_throw().  One can safely add threads to the queue
1787589aed00SKyle Evans  * before sched_throw(), for instance, as long as the thread lock is setup
1788589aed00SKyle Evans  * correctly.
1789589aed00SKyle Evans  *
1790589aed00SKyle Evans  * TDQ_SELF() relies on the below sched pcpu setting; it may be used only
1791589aed00SKyle Evans  * after schedinit_ap().
1792589aed00SKyle Evans  */
1793589aed00SKyle Evans void
1794589aed00SKyle Evans schedinit_ap(void)
1795589aed00SKyle Evans {
1796589aed00SKyle Evans 
1797589aed00SKyle Evans #ifdef SMP
1798589aed00SKyle Evans 	PCPU_SET(sched, DPCPU_PTR(tdq));
1799589aed00SKyle Evans #endif
1800589aed00SKyle Evans 	PCPU_GET(idlethread)->td_lock = TDQ_LOCKPTR(TDQ_SELF());
1801589aed00SKyle Evans }
1802589aed00SKyle Evans 
1803589aed00SKyle Evans /*
180415dc847eSJeff Roberson  * This is only somewhat accurate since given many processes of the same
180515dc847eSJeff Roberson  * priority they will switch when their slices run out, which will be
1806e7d50326SJeff Roberson  * at most sched_slice stathz ticks.
180715dc847eSJeff Roberson  */
180835e6168fSJeff Roberson int
180935e6168fSJeff Roberson sched_rr_interval(void)
181035e6168fSJeff Roberson {
1811e7d50326SJeff Roberson 
1812579895dfSAlexander Motin 	/* Convert sched_slice from stathz to hz. */
181337f4e025SAlexander Motin 	return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
181435e6168fSJeff Roberson }
181535e6168fSJeff Roberson 
1816ae7a6b38SJeff Roberson /*
1817ae7a6b38SJeff Roberson  * Update the percent cpu tracking information when it is requested or
1818ae7a6b38SJeff Roberson  * the total history exceeds the maximum.  We keep a sliding history of
1819ae7a6b38SJeff Roberson  * tick counts that slowly decays.  This is less precise than the 4BSD
1820ae7a6b38SJeff Roberson  * mechanism since it happens with less regular and frequent events.
1821ae7a6b38SJeff Roberson  */
182222bf7d9aSJeff Roberson static void
18237295465eSAlexander Motin sched_pctcpu_update(struct td_sched *ts, int run)
182435e6168fSJeff Roberson {
18257295465eSAlexander Motin 	int t = ticks;
1826e7d50326SJeff Roberson 
182778133024SMark Johnston 	/*
182878133024SMark Johnston 	 * The signed difference may be negative if the thread hasn't run for
182978133024SMark Johnston 	 * over half of the ticks rollover period.
183078133024SMark Johnston 	 */
183178133024SMark Johnston 	if ((u_int)(t - ts->ts_ltick) >= SCHED_TICK_TARG) {
1832ad1e7d28SJulian Elischer 		ts->ts_ticks = 0;
18337295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
18347295465eSAlexander Motin 	} else if (t - ts->ts_ftick >= SCHED_TICK_MAX) {
18357295465eSAlexander Motin 		ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) *
18367295465eSAlexander Motin 		    (ts->ts_ltick - (t - SCHED_TICK_TARG));
18377295465eSAlexander Motin 		ts->ts_ftick = t - SCHED_TICK_TARG;
18387295465eSAlexander Motin 	}
18397295465eSAlexander Motin 	if (run)
18407295465eSAlexander Motin 		ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT;
18417295465eSAlexander Motin 	ts->ts_ltick = t;
184235e6168fSJeff Roberson }
184335e6168fSJeff Roberson 
1844ae7a6b38SJeff Roberson /*
1845ae7a6b38SJeff Roberson  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1846ae7a6b38SJeff Roberson  * if necessary.  This is the back-end for several priority related
1847ae7a6b38SJeff Roberson  * functions.
1848ae7a6b38SJeff Roberson  */
1849e7d50326SJeff Roberson static void
1850f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio)
185135e6168fSJeff Roberson {
185273daf66fSJeff Roberson 	struct tdq *tdq;
185373daf66fSJeff Roberson 	int oldpri;
185435e6168fSJeff Roberson 
18558f51ad55SJeff Roberson 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
18568f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, "new prio:%d", prio,
18578f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(curthread));
1858d9fae5abSAndriy Gapon 	SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
1859e87fc7cfSAndriy Gapon 	if (td != curthread && prio < td->td_priority) {
18608f51ad55SJeff Roberson 		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
18618f51ad55SJeff Roberson 		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
18628f51ad55SJeff Roberson 		    prio, KTR_ATTR_LINKED, sched_tdname(td));
1863d9fae5abSAndriy Gapon 		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
1864b3e9e682SRyan Stone 		    curthread);
18658f51ad55SJeff Roberson 	}
18667b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1867f5c157d9SJohn Baldwin 	if (td->td_priority == prio)
1868f5c157d9SJohn Baldwin 		return;
18693f741ca1SJeff Roberson 	/*
18703f741ca1SJeff Roberson 	 * If the priority has been elevated due to priority
18713f741ca1SJeff Roberson 	 * propagation, we may have to move ourselves to a new
1872e7d50326SJeff Roberson 	 * queue.  This could be optimized to not re-add in some
1873e7d50326SJeff Roberson 	 * cases.
1874f2b74cbfSJeff Roberson 	 */
18756d55b3ecSJeff Roberson 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1876e7d50326SJeff Roberson 		sched_rem(td);
1877e7d50326SJeff Roberson 		td->td_priority = prio;
187861a74c5cSJeff Roberson 		sched_add(td, SRQ_BORROWING | SRQ_HOLDTD);
187973daf66fSJeff Roberson 		return;
188073daf66fSJeff Roberson 	}
18816d55b3ecSJeff Roberson 	/*
18826d55b3ecSJeff Roberson 	 * If the thread is currently running we may have to adjust the lowpri
18836d55b3ecSJeff Roberson 	 * information so other cpus are aware of our current priority.
18846d55b3ecSJeff Roberson 	 */
18856d55b3ecSJeff Roberson 	if (TD_IS_RUNNING(td)) {
18864aec1984SJohn Baldwin 		tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
188762fa74d9SJeff Roberson 		oldpri = td->td_priority;
18883f741ca1SJeff Roberson 		td->td_priority = prio;
188962fa74d9SJeff Roberson 		if (prio < tdq->tdq_lowpri)
189062fa74d9SJeff Roberson 			tdq->tdq_lowpri = prio;
189162fa74d9SJeff Roberson 		else if (tdq->tdq_lowpri == oldpri)
189262fa74d9SJeff Roberson 			tdq_setlowpri(tdq, td);
18936d55b3ecSJeff Roberson 		return;
189473daf66fSJeff Roberson 	}
18956d55b3ecSJeff Roberson 	td->td_priority = prio;
1896ae7a6b38SJeff Roberson }
189735e6168fSJeff Roberson 
1898f5c157d9SJohn Baldwin /*
1899f5c157d9SJohn Baldwin  * Update a thread's priority when it is lent another thread's
1900f5c157d9SJohn Baldwin  * priority.
1901f5c157d9SJohn Baldwin  */
1902f5c157d9SJohn Baldwin void
1903f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1904f5c157d9SJohn Baldwin {
1905f5c157d9SJohn Baldwin 
1906f5c157d9SJohn Baldwin 	td->td_flags |= TDF_BORROWING;
1907f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1908f5c157d9SJohn Baldwin }
1909f5c157d9SJohn Baldwin 
1910f5c157d9SJohn Baldwin /*
1911f5c157d9SJohn Baldwin  * Restore a thread's priority when priority propagation is
1912f5c157d9SJohn Baldwin  * over.  The prio argument is the minimum priority the thread
1913f5c157d9SJohn Baldwin  * needs to have to satisfy other possible priority lending
1914f5c157d9SJohn Baldwin  * requests.  If the thread's regular priority is less
1915f5c157d9SJohn Baldwin  * important than prio, the thread will keep a priority boost
1916f5c157d9SJohn Baldwin  * of prio.
1917f5c157d9SJohn Baldwin  */
1918f5c157d9SJohn Baldwin void
1919f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1920f5c157d9SJohn Baldwin {
1921f5c157d9SJohn Baldwin 	u_char base_pri;
1922f5c157d9SJohn Baldwin 
1923f5c157d9SJohn Baldwin 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1924f5c157d9SJohn Baldwin 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
19258460a577SJohn Birrell 		base_pri = td->td_user_pri;
1926f5c157d9SJohn Baldwin 	else
1927f5c157d9SJohn Baldwin 		base_pri = td->td_base_pri;
1928f5c157d9SJohn Baldwin 	if (prio >= base_pri) {
1929f5c157d9SJohn Baldwin 		td->td_flags &= ~TDF_BORROWING;
1930f5c157d9SJohn Baldwin 		sched_thread_priority(td, base_pri);
1931f5c157d9SJohn Baldwin 	} else
1932f5c157d9SJohn Baldwin 		sched_lend_prio(td, prio);
1933f5c157d9SJohn Baldwin }
1934f5c157d9SJohn Baldwin 
1935ae7a6b38SJeff Roberson /*
1936ae7a6b38SJeff Roberson  * Standard entry for setting the priority to an absolute value.
1937ae7a6b38SJeff Roberson  */
1938f5c157d9SJohn Baldwin void
1939f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1940f5c157d9SJohn Baldwin {
1941f5c157d9SJohn Baldwin 	u_char oldprio;
1942f5c157d9SJohn Baldwin 
1943f5c157d9SJohn Baldwin 	/* First, update the base priority. */
1944f5c157d9SJohn Baldwin 	td->td_base_pri = prio;
1945f5c157d9SJohn Baldwin 
1946f5c157d9SJohn Baldwin 	/*
194750aaa791SJohn Baldwin 	 * If the thread is borrowing another thread's priority, don't
1948f5c157d9SJohn Baldwin 	 * ever lower the priority.
1949f5c157d9SJohn Baldwin 	 */
1950f5c157d9SJohn Baldwin 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1951f5c157d9SJohn Baldwin 		return;
1952f5c157d9SJohn Baldwin 
1953f5c157d9SJohn Baldwin 	/* Change the real priority. */
1954f5c157d9SJohn Baldwin 	oldprio = td->td_priority;
1955f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1956f5c157d9SJohn Baldwin 
1957f5c157d9SJohn Baldwin 	/*
1958f5c157d9SJohn Baldwin 	 * If the thread is on a turnstile, then let the turnstile update
1959f5c157d9SJohn Baldwin 	 * its state.
1960f5c157d9SJohn Baldwin 	 */
1961f5c157d9SJohn Baldwin 	if (TD_ON_LOCK(td) && oldprio != prio)
1962f5c157d9SJohn Baldwin 		turnstile_adjust(td, oldprio);
1963f5c157d9SJohn Baldwin }
1964f5c157d9SJohn Baldwin 
1965ae7a6b38SJeff Roberson /*
1966fea89a28SJohn Baldwin  * Set the base interrupt thread priority.
1967fea89a28SJohn Baldwin  */
1968fea89a28SJohn Baldwin void
1969fea89a28SJohn Baldwin sched_ithread_prio(struct thread *td, u_char prio)
1970fea89a28SJohn Baldwin {
1971fea89a28SJohn Baldwin 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1972fea89a28SJohn Baldwin 	MPASS(td->td_pri_class == PRI_ITHD);
1973fea89a28SJohn Baldwin 	td->td_base_ithread_pri = prio;
1974fea89a28SJohn Baldwin 	sched_prio(td, prio);
1975fea89a28SJohn Baldwin }
1976fea89a28SJohn Baldwin 
1977fea89a28SJohn Baldwin /*
1978ae7a6b38SJeff Roberson  * Set the base user priority, does not effect current running priority.
1979ae7a6b38SJeff Roberson  */
198035e6168fSJeff Roberson void
19818460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
19823db720fdSDavid Xu {
19833db720fdSDavid Xu 
19848460a577SJohn Birrell 	td->td_base_user_pri = prio;
1985acbe332aSDavid Xu 	if (td->td_lend_user_pri <= prio)
1986fc6c30f6SJulian Elischer 		return;
19878460a577SJohn Birrell 	td->td_user_pri = prio;
19883db720fdSDavid Xu }
19893db720fdSDavid Xu 
19903db720fdSDavid Xu void
19913db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
19923db720fdSDavid Xu {
19933db720fdSDavid Xu 
1994435806d3SDavid Xu 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1995acbe332aSDavid Xu 	td->td_lend_user_pri = prio;
1996c8e368a9SDavid Xu 	td->td_user_pri = min(prio, td->td_base_user_pri);
1997c8e368a9SDavid Xu 	if (td->td_priority > td->td_user_pri)
1998c8e368a9SDavid Xu 		sched_prio(td, td->td_user_pri);
1999c8e368a9SDavid Xu 	else if (td->td_priority != td->td_user_pri)
2000c6d31b83SKonstantin Belousov 		ast_sched_locked(td, TDA_SCHED);
2001435806d3SDavid Xu }
20023db720fdSDavid Xu 
2003ac97da9aSMateusz Guzik /*
2004ac97da9aSMateusz Guzik  * Like the above but first check if there is anything to do.
2005ac97da9aSMateusz Guzik  */
2006ac97da9aSMateusz Guzik void
2007ac97da9aSMateusz Guzik sched_lend_user_prio_cond(struct thread *td, u_char prio)
2008ac97da9aSMateusz Guzik {
2009ac97da9aSMateusz Guzik 
2010*aeff15b3SOlivier Certner 	if (td->td_lend_user_pri == prio)
2011ac97da9aSMateusz Guzik 		return;
2012ac97da9aSMateusz Guzik 
2013ac97da9aSMateusz Guzik 	thread_lock(td);
2014ac97da9aSMateusz Guzik 	sched_lend_user_prio(td, prio);
2015ac97da9aSMateusz Guzik 	thread_unlock(td);
2016ac97da9aSMateusz Guzik }
2017ac97da9aSMateusz Guzik 
20184c8a8cfcSKonstantin Belousov #ifdef SMP
2019ae7a6b38SJeff Roberson /*
202097e9382dSDon Lewis  * This tdq is about to idle.  Try to steal a thread from another CPU before
202197e9382dSDon Lewis  * choosing the idle thread.
202297e9382dSDon Lewis  */
202397e9382dSDon Lewis static void
202497e9382dSDon Lewis tdq_trysteal(struct tdq *tdq)
202597e9382dSDon Lewis {
20262668bb2aSAlexander Motin 	struct cpu_group *cg, *parent;
202797e9382dSDon Lewis 	struct tdq *steal;
202897e9382dSDon Lewis 	cpuset_t mask;
20292668bb2aSAlexander Motin 	int cpu, i, goup;
203097e9382dSDon Lewis 
203108063e9fSAlexander Motin 	if (smp_started == 0 || steal_idle == 0 || trysteal_limit == 0 ||
203208063e9fSAlexander Motin 	    tdq->tdq_cg == NULL)
203397e9382dSDon Lewis 		return;
203497e9382dSDon Lewis 	CPU_FILL(&mask);
203597e9382dSDon Lewis 	CPU_CLR(PCPU_GET(cpuid), &mask);
203697e9382dSDon Lewis 	/* We don't want to be preempted while we're iterating. */
203797e9382dSDon Lewis 	spinlock_enter();
203897e9382dSDon Lewis 	TDQ_UNLOCK(tdq);
20392668bb2aSAlexander Motin 	for (i = 1, cg = tdq->tdq_cg, goup = 0; ; ) {
204008063e9fSAlexander Motin 		cpu = sched_highest(cg, &mask, steal_thresh, 1);
204197e9382dSDon Lewis 		/*
204297e9382dSDon Lewis 		 * If a thread was added while interrupts were disabled don't
204397e9382dSDon Lewis 		 * steal one here.
204497e9382dSDon Lewis 		 */
204511484ad8SMark Johnston 		if (TDQ_LOAD(tdq) > 0) {
204697e9382dSDon Lewis 			TDQ_LOCK(tdq);
204797e9382dSDon Lewis 			break;
204897e9382dSDon Lewis 		}
20492668bb2aSAlexander Motin 
20502668bb2aSAlexander Motin 		/*
20512668bb2aSAlexander Motin 		 * We found no CPU to steal from in this group.  Escalate to
20522668bb2aSAlexander Motin 		 * the parent and repeat.  But if parent has only two children
20532668bb2aSAlexander Motin 		 * groups we can avoid searching this group again by searching
20542668bb2aSAlexander Motin 		 * the other one specifically and then escalating two levels.
20552668bb2aSAlexander Motin 		 */
205697e9382dSDon Lewis 		if (cpu == -1) {
20572668bb2aSAlexander Motin 			if (goup) {
205897e9382dSDon Lewis 				cg = cg->cg_parent;
20592668bb2aSAlexander Motin 				goup = 0;
20602668bb2aSAlexander Motin 			}
20612668bb2aSAlexander Motin 			if (++i > trysteal_limit) {
206297e9382dSDon Lewis 				TDQ_LOCK(tdq);
206397e9382dSDon Lewis 				break;
206497e9382dSDon Lewis 			}
20652668bb2aSAlexander Motin 			parent = cg->cg_parent;
20662668bb2aSAlexander Motin 			if (parent == NULL) {
20672668bb2aSAlexander Motin 				TDQ_LOCK(tdq);
20682668bb2aSAlexander Motin 				break;
20692668bb2aSAlexander Motin 			}
20702668bb2aSAlexander Motin 			if (parent->cg_children == 2) {
20712668bb2aSAlexander Motin 				if (cg == &parent->cg_child[0])
20722668bb2aSAlexander Motin 					cg = &parent->cg_child[1];
20732668bb2aSAlexander Motin 				else
20742668bb2aSAlexander Motin 					cg = &parent->cg_child[0];
20752668bb2aSAlexander Motin 				goup = 1;
20762668bb2aSAlexander Motin 			} else
20772668bb2aSAlexander Motin 				cg = parent;
207897e9382dSDon Lewis 			continue;
207997e9382dSDon Lewis 		}
208097e9382dSDon Lewis 		steal = TDQ_CPU(cpu);
208197e9382dSDon Lewis 		/*
208297e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
208397e9382dSDon Lewis 		 * the chosen CPU no longer has an eligible thread.
208415b5c347SGordon Bergling 		 * At this point unconditionally exit the loop to bound
208508063e9fSAlexander Motin 		 * the time spent in the critcal section.
208697e9382dSDon Lewis 		 */
208711484ad8SMark Johnston 		if (TDQ_LOAD(steal) < steal_thresh ||
208811484ad8SMark Johnston 		    TDQ_TRANSFERABLE(steal) == 0)
208997e9382dSDon Lewis 			continue;
209097e9382dSDon Lewis 		/*
20918bb173fbSAlexander Motin 		 * Try to lock both queues. If we are assigned a thread while
20928bb173fbSAlexander Motin 		 * waited for the lock, switch to it now instead of stealing.
20938bb173fbSAlexander Motin 		 * If we can't get the lock, then somebody likely got there
209408063e9fSAlexander Motin 		 * first.
209597e9382dSDon Lewis 		 */
20968bb173fbSAlexander Motin 		TDQ_LOCK(tdq);
20978bb173fbSAlexander Motin 		if (tdq->tdq_load > 0)
209897e9382dSDon Lewis 			break;
20998bb173fbSAlexander Motin 		if (TDQ_TRYLOCK_FLAGS(steal, MTX_DUPOK) == 0)
21008bb173fbSAlexander Motin 			break;
210197e9382dSDon Lewis 		/*
210297e9382dSDon Lewis 		 * The data returned by sched_highest() is stale and
210397e9382dSDon Lewis                  * the chosen CPU no longer has an eligible thread.
210497e9382dSDon Lewis 		 */
210511484ad8SMark Johnston 		if (TDQ_LOAD(steal) < steal_thresh ||
210611484ad8SMark Johnston 		    TDQ_TRANSFERABLE(steal) == 0) {
210797e9382dSDon Lewis 			TDQ_UNLOCK(steal);
210897e9382dSDon Lewis 			break;
210997e9382dSDon Lewis 		}
211097e9382dSDon Lewis 		/*
211197e9382dSDon Lewis 		 * If we fail to acquire one due to affinity restrictions,
211297e9382dSDon Lewis 		 * bail out and let the idle thread to a more complete search
211397e9382dSDon Lewis 		 * outside of a critical section.
211497e9382dSDon Lewis 		 */
21156d3f74a1SMark Johnston 		if (tdq_move(steal, tdq) == -1) {
211697e9382dSDon Lewis 			TDQ_UNLOCK(steal);
211797e9382dSDon Lewis 			break;
211897e9382dSDon Lewis 		}
211997e9382dSDon Lewis 		TDQ_UNLOCK(steal);
212097e9382dSDon Lewis 		break;
212197e9382dSDon Lewis 	}
212297e9382dSDon Lewis 	spinlock_exit();
212397e9382dSDon Lewis }
21244c8a8cfcSKonstantin Belousov #endif
212597e9382dSDon Lewis 
212697e9382dSDon Lewis /*
2127c47f202bSJeff Roberson  * Handle migration from sched_switch().  This happens only for
2128c47f202bSJeff Roberson  * cpu binding.
2129c47f202bSJeff Roberson  */
2130c47f202bSJeff Roberson static struct mtx *
2131c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
2132c47f202bSJeff Roberson {
2133c47f202bSJeff Roberson 	struct tdq *tdn;
21346eeba7dbSMateusz Guzik #ifdef SMP
21356d3f74a1SMark Johnston 	int lowpri;
21366eeba7dbSMateusz Guzik #endif
2137c47f202bSJeff Roberson 
2138686bcb5cSJeff Roberson 	KASSERT(THREAD_CAN_MIGRATE(td) ||
2139686bcb5cSJeff Roberson 	    (td_get_sched(td)->ts_flags & TSF_BOUND) != 0,
2140686bcb5cSJeff Roberson 	    ("Thread %p shouldn't migrate", td));
2141efe67753SNathan Whitehorn 	KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: "
2142efe67753SNathan Whitehorn 	    "thread %s queued on absent CPU %d.", td->td_name,
2143efe67753SNathan Whitehorn 	    td_get_sched(td)->ts_cpu));
214493ccd6bfSKonstantin Belousov 	tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
2145c47f202bSJeff Roberson #ifdef SMP
21469727e637SJeff Roberson 	tdq_load_rem(tdq, td);
2147c47f202bSJeff Roberson 	/*
2148686bcb5cSJeff Roberson 	 * Do the lock dance required to avoid LOR.  We have an
2149686bcb5cSJeff Roberson 	 * extra spinlock nesting from sched_switch() which will
2150686bcb5cSJeff Roberson 	 * prevent preemption while we're holding neither run-queue lock.
2151c47f202bSJeff Roberson 	 */
2152686bcb5cSJeff Roberson 	TDQ_UNLOCK(tdq);
2153686bcb5cSJeff Roberson 	TDQ_LOCK(tdn);
21546d3f74a1SMark Johnston 	lowpri = tdq_add(tdn, td, flags);
21556d3f74a1SMark Johnston 	tdq_notify(tdn, lowpri);
2156c47f202bSJeff Roberson 	TDQ_UNLOCK(tdn);
2157686bcb5cSJeff Roberson 	TDQ_LOCK(tdq);
2158c47f202bSJeff Roberson #endif
2159c47f202bSJeff Roberson 	return (TDQ_LOCKPTR(tdn));
2160c47f202bSJeff Roberson }
2161c47f202bSJeff Roberson 
2162c47f202bSJeff Roberson /*
216361a74c5cSJeff Roberson  * thread_lock_unblock() that does not assume td_lock is blocked.
2164ae7a6b38SJeff Roberson  */
2165ae7a6b38SJeff Roberson static inline void
2166ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx)
2167ae7a6b38SJeff Roberson {
2168ae7a6b38SJeff Roberson 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
2169ae7a6b38SJeff Roberson 	    (uintptr_t)mtx);
2170ae7a6b38SJeff Roberson }
2171ae7a6b38SJeff Roberson 
2172ae7a6b38SJeff Roberson /*
2173ae7a6b38SJeff Roberson  * Switch threads.  This function has to handle threads coming in while
2174ae7a6b38SJeff Roberson  * blocked for some reason, running, or idle.  It also must deal with
2175ae7a6b38SJeff Roberson  * migrating a thread from one queue to another as running threads may
2176ae7a6b38SJeff Roberson  * be assigned elsewhere via binding.
2177ae7a6b38SJeff Roberson  */
21783db720fdSDavid Xu void
2179686bcb5cSJeff Roberson sched_switch(struct thread *td, int flags)
218035e6168fSJeff Roberson {
2181686bcb5cSJeff Roberson 	struct thread *newtd;
2182c02bbb43SJeff Roberson 	struct tdq *tdq;
2183ad1e7d28SJulian Elischer 	struct td_sched *ts;
2184ae7a6b38SJeff Roberson 	struct mtx *mtx;
2185c47f202bSJeff Roberson 	int srqflag;
21868db16699SAlexander Motin 	int cpuid, preempted;
21878db16699SAlexander Motin #ifdef SMP
21888db16699SAlexander Motin 	int pickcpu;
21898db16699SAlexander Motin #endif
219035e6168fSJeff Roberson 
21917b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
219235e6168fSJeff Roberson 
2193ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2194018ff686SJeff Roberson 	tdq = TDQ_SELF();
219593ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
21967295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
21978db16699SAlexander Motin #ifdef SMP
2198e745d729SAlexander Motin 	pickcpu = (td->td_flags & TDF_PICKCPU) != 0;
2199e745d729SAlexander Motin 	if (pickcpu)
2200e745d729SAlexander Motin 		ts->ts_rltick = ticks - affinity * MAX_CACHE_LEVELS;
2201e745d729SAlexander Motin 	else
2202ae7a6b38SJeff Roberson 		ts->ts_rltick = ticks;
22038db16699SAlexander Motin #endif
2204060563ecSJulian Elischer 	td->td_lastcpu = td->td_oncpu;
2205ad9dadc4SAndriy Gapon 	preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
2206ad9dadc4SAndriy Gapon 	    (flags & SW_PREEMPT) != 0;
2207c6d31b83SKonstantin Belousov 	td->td_flags &= ~(TDF_PICKCPU | TDF_SLICEEND);
2208c6d31b83SKonstantin Belousov 	ast_unsched_locked(td, TDA_SCHED);
220977918643SStephan Uphoff 	td->td_owepreempt = 0;
221011484ad8SMark Johnston 	atomic_store_char(&tdq->tdq_owepreempt, 0);
22112c27cb3aSAlexander Motin 	if (!TD_IS_IDLETHREAD(td))
221211484ad8SMark Johnston 		TDQ_SWITCHCNT_INC(tdq);
22137789ab32SMark Johnston 
2214b11fdad0SJeff Roberson 	/*
2215686bcb5cSJeff Roberson 	 * Always block the thread lock so we can drop the tdq lock early.
2216b11fdad0SJeff Roberson 	 */
2217686bcb5cSJeff Roberson 	mtx = thread_lock_block(td);
2218686bcb5cSJeff Roberson 	spinlock_enter();
2219486a9414SJulian Elischer 	if (TD_IS_IDLETHREAD(td)) {
2220686bcb5cSJeff Roberson 		MPASS(mtx == TDQ_LOCKPTR(tdq));
2221bf0acc27SJohn Baldwin 		TD_SET_CAN_RUN(td);
22227b20fb19SJeff Roberson 	} else if (TD_IS_RUNNING(td)) {
2223686bcb5cSJeff Roberson 		MPASS(mtx == TDQ_LOCKPTR(tdq));
22246a3c02bcSOlivier Certner 		srqflag = SRQ_OURSELF | SRQ_YIELDING |
22256a3c02bcSOlivier Certner 		    (preempted ? SRQ_PREEMPTED : 0);
2226ba4932b5SMatthew D Fleming #ifdef SMP
2227e745d729SAlexander Motin 		if (THREAD_CAN_MIGRATE(td) && (!THREAD_CAN_SCHED(td, ts->ts_cpu)
2228e745d729SAlexander Motin 		    || pickcpu))
22290f7a0ebdSMatthew D Fleming 			ts->ts_cpu = sched_pickcpu(td, 0);
2230ba4932b5SMatthew D Fleming #endif
2231c47f202bSJeff Roberson 		if (ts->ts_cpu == cpuid)
22329727e637SJeff Roberson 			tdq_runq_add(tdq, td, srqflag);
2233686bcb5cSJeff Roberson 		else
2234c47f202bSJeff Roberson 			mtx = sched_switch_migrate(tdq, td, srqflag);
2235ae7a6b38SJeff Roberson 	} else {
2236ae7a6b38SJeff Roberson 		/* This thread must be going to sleep. */
223761a74c5cSJeff Roberson 		if (mtx != TDQ_LOCKPTR(tdq)) {
223861a74c5cSJeff Roberson 			mtx_unlock_spin(mtx);
223961a74c5cSJeff Roberson 			TDQ_LOCK(tdq);
224061a74c5cSJeff Roberson 		}
22419727e637SJeff Roberson 		tdq_load_rem(tdq, td);
22424c8a8cfcSKonstantin Belousov #ifdef SMP
224397e9382dSDon Lewis 		if (tdq->tdq_load == 0)
224497e9382dSDon Lewis 			tdq_trysteal(tdq);
22454c8a8cfcSKonstantin Belousov #endif
2246ae7a6b38SJeff Roberson 	}
2247afa0a46cSAndriy Gapon 
2248afa0a46cSAndriy Gapon #if (KTR_COMPILE & KTR_SCHED) != 0
2249afa0a46cSAndriy Gapon 	if (TD_IS_IDLETHREAD(td))
2250afa0a46cSAndriy Gapon 		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
2251afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority);
2252afa0a46cSAndriy Gapon 	else
2253afa0a46cSAndriy Gapon 		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
2254afa0a46cSAndriy Gapon 		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
2255afa0a46cSAndriy Gapon 		    "lockname:\"%s\"", td->td_lockname);
2256afa0a46cSAndriy Gapon #endif
2257afa0a46cSAndriy Gapon 
2258ae7a6b38SJeff Roberson 	/*
2259ae7a6b38SJeff Roberson 	 * We enter here with the thread blocked and assigned to the
2260ae7a6b38SJeff Roberson 	 * appropriate cpu run-queue or sleep-queue and with the current
2261ae7a6b38SJeff Roberson 	 * thread-queue locked.
2262ae7a6b38SJeff Roberson 	 */
2263ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
22646d3f74a1SMark Johnston 	MPASS(td == tdq->tdq_curthread);
22652454aaf5SJeff Roberson 	newtd = choosethread();
2266686bcb5cSJeff Roberson 	sched_pctcpu_update(td_get_sched(newtd), 0);
2267686bcb5cSJeff Roberson 	TDQ_UNLOCK(tdq);
2268686bcb5cSJeff Roberson 
2269ae7a6b38SJeff Roberson 	/*
2270ae7a6b38SJeff Roberson 	 * Call the MD code to switch contexts if necessary.
2271ae7a6b38SJeff Roberson 	 */
2272ebccf1e3SJoseph Koshy 	if (td != newtd) {
2273ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2274ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2275ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
2276ebccf1e3SJoseph Koshy #endif
2277d9fae5abSAndriy Gapon 		SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
22786f5f25e5SJohn Birrell 
22796f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS
22806f5f25e5SJohn Birrell 		/*
22816f5f25e5SJohn Birrell 		 * If DTrace has set the active vtime enum to anything
22826f5f25e5SJohn Birrell 		 * other than INACTIVE (0), then it should have set the
22836f5f25e5SJohn Birrell 		 * function to call.
22846f5f25e5SJohn Birrell 		 */
22856f5f25e5SJohn Birrell 		if (dtrace_vtime_active)
22866f5f25e5SJohn Birrell 			(*dtrace_vtime_switch_func)(newtd);
22876f5f25e5SJohn Birrell #endif
2288686bcb5cSJeff Roberson 		td->td_oncpu = NOCPU;
2289ae7a6b38SJeff Roberson 		cpu_switch(td, newtd, mtx);
2290a89c2c8cSMark Johnston 		cpuid = td->td_oncpu = PCPU_GET(cpuid);
2291b3e9e682SRyan Stone 
2292d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , on__cpu);
2293ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
2294ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2295ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
2296ebccf1e3SJoseph Koshy #endif
2297b3e9e682SRyan Stone 	} else {
2298ae7a6b38SJeff Roberson 		thread_unblock_switch(td, mtx);
2299d9fae5abSAndriy Gapon 		SDT_PROBE0(sched, , , remain__cpu);
2300b3e9e682SRyan Stone 	}
2301686bcb5cSJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1,
2302686bcb5cSJeff Roberson 	    ("invalid count %d", curthread->td_md.md_spinlock_count));
2303afa0a46cSAndriy Gapon 
2304afa0a46cSAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
2305afa0a46cSAndriy Gapon 	    "prio:%d", td->td_priority);
230635e6168fSJeff Roberson }
230735e6168fSJeff Roberson 
2308ae7a6b38SJeff Roberson /*
2309ae7a6b38SJeff Roberson  * Adjust thread priorities as a result of a nice request.
2310ae7a6b38SJeff Roberson  */
231135e6168fSJeff Roberson void
2312fa885116SJulian Elischer sched_nice(struct proc *p, int nice)
231335e6168fSJeff Roberson {
231435e6168fSJeff Roberson 	struct thread *td;
231535e6168fSJeff Roberson 
2316fa885116SJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
2317e7d50326SJeff Roberson 
2318fa885116SJulian Elischer 	p->p_nice = nice;
23198460a577SJohn Birrell 	FOREACH_THREAD_IN_PROC(p, td) {
23207b20fb19SJeff Roberson 		thread_lock(td);
23218460a577SJohn Birrell 		sched_priority(td);
2322e7d50326SJeff Roberson 		sched_prio(td, td->td_base_user_pri);
23237b20fb19SJeff Roberson 		thread_unlock(td);
232435e6168fSJeff Roberson 	}
2325fa885116SJulian Elischer }
232635e6168fSJeff Roberson 
2327ae7a6b38SJeff Roberson /*
2328ae7a6b38SJeff Roberson  * Record the sleep time for the interactivity scorer.
2329ae7a6b38SJeff Roberson  */
233035e6168fSJeff Roberson void
2331c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int prio)
233235e6168fSJeff Roberson {
2333e7d50326SJeff Roberson 
23347b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
233535e6168fSJeff Roberson 
233654b0e65fSJeff Roberson 	td->td_slptick = ticks;
23372dc29adbSJohn Baldwin 	if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
23382dc29adbSJohn Baldwin 		return;
23390502fe2eSJeff Roberson 	if (static_boost == 1 && prio)
2340c5aa6b58SJeff Roberson 		sched_prio(td, prio);
23410502fe2eSJeff Roberson 	else if (static_boost && td->td_priority > static_boost)
23420502fe2eSJeff Roberson 		sched_prio(td, static_boost);
234335e6168fSJeff Roberson }
234435e6168fSJeff Roberson 
2345ae7a6b38SJeff Roberson /*
2346ae7a6b38SJeff Roberson  * Schedule a thread to resume execution and record how long it voluntarily
2347ae7a6b38SJeff Roberson  * slept.  We also update the pctcpu, interactivity, and priority.
234861a74c5cSJeff Roberson  *
234961a74c5cSJeff Roberson  * Requires the thread lock on entry, drops on exit.
2350ae7a6b38SJeff Roberson  */
235135e6168fSJeff Roberson void
235261a74c5cSJeff Roberson sched_wakeup(struct thread *td, int srqflags)
235335e6168fSJeff Roberson {
235414618990SJeff Roberson 	struct td_sched *ts;
2355ae7a6b38SJeff Roberson 	int slptick;
2356e7d50326SJeff Roberson 
23577b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
235893ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
235961a74c5cSJeff Roberson 
236035e6168fSJeff Roberson 	/*
2361e7d50326SJeff Roberson 	 * If we slept for more than a tick update our interactivity and
2362e7d50326SJeff Roberson 	 * priority.
236335e6168fSJeff Roberson 	 */
236454b0e65fSJeff Roberson 	slptick = td->td_slptick;
236554b0e65fSJeff Roberson 	td->td_slptick = 0;
2366ae7a6b38SJeff Roberson 	if (slptick && slptick != ticks) {
23677295465eSAlexander Motin 		ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT;
23688460a577SJohn Birrell 		sched_interact_update(td);
23697295465eSAlexander Motin 		sched_pctcpu_update(ts, 0);
2370f1e8dc4aSJeff Roberson 	}
2371954cffe9SJohn Baldwin 
2372954cffe9SJohn Baldwin 	/*
2373954cffe9SJohn Baldwin 	 * When resuming an idle ithread, restore its base ithread
2374954cffe9SJohn Baldwin 	 * priority.
2375954cffe9SJohn Baldwin 	 */
2376954cffe9SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) == PRI_ITHD &&
2377954cffe9SJohn Baldwin 	    td->td_priority != td->td_base_ithread_pri)
2378954cffe9SJohn Baldwin 		sched_prio(td, td->td_base_ithread_pri);
2379954cffe9SJohn Baldwin 
23805e5c3873SJeff Roberson 	/*
23815e5c3873SJeff Roberson 	 * Reset the slice value since we slept and advanced the round-robin.
23825e5c3873SJeff Roberson 	 */
23835e5c3873SJeff Roberson 	ts->ts_slice = 0;
238461a74c5cSJeff Roberson 	sched_add(td, SRQ_BORING | srqflags);
238535e6168fSJeff Roberson }
238635e6168fSJeff Roberson 
238735e6168fSJeff Roberson /*
238835e6168fSJeff Roberson  * Penalize the parent for creating a new child and initialize the child's
238935e6168fSJeff Roberson  * priority.
239035e6168fSJeff Roberson  */
239135e6168fSJeff Roberson void
23928460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
239315dc847eSJeff Roberson {
23947b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
239593ccd6bfSKonstantin Belousov 	sched_pctcpu_update(td_get_sched(td), 1);
2396ad1e7d28SJulian Elischer 	sched_fork_thread(td, child);
2397e7d50326SJeff Roberson 	/*
2398e7d50326SJeff Roberson 	 * Penalize the parent and child for forking.
2399e7d50326SJeff Roberson 	 */
2400e7d50326SJeff Roberson 	sched_interact_fork(child);
2401e7d50326SJeff Roberson 	sched_priority(child);
240293ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += tickincr;
2403e7d50326SJeff Roberson 	sched_interact_update(td);
2404e7d50326SJeff Roberson 	sched_priority(td);
2405ad1e7d28SJulian Elischer }
2406ad1e7d28SJulian Elischer 
2407ae7a6b38SJeff Roberson /*
2408ae7a6b38SJeff Roberson  * Fork a new thread, may be within the same process.
2409ae7a6b38SJeff Roberson  */
2410ad1e7d28SJulian Elischer void
2411ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
2412ad1e7d28SJulian Elischer {
2413ad1e7d28SJulian Elischer 	struct td_sched *ts;
2414ad1e7d28SJulian Elischer 	struct td_sched *ts2;
24155e5c3873SJeff Roberson 	struct tdq *tdq;
24168460a577SJohn Birrell 
24175e5c3873SJeff Roberson 	tdq = TDQ_SELF();
24188b16c208SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2419e7d50326SJeff Roberson 	/*
2420e7d50326SJeff Roberson 	 * Initialize child.
2421e7d50326SJeff Roberson 	 */
242293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
242393ccd6bfSKonstantin Belousov 	ts2 = td_get_sched(child);
242492de34dfSJohn Baldwin 	child->td_oncpu = NOCPU;
242592de34dfSJohn Baldwin 	child->td_lastcpu = NOCPU;
24265e5c3873SJeff Roberson 	child->td_lock = TDQ_LOCKPTR(tdq);
24278b16c208SJeff Roberson 	child->td_cpuset = cpuset_ref(td->td_cpuset);
24283f289c3fSJeff Roberson 	child->td_domain.dr_policy = td->td_cpuset->cs_domain;
2429ad1e7d28SJulian Elischer 	ts2->ts_cpu = ts->ts_cpu;
24308b16c208SJeff Roberson 	ts2->ts_flags = 0;
2431e7d50326SJeff Roberson 	/*
243222d19207SJohn Baldwin 	 * Grab our parents cpu estimation information.
2433e7d50326SJeff Roberson 	 */
2434ad1e7d28SJulian Elischer 	ts2->ts_ticks = ts->ts_ticks;
2435ad1e7d28SJulian Elischer 	ts2->ts_ltick = ts->ts_ltick;
2436ad1e7d28SJulian Elischer 	ts2->ts_ftick = ts->ts_ftick;
243722d19207SJohn Baldwin 	/*
243822d19207SJohn Baldwin 	 * Do not inherit any borrowed priority from the parent.
243922d19207SJohn Baldwin 	 */
244022d19207SJohn Baldwin 	child->td_priority = child->td_base_pri;
2441e7d50326SJeff Roberson 	/*
2442e7d50326SJeff Roberson 	 * And update interactivity score.
2443e7d50326SJeff Roberson 	 */
2444ae7a6b38SJeff Roberson 	ts2->ts_slptime = ts->ts_slptime;
2445ae7a6b38SJeff Roberson 	ts2->ts_runtime = ts->ts_runtime;
24465e5c3873SJeff Roberson 	/* Attempt to quickly learn interactivity. */
24475e5c3873SJeff Roberson 	ts2->ts_slice = tdq_slice(tdq) - sched_slice_min;
24488f51ad55SJeff Roberson #ifdef KTR
24498f51ad55SJeff Roberson 	bzero(ts2->ts_name, sizeof(ts2->ts_name));
24508f51ad55SJeff Roberson #endif
245115dc847eSJeff Roberson }
245215dc847eSJeff Roberson 
2453ae7a6b38SJeff Roberson /*
2454ae7a6b38SJeff Roberson  * Adjust the priority class of a thread.
2455ae7a6b38SJeff Roberson  */
245615dc847eSJeff Roberson void
24578460a577SJohn Birrell sched_class(struct thread *td, int class)
245815dc847eSJeff Roberson {
245915dc847eSJeff Roberson 
24607b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
24618460a577SJohn Birrell 	if (td->td_pri_class == class)
246215dc847eSJeff Roberson 		return;
24638460a577SJohn Birrell 	td->td_pri_class = class;
246435e6168fSJeff Roberson }
246535e6168fSJeff Roberson 
246635e6168fSJeff Roberson /*
246735e6168fSJeff Roberson  * Return some of the child's priority and interactivity to the parent.
246835e6168fSJeff Roberson  */
246935e6168fSJeff Roberson void
2470fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
247135e6168fSJeff Roberson {
2472e7d50326SJeff Roberson 	struct thread *td;
2473141ad61cSJeff Roberson 
24748f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
2475cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2476374ae2a3SJeff Roberson 	PROC_LOCK_ASSERT(p, MA_OWNED);
2477e7d50326SJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
2478e7d50326SJeff Roberson 	sched_exit_thread(td, child);
2479ad1e7d28SJulian Elischer }
2480ad1e7d28SJulian Elischer 
2481ae7a6b38SJeff Roberson /*
2482ae7a6b38SJeff Roberson  * Penalize another thread for the time spent on this one.  This helps to
2483ae7a6b38SJeff Roberson  * worsen the priority and interactivity of processes which schedule batch
2484ae7a6b38SJeff Roberson  * jobs such as make.  This has little effect on the make process itself but
2485ae7a6b38SJeff Roberson  * causes new processes spawned by it to receive worse scores immediately.
2486ae7a6b38SJeff Roberson  */
2487ad1e7d28SJulian Elischer void
2488fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child)
2489ad1e7d28SJulian Elischer {
2490fc6c30f6SJulian Elischer 
24918f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
2492cd39bb09SXin LI 	    "prio:%d", child->td_priority);
2493e7d50326SJeff Roberson 	/*
2494e7d50326SJeff Roberson 	 * Give the child's runtime to the parent without returning the
2495e7d50326SJeff Roberson 	 * sleep time as a penalty to the parent.  This causes shells that
2496e7d50326SJeff Roberson 	 * launch expensive things to mark their children as expensive.
2497e7d50326SJeff Roberson 	 */
24987b20fb19SJeff Roberson 	thread_lock(td);
249993ccd6bfSKonstantin Belousov 	td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime;
2500fc6c30f6SJulian Elischer 	sched_interact_update(td);
2501e7d50326SJeff Roberson 	sched_priority(td);
25027b20fb19SJeff Roberson 	thread_unlock(td);
2503ad1e7d28SJulian Elischer }
2504ad1e7d28SJulian Elischer 
2505ff256d9cSJeff Roberson void
2506ff256d9cSJeff Roberson sched_preempt(struct thread *td)
2507ff256d9cSJeff Roberson {
2508ff256d9cSJeff Roberson 	struct tdq *tdq;
2509686bcb5cSJeff Roberson 	int flags;
2510ff256d9cSJeff Roberson 
2511b3e9e682SRyan Stone 	SDT_PROBE2(sched, , , surrender, td, td->td_proc);
2512b3e9e682SRyan Stone 
2513ff256d9cSJeff Roberson 	thread_lock(td);
2514ff256d9cSJeff Roberson 	tdq = TDQ_SELF();
2515ff256d9cSJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2516ff256d9cSJeff Roberson 	if (td->td_priority > tdq->tdq_lowpri) {
2517686bcb5cSJeff Roberson 		if (td->td_critnest == 1) {
25188df78c41SJeff Roberson 			flags = SW_INVOL | SW_PREEMPT;
2519686bcb5cSJeff Roberson 			flags |= TD_IS_IDLETHREAD(td) ? SWT_REMOTEWAKEIDLE :
2520686bcb5cSJeff Roberson 			    SWT_REMOTEPREEMPT;
2521686bcb5cSJeff Roberson 			mi_switch(flags);
2522686bcb5cSJeff Roberson 			/* Switch dropped thread lock. */
2523686bcb5cSJeff Roberson 			return;
2524686bcb5cSJeff Roberson 		}
2525ff256d9cSJeff Roberson 		td->td_owepreempt = 1;
25267789ab32SMark Johnston 	} else {
25277789ab32SMark Johnston 		tdq->tdq_owepreempt = 0;
2528ff256d9cSJeff Roberson 	}
2529ff256d9cSJeff Roberson 	thread_unlock(td);
2530ff256d9cSJeff Roberson }
2531ff256d9cSJeff Roberson 
2532ae7a6b38SJeff Roberson /*
2533ae7a6b38SJeff Roberson  * Fix priorities on return to user-space.  Priorities may be elevated due
2534ae7a6b38SJeff Roberson  * to static priorities in msleep() or similar.
2535ae7a6b38SJeff Roberson  */
2536ad1e7d28SJulian Elischer void
253728240885SMateusz Guzik sched_userret_slowpath(struct thread *td)
2538ad1e7d28SJulian Elischer {
253928240885SMateusz Guzik 
25407b20fb19SJeff Roberson 	thread_lock(td);
2541ad1e7d28SJulian Elischer 	td->td_priority = td->td_user_pri;
2542ad1e7d28SJulian Elischer 	td->td_base_pri = td->td_user_pri;
254362fa74d9SJeff Roberson 	tdq_setlowpri(TDQ_SELF(), td);
25447b20fb19SJeff Roberson 	thread_unlock(td);
2545ad1e7d28SJulian Elischer }
254635e6168fSJeff Roberson 
2547954cffe9SJohn Baldwin SCHED_STAT_DEFINE(ithread_demotions, "Interrupt thread priority demotions");
2548954cffe9SJohn Baldwin SCHED_STAT_DEFINE(ithread_preemptions,
2549954cffe9SJohn Baldwin     "Interrupt thread preemptions due to time-sharing");
2550954cffe9SJohn Baldwin 
2551954cffe9SJohn Baldwin /*
2552954cffe9SJohn Baldwin  * Return time slice for a given thread.  For ithreads this is
2553954cffe9SJohn Baldwin  * sched_slice.  For other threads it is tdq_slice(tdq).
2554954cffe9SJohn Baldwin  */
2555954cffe9SJohn Baldwin static inline int
2556954cffe9SJohn Baldwin td_slice(struct thread *td, struct tdq *tdq)
2557954cffe9SJohn Baldwin {
2558954cffe9SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) == PRI_ITHD)
2559954cffe9SJohn Baldwin 		return (sched_slice);
2560954cffe9SJohn Baldwin 	return (tdq_slice(tdq));
2561954cffe9SJohn Baldwin }
2562954cffe9SJohn Baldwin 
2563ae7a6b38SJeff Roberson /*
2564ae7a6b38SJeff Roberson  * Handle a stathz tick.  This is really only relevant for timeshare
2565954cffe9SJohn Baldwin  * and interrupt threads.
2566ae7a6b38SJeff Roberson  */
256735e6168fSJeff Roberson void
2568c3cccf95SJeff Roberson sched_clock(struct thread *td, int cnt)
256935e6168fSJeff Roberson {
2570ad1e7d28SJulian Elischer 	struct tdq *tdq;
2571ad1e7d28SJulian Elischer 	struct td_sched *ts;
257235e6168fSJeff Roberson 
2573ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
25743f872f85SJeff Roberson 	tdq = TDQ_SELF();
25757fcf154aSJeff Roberson #ifdef SMP
25767fcf154aSJeff Roberson 	/*
25777fcf154aSJeff Roberson 	 * We run the long term load balancer infrequently on the first cpu.
25787fcf154aSJeff Roberson 	 */
2579c3cccf95SJeff Roberson 	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 &&
2580c3cccf95SJeff Roberson 	    balance_ticks != 0) {
2581c3cccf95SJeff Roberson 		balance_ticks -= cnt;
2582c3cccf95SJeff Roberson 		if (balance_ticks <= 0)
25837fcf154aSJeff Roberson 			sched_balance();
25847fcf154aSJeff Roberson 	}
25857fcf154aSJeff Roberson #endif
25863f872f85SJeff Roberson 	/*
25871690c6c1SJeff Roberson 	 * Save the old switch count so we have a record of the last ticks
25881690c6c1SJeff Roberson 	 * activity.   Initialize the new switch count based on our load.
25891690c6c1SJeff Roberson 	 * If there is some activity seed it to reflect that.
25901690c6c1SJeff Roberson 	 */
25911690c6c1SJeff Roberson 	tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
25926c47aaaeSJeff Roberson 	tdq->tdq_switchcnt = tdq->tdq_load;
259311484ad8SMark Johnston 
25941690c6c1SJeff Roberson 	/*
25953f872f85SJeff Roberson 	 * Advance the insert index once for each tick to ensure that all
25963f872f85SJeff Roberson 	 * threads get a chance to run.
25973f872f85SJeff Roberson 	 */
25983f872f85SJeff Roberson 	if (tdq->tdq_idx == tdq->tdq_ridx) {
25993f872f85SJeff Roberson 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
26003f872f85SJeff Roberson 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
26013f872f85SJeff Roberson 			tdq->tdq_ridx = tdq->tdq_idx;
26023f872f85SJeff Roberson 	}
260393ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
26047295465eSAlexander Motin 	sched_pctcpu_update(ts, 1);
2605c3cccf95SJeff Roberson 	if ((td->td_pri_class & PRI_FIFO_BIT) || TD_IS_IDLETHREAD(td))
2606a8949de2SJeff Roberson 		return;
2607c3cccf95SJeff Roberson 
2608c9a8cba4SJohn Baldwin 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
2609a8949de2SJeff Roberson 		/*
2610fd0b8c78SJeff Roberson 		 * We used a tick; charge it to the thread so
2611fd0b8c78SJeff Roberson 		 * that we can compute our interactivity.
261215dc847eSJeff Roberson 		 */
2613c3cccf95SJeff Roberson 		td_get_sched(td)->ts_runtime += tickincr * cnt;
26148460a577SJohn Birrell 		sched_interact_update(td);
261573daf66fSJeff Roberson 		sched_priority(td);
2616fd0b8c78SJeff Roberson 	}
2617579895dfSAlexander Motin 
261835e6168fSJeff Roberson 	/*
2619579895dfSAlexander Motin 	 * Force a context switch if the current thread has used up a full
2620579895dfSAlexander Motin 	 * time slice (default is 100ms).
262135e6168fSJeff Roberson 	 */
2622c3cccf95SJeff Roberson 	ts->ts_slice += cnt;
2623954cffe9SJohn Baldwin 	if (ts->ts_slice >= td_slice(td, tdq)) {
26245e5c3873SJeff Roberson 		ts->ts_slice = 0;
2625954cffe9SJohn Baldwin 
2626954cffe9SJohn Baldwin 		/*
2627954cffe9SJohn Baldwin 		 * If an ithread uses a full quantum, demote its
2628954cffe9SJohn Baldwin 		 * priority and preempt it.
2629954cffe9SJohn Baldwin 		 */
2630954cffe9SJohn Baldwin 		if (PRI_BASE(td->td_pri_class) == PRI_ITHD) {
2631954cffe9SJohn Baldwin 			SCHED_STAT_INC(ithread_preemptions);
2632954cffe9SJohn Baldwin 			td->td_owepreempt = 1;
2633954cffe9SJohn Baldwin 			if (td->td_base_pri + RQ_PPQ < PRI_MAX_ITHD) {
2634954cffe9SJohn Baldwin 				SCHED_STAT_INC(ithread_demotions);
2635954cffe9SJohn Baldwin 				sched_prio(td, td->td_base_pri + RQ_PPQ);
2636954cffe9SJohn Baldwin 			}
2637c6d31b83SKonstantin Belousov 		} else {
2638c6d31b83SKonstantin Belousov 			ast_sched_locked(td, TDA_SCHED);
2639c6d31b83SKonstantin Belousov 			td->td_flags |= TDF_SLICEEND;
2640c6d31b83SKonstantin Belousov 		}
264135e6168fSJeff Roberson 	}
2642579895dfSAlexander Motin }
264335e6168fSJeff Roberson 
2644ccd0ec40SKonstantin Belousov u_int
2645ccd0ec40SKonstantin Belousov sched_estcpu(struct thread *td __unused)
2646ae7a6b38SJeff Roberson {
2647ae7a6b38SJeff Roberson 
2648ccd0ec40SKonstantin Belousov 	return (0);
2649ae7a6b38SJeff Roberson }
2650ae7a6b38SJeff Roberson 
2651ae7a6b38SJeff Roberson /*
2652ae7a6b38SJeff Roberson  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2653ae7a6b38SJeff Roberson  * cooperative idle threads.
2654ae7a6b38SJeff Roberson  */
265535e6168fSJeff Roberson int
265635e6168fSJeff Roberson sched_runnable(void)
265735e6168fSJeff Roberson {
2658ad1e7d28SJulian Elischer 	struct tdq *tdq;
2659b90816f1SJeff Roberson 	int load;
266035e6168fSJeff Roberson 
2661b90816f1SJeff Roberson 	load = 1;
2662b90816f1SJeff Roberson 
2663ad1e7d28SJulian Elischer 	tdq = TDQ_SELF();
26643f741ca1SJeff Roberson 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
266511484ad8SMark Johnston 		if (TDQ_LOAD(tdq) > 0)
26663f741ca1SJeff Roberson 			goto out;
26673f741ca1SJeff Roberson 	} else
266811484ad8SMark Johnston 		if (TDQ_LOAD(tdq) - 1 > 0)
2669b90816f1SJeff Roberson 			goto out;
2670b90816f1SJeff Roberson 	load = 0;
2671b90816f1SJeff Roberson out:
2672b90816f1SJeff Roberson 	return (load);
267335e6168fSJeff Roberson }
267435e6168fSJeff Roberson 
2675ae7a6b38SJeff Roberson /*
2676ae7a6b38SJeff Roberson  * Choose the highest priority thread to run.  The thread is removed from
2677ef80894cSMark Johnston  * the run-queue while running however the load remains.
2678ae7a6b38SJeff Roberson  */
26797a5e5e2aSJeff Roberson struct thread *
2680c9f25d8fSJeff Roberson sched_choose(void)
2681c9f25d8fSJeff Roberson {
26829727e637SJeff Roberson 	struct thread *td;
2683ae7a6b38SJeff Roberson 	struct tdq *tdq;
2684ae7a6b38SJeff Roberson 
2685ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2686ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
26879727e637SJeff Roberson 	td = tdq_choose(tdq);
26886d3f74a1SMark Johnston 	if (td != NULL) {
26899727e637SJeff Roberson 		tdq_runq_rem(tdq, td);
26900502fe2eSJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
26916d3f74a1SMark Johnston 	} else {
26920502fe2eSJeff Roberson 		tdq->tdq_lowpri = PRI_MAX_IDLE;
26936d3f74a1SMark Johnston 		td = PCPU_GET(idlethread);
26946d3f74a1SMark Johnston 	}
26956d3f74a1SMark Johnston 	tdq->tdq_curthread = td;
26966d3f74a1SMark Johnston 	return (td);
26977a5e5e2aSJeff Roberson }
26987a5e5e2aSJeff Roberson 
2699ae7a6b38SJeff Roberson /*
27000927ff78SMark Johnston  * Set owepreempt if the currently running thread has lower priority than "pri".
27010927ff78SMark Johnston  * Preemption never happens directly in ULE, we always request it once we exit a
27020927ff78SMark Johnston  * critical section.
2703ae7a6b38SJeff Roberson  */
27040927ff78SMark Johnston static void
27050927ff78SMark Johnston sched_setpreempt(int pri)
27067a5e5e2aSJeff Roberson {
27077a5e5e2aSJeff Roberson 	struct thread *ctd;
27087a5e5e2aSJeff Roberson 	int cpri;
2709ff256d9cSJeff Roberson 
27107a5e5e2aSJeff Roberson 	ctd = curthread;
27110927ff78SMark Johnston 	THREAD_LOCK_ASSERT(ctd, MA_OWNED);
27120927ff78SMark Johnston 
27137a5e5e2aSJeff Roberson 	cpri = ctd->td_priority;
2714ff256d9cSJeff Roberson 	if (pri < cpri)
2715c6d31b83SKonstantin Belousov 		ast_sched_locked(ctd, TDA_SCHED);
2716879e0604SMateusz Guzik 	if (KERNEL_PANICKED() || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2717ae7a6b38SJeff Roberson 		return;
2718ff256d9cSJeff Roberson 	if (!sched_shouldpreempt(pri, cpri, 0))
2719ae7a6b38SJeff Roberson 		return;
27207a5e5e2aSJeff Roberson 	ctd->td_owepreempt = 1;
272135e6168fSJeff Roberson }
272235e6168fSJeff Roberson 
2723ae7a6b38SJeff Roberson /*
272473daf66fSJeff Roberson  * Add a thread to a thread queue.  Select the appropriate runq and add the
272573daf66fSJeff Roberson  * thread to it.  This is the internal function called when the tdq is
272673daf66fSJeff Roberson  * predetermined.
2727ae7a6b38SJeff Roberson  */
27286d3f74a1SMark Johnston static int
2729ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
273035e6168fSJeff Roberson {
27316d3f74a1SMark Johnston 	int lowpri;
2732c9f25d8fSJeff Roberson 
2733ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
273461a74c5cSJeff Roberson 	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
27357a5e5e2aSJeff Roberson 	KASSERT((td->td_inhibitors == 0),
27367a5e5e2aSJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
27377a5e5e2aSJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
27387a5e5e2aSJeff Roberson 	    ("sched_add: bad thread state"));
2739b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
2740b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
2741ae7a6b38SJeff Roberson 
27426d3f74a1SMark Johnston 	lowpri = tdq->tdq_lowpri;
27436d3f74a1SMark Johnston 	if (td->td_priority < lowpri)
2744ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
27459727e637SJeff Roberson 	tdq_runq_add(tdq, td, flags);
27469727e637SJeff Roberson 	tdq_load_add(tdq, td);
27476d3f74a1SMark Johnston 	return (lowpri);
2748ae7a6b38SJeff Roberson }
2749ae7a6b38SJeff Roberson 
2750ae7a6b38SJeff Roberson /*
2751ae7a6b38SJeff Roberson  * Select the target thread queue and add a thread to it.  Request
2752ae7a6b38SJeff Roberson  * preemption or IPI a remote processor if required.
275361a74c5cSJeff Roberson  *
275461a74c5cSJeff Roberson  * Requires the thread lock on entry, drops on exit.
2755ae7a6b38SJeff Roberson  */
2756ae7a6b38SJeff Roberson void
2757ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags)
2758ae7a6b38SJeff Roberson {
2759ae7a6b38SJeff Roberson 	struct tdq *tdq;
27607b8bfa0dSJeff Roberson #ifdef SMP
27616d3f74a1SMark Johnston 	int cpu, lowpri;
2762ae7a6b38SJeff Roberson #endif
27638f51ad55SJeff Roberson 
27648f51ad55SJeff Roberson 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
27658f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
27668f51ad55SJeff Roberson 	    sched_tdname(curthread));
27678f51ad55SJeff Roberson 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
27688f51ad55SJeff Roberson 	    KTR_ATTR_LINKED, sched_tdname(td));
2769b3e9e682SRyan Stone 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
2770b3e9e682SRyan Stone 	    flags & SRQ_PREEMPTED);
2771ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2772ae7a6b38SJeff Roberson 	/*
2773ae7a6b38SJeff Roberson 	 * Recalculate the priority before we select the target cpu or
2774ae7a6b38SJeff Roberson 	 * run-queue.
2775ae7a6b38SJeff Roberson 	 */
2776ae7a6b38SJeff Roberson 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2777ae7a6b38SJeff Roberson 		sched_priority(td);
2778ae7a6b38SJeff Roberson #ifdef SMP
2779ae7a6b38SJeff Roberson 	/*
2780ae7a6b38SJeff Roberson 	 * Pick the destination cpu and if it isn't ours transfer to the
2781ae7a6b38SJeff Roberson 	 * target cpu.
2782ae7a6b38SJeff Roberson 	 */
27839727e637SJeff Roberson 	cpu = sched_pickcpu(td, flags);
27849727e637SJeff Roberson 	tdq = sched_setcpu(td, cpu, flags);
27856d3f74a1SMark Johnston 	lowpri = tdq_add(tdq, td, flags);
278661a74c5cSJeff Roberson 	if (cpu != PCPU_GET(cpuid))
27876d3f74a1SMark Johnston 		tdq_notify(tdq, lowpri);
278861a74c5cSJeff Roberson 	else if (!(flags & SRQ_YIELDING))
27890927ff78SMark Johnston 		sched_setpreempt(td->td_priority);
2790ae7a6b38SJeff Roberson #else
2791ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2792ae7a6b38SJeff Roberson 	/*
2793ae7a6b38SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
2794ae7a6b38SJeff Roberson 	 * to the scheduler's lock.
2795ae7a6b38SJeff Roberson 	 */
2796e4894505SMark Johnston 	if (td->td_lock != TDQ_LOCKPTR(tdq)) {
2797e4894505SMark Johnston 		TDQ_LOCK(tdq);
279861a74c5cSJeff Roberson 		if ((flags & SRQ_HOLD) != 0)
279961a74c5cSJeff Roberson 			td->td_lock = TDQ_LOCKPTR(tdq);
280061a74c5cSJeff Roberson 		else
2801ae7a6b38SJeff Roberson 			thread_lock_set(td, TDQ_LOCKPTR(tdq));
2802e4894505SMark Johnston 	}
28036d3f74a1SMark Johnston 	(void)tdq_add(tdq, td, flags);
2804ae7a6b38SJeff Roberson 	if (!(flags & SRQ_YIELDING))
28050927ff78SMark Johnston 		sched_setpreempt(td->td_priority);
280661a74c5cSJeff Roberson #endif
280761a74c5cSJeff Roberson 	if (!(flags & SRQ_HOLDTD))
280861a74c5cSJeff Roberson 		thread_unlock(td);
280935e6168fSJeff Roberson }
281035e6168fSJeff Roberson 
2811ae7a6b38SJeff Roberson /*
2812ae7a6b38SJeff Roberson  * Remove a thread from a run-queue without running it.  This is used
2813ae7a6b38SJeff Roberson  * when we're stealing a thread from a remote queue.  Otherwise all threads
2814ae7a6b38SJeff Roberson  * exit by calling sched_exit_thread() and sched_throw() themselves.
2815ae7a6b38SJeff Roberson  */
281635e6168fSJeff Roberson void
28177cf90fb3SJeff Roberson sched_rem(struct thread *td)
281835e6168fSJeff Roberson {
2819ad1e7d28SJulian Elischer 	struct tdq *tdq;
28207cf90fb3SJeff Roberson 
28218f51ad55SJeff Roberson 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
28228f51ad55SJeff Roberson 	    "prio:%d", td->td_priority);
2823b3e9e682SRyan Stone 	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
282493ccd6bfSKonstantin Belousov 	tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
2825ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2826ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
28277a5e5e2aSJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
2828ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
28299727e637SJeff Roberson 	tdq_runq_rem(tdq, td);
28309727e637SJeff Roberson 	tdq_load_rem(tdq, td);
28317a5e5e2aSJeff Roberson 	TD_SET_CAN_RUN(td);
283262fa74d9SJeff Roberson 	if (td->td_priority == tdq->tdq_lowpri)
283362fa74d9SJeff Roberson 		tdq_setlowpri(tdq, NULL);
283435e6168fSJeff Roberson }
283535e6168fSJeff Roberson 
2836ae7a6b38SJeff Roberson /*
2837ae7a6b38SJeff Roberson  * Fetch cpu utilization information.  Updates on demand.
2838ae7a6b38SJeff Roberson  */
283935e6168fSJeff Roberson fixpt_t
28407cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
284135e6168fSJeff Roberson {
284235e6168fSJeff Roberson 	fixpt_t pctcpu;
2843ad1e7d28SJulian Elischer 	struct td_sched *ts;
284435e6168fSJeff Roberson 
284535e6168fSJeff Roberson 	pctcpu = 0;
284693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
284735e6168fSJeff Roberson 
28483da35a0aSJohn Baldwin 	THREAD_LOCK_ASSERT(td, MA_OWNED);
28497295465eSAlexander Motin 	sched_pctcpu_update(ts, TD_IS_RUNNING(td));
2850ad1e7d28SJulian Elischer 	if (ts->ts_ticks) {
285135e6168fSJeff Roberson 		int rtick;
285235e6168fSJeff Roberson 
285335e6168fSJeff Roberson 		/* How many rtick per second ? */
2854e7d50326SJeff Roberson 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2855e7d50326SJeff Roberson 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
285635e6168fSJeff Roberson 	}
285735e6168fSJeff Roberson 
285835e6168fSJeff Roberson 	return (pctcpu);
285935e6168fSJeff Roberson }
286035e6168fSJeff Roberson 
286162fa74d9SJeff Roberson /*
286262fa74d9SJeff Roberson  * Enforce affinity settings for a thread.  Called after adjustments to
286362fa74d9SJeff Roberson  * cpumask.
286462fa74d9SJeff Roberson  */
2865885d51a3SJeff Roberson void
2866885d51a3SJeff Roberson sched_affinity(struct thread *td)
2867885d51a3SJeff Roberson {
286862fa74d9SJeff Roberson #ifdef SMP
286962fa74d9SJeff Roberson 	struct td_sched *ts;
287062fa74d9SJeff Roberson 
287162fa74d9SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
287293ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
287362fa74d9SJeff Roberson 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
287462fa74d9SJeff Roberson 		return;
287553a6c8b3SJeff Roberson 	if (TD_ON_RUNQ(td)) {
287653a6c8b3SJeff Roberson 		sched_rem(td);
2877d8d5f036SJeff Roberson 		sched_add(td, SRQ_BORING | SRQ_HOLDTD);
287853a6c8b3SJeff Roberson 		return;
287953a6c8b3SJeff Roberson 	}
288062fa74d9SJeff Roberson 	if (!TD_IS_RUNNING(td))
288162fa74d9SJeff Roberson 		return;
288262fa74d9SJeff Roberson 	/*
28830f7a0ebdSMatthew D Fleming 	 * Force a switch before returning to userspace.  If the
28840f7a0ebdSMatthew D Fleming 	 * target thread is not running locally send an ipi to force
28850f7a0ebdSMatthew D Fleming 	 * the issue.
288662fa74d9SJeff Roberson 	 */
2887c6d31b83SKonstantin Belousov 	ast_sched_locked(td, TDA_SCHED);
28880f7a0ebdSMatthew D Fleming 	if (td != curthread)
28890f7a0ebdSMatthew D Fleming 		ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
289062fa74d9SJeff Roberson #endif
2891885d51a3SJeff Roberson }
2892885d51a3SJeff Roberson 
2893ae7a6b38SJeff Roberson /*
2894ae7a6b38SJeff Roberson  * Bind a thread to a target cpu.
2895ae7a6b38SJeff Roberson  */
28969bacd788SJeff Roberson void
28979bacd788SJeff Roberson sched_bind(struct thread *td, int cpu)
28989bacd788SJeff Roberson {
2899ad1e7d28SJulian Elischer 	struct td_sched *ts;
29009bacd788SJeff Roberson 
2901c47f202bSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
29021d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
290393ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
29046b2f763fSJeff Roberson 	if (ts->ts_flags & TSF_BOUND)
2905c95d2db2SJeff Roberson 		sched_unbind(td);
29060f7a0ebdSMatthew D Fleming 	KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
2907ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
29086b2f763fSJeff Roberson 	sched_pin();
290980f86c9fSJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
29109bacd788SJeff Roberson 		return;
29116b2f763fSJeff Roberson 	ts->ts_cpu = cpu;
29129bacd788SJeff Roberson 	/* When we return from mi_switch we'll be on the correct cpu. */
29131029dab6SMitchell Horne 	mi_switch(SW_VOL | SWT_BIND);
2914686bcb5cSJeff Roberson 	thread_lock(td);
29159bacd788SJeff Roberson }
29169bacd788SJeff Roberson 
2917ae7a6b38SJeff Roberson /*
2918ae7a6b38SJeff Roberson  * Release a bound thread.
2919ae7a6b38SJeff Roberson  */
29209bacd788SJeff Roberson void
29219bacd788SJeff Roberson sched_unbind(struct thread *td)
29229bacd788SJeff Roberson {
2923e7d50326SJeff Roberson 	struct td_sched *ts;
2924e7d50326SJeff Roberson 
29257b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
29261d7830edSJohn Baldwin 	KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
292793ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
29286b2f763fSJeff Roberson 	if ((ts->ts_flags & TSF_BOUND) == 0)
29296b2f763fSJeff Roberson 		return;
2930e7d50326SJeff Roberson 	ts->ts_flags &= ~TSF_BOUND;
2931e7d50326SJeff Roberson 	sched_unpin();
29329bacd788SJeff Roberson }
29339bacd788SJeff Roberson 
293435e6168fSJeff Roberson int
2935ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
2936ebccf1e3SJoseph Koshy {
29377b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
293893ccd6bfSKonstantin Belousov 	return (td_get_sched(td)->ts_flags & TSF_BOUND);
2939ebccf1e3SJoseph Koshy }
2940ebccf1e3SJoseph Koshy 
2941ae7a6b38SJeff Roberson /*
2942ae7a6b38SJeff Roberson  * Basic yield call.
2943ae7a6b38SJeff Roberson  */
294436ec198bSDavid Xu void
294536ec198bSDavid Xu sched_relinquish(struct thread *td)
294636ec198bSDavid Xu {
29477b20fb19SJeff Roberson 	thread_lock(td);
2948686bcb5cSJeff Roberson 	mi_switch(SW_VOL | SWT_RELINQUISH);
294936ec198bSDavid Xu }
295036ec198bSDavid Xu 
2951ae7a6b38SJeff Roberson /*
2952ae7a6b38SJeff Roberson  * Return the total system load.
2953ae7a6b38SJeff Roberson  */
2954ebccf1e3SJoseph Koshy int
295533916c36SJeff Roberson sched_load(void)
295633916c36SJeff Roberson {
295733916c36SJeff Roberson #ifdef SMP
295833916c36SJeff Roberson 	int total;
295933916c36SJeff Roberson 	int i;
296033916c36SJeff Roberson 
296133916c36SJeff Roberson 	total = 0;
29623aa6d94eSJohn Baldwin 	CPU_FOREACH(i)
296311484ad8SMark Johnston 		total += atomic_load_int(&TDQ_CPU(i)->tdq_sysload);
296433916c36SJeff Roberson 	return (total);
296533916c36SJeff Roberson #else
296611484ad8SMark Johnston 	return (atomic_load_int(&TDQ_SELF()->tdq_sysload));
296733916c36SJeff Roberson #endif
296833916c36SJeff Roberson }
296933916c36SJeff Roberson 
297033916c36SJeff Roberson int
297135e6168fSJeff Roberson sched_sizeof_proc(void)
297235e6168fSJeff Roberson {
297335e6168fSJeff Roberson 	return (sizeof(struct proc));
297435e6168fSJeff Roberson }
297535e6168fSJeff Roberson 
297635e6168fSJeff Roberson int
297735e6168fSJeff Roberson sched_sizeof_thread(void)
297835e6168fSJeff Roberson {
297935e6168fSJeff Roberson 	return (sizeof(struct thread) + sizeof(struct td_sched));
298035e6168fSJeff Roberson }
2981b41f1452SDavid Xu 
298209c8a4ccSJeff Roberson #ifdef SMP
298309c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)						\
298409c8a4ccSJeff Roberson     ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0)
298509c8a4ccSJeff Roberson #else
298609c8a4ccSJeff Roberson #define	TDQ_IDLESPIN(tdq)	1
298709c8a4ccSJeff Roberson #endif
298809c8a4ccSJeff Roberson 
29897a5e5e2aSJeff Roberson /*
29907a5e5e2aSJeff Roberson  * The actual idle process.
29917a5e5e2aSJeff Roberson  */
29927a5e5e2aSJeff Roberson void
29937a5e5e2aSJeff Roberson sched_idletd(void *dummy)
29947a5e5e2aSJeff Roberson {
29957a5e5e2aSJeff Roberson 	struct thread *td;
2996ae7a6b38SJeff Roberson 	struct tdq *tdq;
29972c27cb3aSAlexander Motin 	int oldswitchcnt, switchcnt;
29981690c6c1SJeff Roberson 	int i;
29997a5e5e2aSJeff Roberson 
30007b55ab05SJeff Roberson 	mtx_assert(&Giant, MA_NOTOWNED);
30017a5e5e2aSJeff Roberson 	td = curthread;
3002ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
3003ba96d2d8SJohn Baldwin 	THREAD_NO_SLEEPING();
30042c27cb3aSAlexander Motin 	oldswitchcnt = -1;
3005ae7a6b38SJeff Roberson 	for (;;) {
300611484ad8SMark Johnston 		if (TDQ_LOAD(tdq)) {
30072c27cb3aSAlexander Motin 			thread_lock(td);
3008686bcb5cSJeff Roberson 			mi_switch(SW_VOL | SWT_IDLE);
30092c27cb3aSAlexander Motin 		}
301011484ad8SMark Johnston 		switchcnt = TDQ_SWITCHCNT(tdq);
3011ae7a6b38SJeff Roberson #ifdef SMP
301297e9382dSDon Lewis 		if (always_steal || switchcnt != oldswitchcnt) {
30132c27cb3aSAlexander Motin 			oldswitchcnt = switchcnt;
30141690c6c1SJeff Roberson 			if (tdq_idled(tdq) == 0)
30151690c6c1SJeff Roberson 				continue;
30162c27cb3aSAlexander Motin 		}
301711484ad8SMark Johnston 		switchcnt = TDQ_SWITCHCNT(tdq);
30182fd4047fSAlexander Motin #else
30192fd4047fSAlexander Motin 		oldswitchcnt = switchcnt;
30202fd4047fSAlexander Motin #endif
30211690c6c1SJeff Roberson 		/*
30221690c6c1SJeff Roberson 		 * If we're switching very frequently, spin while checking
30231690c6c1SJeff Roberson 		 * for load rather than entering a low power state that
30247b55ab05SJeff Roberson 		 * may require an IPI.  However, don't do any busy
30257b55ab05SJeff Roberson 		 * loops while on SMT machines as this simply steals
30267b55ab05SJeff Roberson 		 * cycles from cores doing useful work.
30271690c6c1SJeff Roberson 		 */
302809c8a4ccSJeff Roberson 		if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) {
30291690c6c1SJeff Roberson 			for (i = 0; i < sched_idlespins; i++) {
303011484ad8SMark Johnston 				if (TDQ_LOAD(tdq))
30311690c6c1SJeff Roberson 					break;
30321690c6c1SJeff Roberson 				cpu_spinwait();
30331690c6c1SJeff Roberson 			}
30341690c6c1SJeff Roberson 		}
30352c27cb3aSAlexander Motin 
30362c27cb3aSAlexander Motin 		/* If there was context switch during spin, restart it. */
303711484ad8SMark Johnston 		switchcnt = TDQ_SWITCHCNT(tdq);
303811484ad8SMark Johnston 		if (TDQ_LOAD(tdq) != 0 || switchcnt != oldswitchcnt)
30392c27cb3aSAlexander Motin 			continue;
30402c27cb3aSAlexander Motin 
30412c27cb3aSAlexander Motin 		/* Run main MD idle handler. */
304211484ad8SMark Johnston 		atomic_store_int(&tdq->tdq_cpu_idle, 1);
304379654969SAlexander Motin 		/*
30446d3f74a1SMark Johnston 		 * Make sure that the tdq_cpu_idle update is globally visible
30456d3f74a1SMark Johnston 		 * before cpu_idle() reads tdq_load.  The order is important
30466d3f74a1SMark Johnston 		 * to avoid races with tdq_notify().
304779654969SAlexander Motin 		 */
3048e8677f38SKonstantin Belousov 		atomic_thread_fence_seq_cst();
304997e9382dSDon Lewis 		/*
305097e9382dSDon Lewis 		 * Checking for again after the fence picks up assigned
305197e9382dSDon Lewis 		 * threads often enough to make it worthwhile to do so in
305297e9382dSDon Lewis 		 * order to avoid calling cpu_idle().
305397e9382dSDon Lewis 		 */
305411484ad8SMark Johnston 		if (TDQ_LOAD(tdq) != 0) {
305511484ad8SMark Johnston 			atomic_store_int(&tdq->tdq_cpu_idle, 0);
305697e9382dSDon Lewis 			continue;
305797e9382dSDon Lewis 		}
30582c27cb3aSAlexander Motin 		cpu_idle(switchcnt * 4 > sched_idlespinthresh);
305911484ad8SMark Johnston 		atomic_store_int(&tdq->tdq_cpu_idle, 0);
30602c27cb3aSAlexander Motin 
30612c27cb3aSAlexander Motin 		/*
30622c27cb3aSAlexander Motin 		 * Account thread-less hardware interrupts and
30632c27cb3aSAlexander Motin 		 * other wakeup reasons equal to context switches.
30642c27cb3aSAlexander Motin 		 */
306511484ad8SMark Johnston 		switchcnt = TDQ_SWITCHCNT(tdq);
30662c27cb3aSAlexander Motin 		if (switchcnt != oldswitchcnt)
30672c27cb3aSAlexander Motin 			continue;
306811484ad8SMark Johnston 		TDQ_SWITCHCNT_INC(tdq);
30692c27cb3aSAlexander Motin 		oldswitchcnt++;
3070ae7a6b38SJeff Roberson 	}
3071b41f1452SDavid Xu }
3072e7d50326SJeff Roberson 
30737b20fb19SJeff Roberson /*
30746a8ea6d1SKyle Evans  * sched_throw_grab() chooses a thread from the queue to switch to
30756a8ea6d1SKyle Evans  * next.  It returns with the tdq lock dropped in a spinlock section to
30766a8ea6d1SKyle Evans  * keep interrupts disabled until the CPU is running in a proper threaded
30776a8ea6d1SKyle Evans  * context.
30786a8ea6d1SKyle Evans  */
30796a8ea6d1SKyle Evans static struct thread *
30806a8ea6d1SKyle Evans sched_throw_grab(struct tdq *tdq)
30816a8ea6d1SKyle Evans {
30826a8ea6d1SKyle Evans 	struct thread *newtd;
30836a8ea6d1SKyle Evans 
30846a8ea6d1SKyle Evans 	newtd = choosethread();
30856a8ea6d1SKyle Evans 	spinlock_enter();
30866a8ea6d1SKyle Evans 	TDQ_UNLOCK(tdq);
30876a8ea6d1SKyle Evans 	KASSERT(curthread->td_md.md_spinlock_count == 1,
30886a8ea6d1SKyle Evans 	    ("invalid count %d", curthread->td_md.md_spinlock_count));
30896a8ea6d1SKyle Evans 	return (newtd);
30906a8ea6d1SKyle Evans }
30916a8ea6d1SKyle Evans 
30926a8ea6d1SKyle Evans /*
30936a8ea6d1SKyle Evans  * A CPU is entering for the first time.
30946a8ea6d1SKyle Evans  */
30956a8ea6d1SKyle Evans void
30966a8ea6d1SKyle Evans sched_ap_entry(void)
30976a8ea6d1SKyle Evans {
30986a8ea6d1SKyle Evans 	struct thread *newtd;
30996a8ea6d1SKyle Evans 	struct tdq *tdq;
31006a8ea6d1SKyle Evans 
31016a8ea6d1SKyle Evans 	tdq = TDQ_SELF();
31026a8ea6d1SKyle Evans 
31036a8ea6d1SKyle Evans 	/* This should have been setup in schedinit_ap(). */
31046a8ea6d1SKyle Evans 	THREAD_LOCKPTR_ASSERT(curthread, TDQ_LOCKPTR(tdq));
31056a8ea6d1SKyle Evans 
31066a8ea6d1SKyle Evans 	TDQ_LOCK(tdq);
31076a8ea6d1SKyle Evans 	/* Correct spinlock nesting. */
31086a8ea6d1SKyle Evans 	spinlock_exit();
31096a8ea6d1SKyle Evans 	PCPU_SET(switchtime, cpu_ticks());
31106a8ea6d1SKyle Evans 	PCPU_SET(switchticks, ticks);
31116a8ea6d1SKyle Evans 
31126a8ea6d1SKyle Evans 	newtd = sched_throw_grab(tdq);
31136a8ea6d1SKyle Evans 
31146a8ea6d1SKyle Evans 	/* doesn't return */
31156a8ea6d1SKyle Evans 	cpu_throw(NULL, newtd);
31166a8ea6d1SKyle Evans }
31176a8ea6d1SKyle Evans 
31186a8ea6d1SKyle Evans /*
31196a8ea6d1SKyle Evans  * A thread is exiting.
31207b20fb19SJeff Roberson  */
31217b20fb19SJeff Roberson void
31227b20fb19SJeff Roberson sched_throw(struct thread *td)
31237b20fb19SJeff Roberson {
312459c68134SJeff Roberson 	struct thread *newtd;
3125ae7a6b38SJeff Roberson 	struct tdq *tdq;
3126ae7a6b38SJeff Roberson 
3127018ff686SJeff Roberson 	tdq = TDQ_SELF();
31286a8ea6d1SKyle Evans 
31296a8ea6d1SKyle Evans 	MPASS(td != NULL);
3130686bcb5cSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
3131686bcb5cSJeff Roberson 	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
31326a8ea6d1SKyle Evans 
31339727e637SJeff Roberson 	tdq_load_rem(tdq, td);
313492de34dfSJohn Baldwin 	td->td_lastcpu = td->td_oncpu;
313592de34dfSJohn Baldwin 	td->td_oncpu = NOCPU;
31361eb13fceSJeff Roberson 	thread_lock_block(td);
31376a8ea6d1SKyle Evans 
31386a8ea6d1SKyle Evans 	newtd = sched_throw_grab(tdq);
31396a8ea6d1SKyle Evans 
31401eb13fceSJeff Roberson 	/* doesn't return */
31411eb13fceSJeff Roberson 	cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
31427b20fb19SJeff Roberson }
31437b20fb19SJeff Roberson 
3144ae7a6b38SJeff Roberson /*
3145ae7a6b38SJeff Roberson  * This is called from fork_exit().  Just acquire the correct locks and
3146ae7a6b38SJeff Roberson  * let fork do the rest of the work.
3147ae7a6b38SJeff Roberson  */
31487b20fb19SJeff Roberson void
3149fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
31507b20fb19SJeff Roberson {
3151ae7a6b38SJeff Roberson 	struct tdq *tdq;
3152ae7a6b38SJeff Roberson 	int cpuid;
31537b20fb19SJeff Roberson 
31547b20fb19SJeff Roberson 	/*
31557b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
3156ae7a6b38SJeff Roberson 	 * non-nested critical section with the scheduler lock held.
31577b20fb19SJeff Roberson 	 */
3158686bcb5cSJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1,
3159686bcb5cSJeff Roberson 	    ("invalid count %d", curthread->td_md.md_spinlock_count));
3160ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
3161018ff686SJeff Roberson 	tdq = TDQ_SELF();
3162686bcb5cSJeff Roberson 	TDQ_LOCK(tdq);
3163686bcb5cSJeff Roberson 	spinlock_exit();
3164ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
3165ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
316628ef18b8SAndriy Gapon 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
316728ef18b8SAndriy Gapon 	    "prio:%d", td->td_priority);
316828ef18b8SAndriy Gapon 	SDT_PROBE0(sched, , , on__cpu);
31697b20fb19SJeff Roberson }
31707b20fb19SJeff Roberson 
31718f51ad55SJeff Roberson /*
317215b5c347SGordon Bergling  * Create on first use to catch odd startup conditions.
31738f51ad55SJeff Roberson  */
31748f51ad55SJeff Roberson char *
31758f51ad55SJeff Roberson sched_tdname(struct thread *td)
31768f51ad55SJeff Roberson {
31778f51ad55SJeff Roberson #ifdef KTR
31788f51ad55SJeff Roberson 	struct td_sched *ts;
31798f51ad55SJeff Roberson 
318093ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
31818f51ad55SJeff Roberson 	if (ts->ts_name[0] == '\0')
31828f51ad55SJeff Roberson 		snprintf(ts->ts_name, sizeof(ts->ts_name),
31838f51ad55SJeff Roberson 		    "%s tid %d", td->td_name, td->td_tid);
31848f51ad55SJeff Roberson 	return (ts->ts_name);
31858f51ad55SJeff Roberson #else
31868f51ad55SJeff Roberson 	return (td->td_name);
31878f51ad55SJeff Roberson #endif
31888f51ad55SJeff Roberson }
31898f51ad55SJeff Roberson 
319044ad5475SJohn Baldwin #ifdef KTR
319144ad5475SJohn Baldwin void
319244ad5475SJohn Baldwin sched_clear_tdname(struct thread *td)
319344ad5475SJohn Baldwin {
319444ad5475SJohn Baldwin 	struct td_sched *ts;
319544ad5475SJohn Baldwin 
319693ccd6bfSKonstantin Belousov 	ts = td_get_sched(td);
319744ad5475SJohn Baldwin 	ts->ts_name[0] = '\0';
319844ad5475SJohn Baldwin }
319944ad5475SJohn Baldwin #endif
320044ad5475SJohn Baldwin 
320107095abfSIvan Voras #ifdef SMP
320207095abfSIvan Voras 
320307095abfSIvan Voras /*
320407095abfSIvan Voras  * Build the CPU topology dump string. Is recursively called to collect
320507095abfSIvan Voras  * the topology tree.
320607095abfSIvan Voras  */
320707095abfSIvan Voras static int
320807095abfSIvan Voras sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
320907095abfSIvan Voras     int indent)
321007095abfSIvan Voras {
321171a19bdcSAttilio Rao 	char cpusetbuf[CPUSETBUFSIZ];
321207095abfSIvan Voras 	int i, first;
321307095abfSIvan Voras 
321407095abfSIvan Voras 	sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
321519b8a6dbSAndriy Gapon 	    "", 1 + indent / 2, cg->cg_level);
321671a19bdcSAttilio Rao 	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
321771a19bdcSAttilio Rao 	    cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
321807095abfSIvan Voras 	first = TRUE;
3219aefe0a8cSAlexander Motin 	for (i = cg->cg_first; i <= cg->cg_last; i++) {
322071a19bdcSAttilio Rao 		if (CPU_ISSET(i, &cg->cg_mask)) {
322107095abfSIvan Voras 			if (!first)
32220a713948SAlexander Motin 				sbuf_cat(sb, ", ");
322307095abfSIvan Voras 			else
322407095abfSIvan Voras 				first = FALSE;
322507095abfSIvan Voras 			sbuf_printf(sb, "%d", i);
322607095abfSIvan Voras 		}
322707095abfSIvan Voras 	}
32280a713948SAlexander Motin 	sbuf_cat(sb, "</cpu>\n");
322907095abfSIvan Voras 
323007095abfSIvan Voras 	if (cg->cg_flags != 0) {
3231611daf7eSIvan Voras 		sbuf_printf(sb, "%*s <flags>", indent, "");
323207095abfSIvan Voras 		if ((cg->cg_flags & CG_FLAG_HTT) != 0)
32330a713948SAlexander Motin 			sbuf_cat(sb, "<flag name=\"HTT\">HTT group</flag>");
3234a401f2d0SIvan Voras 		if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
32350a713948SAlexander Motin 			sbuf_cat(sb, "<flag name=\"THREAD\">THREAD group</flag>");
32367b55ab05SJeff Roberson 		if ((cg->cg_flags & CG_FLAG_SMT) != 0)
32370a713948SAlexander Motin 			sbuf_cat(sb, "<flag name=\"SMT\">SMT group</flag>");
3238ef50d5fbSAlexander Motin 		if ((cg->cg_flags & CG_FLAG_NODE) != 0)
32390a713948SAlexander Motin 			sbuf_cat(sb, "<flag name=\"NODE\">NUMA node</flag>");
32400a713948SAlexander Motin 		sbuf_cat(sb, "</flags>\n");
3241611daf7eSIvan Voras 	}
324207095abfSIvan Voras 
324307095abfSIvan Voras 	if (cg->cg_children > 0) {
324407095abfSIvan Voras 		sbuf_printf(sb, "%*s <children>\n", indent, "");
324507095abfSIvan Voras 		for (i = 0; i < cg->cg_children; i++)
324607095abfSIvan Voras 			sysctl_kern_sched_topology_spec_internal(sb,
324707095abfSIvan Voras 			    &cg->cg_child[i], indent+2);
324807095abfSIvan Voras 		sbuf_printf(sb, "%*s </children>\n", indent, "");
324907095abfSIvan Voras 	}
325007095abfSIvan Voras 	sbuf_printf(sb, "%*s</group>\n", indent, "");
325107095abfSIvan Voras 	return (0);
325207095abfSIvan Voras }
325307095abfSIvan Voras 
325407095abfSIvan Voras /*
325507095abfSIvan Voras  * Sysctl handler for retrieving topology dump. It's a wrapper for
325607095abfSIvan Voras  * the recursive sysctl_kern_smp_topology_spec_internal().
325707095abfSIvan Voras  */
325807095abfSIvan Voras static int
325907095abfSIvan Voras sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
326007095abfSIvan Voras {
326107095abfSIvan Voras 	struct sbuf *topo;
326207095abfSIvan Voras 	int err;
326307095abfSIvan Voras 
326407095abfSIvan Voras 	KASSERT(cpu_top != NULL, ("cpu_top isn't initialized"));
326507095abfSIvan Voras 
3266b97fa22cSIan Lepore 	topo = sbuf_new_for_sysctl(NULL, NULL, 512, req);
326707095abfSIvan Voras 	if (topo == NULL)
326807095abfSIvan Voras 		return (ENOMEM);
326907095abfSIvan Voras 
32700a713948SAlexander Motin 	sbuf_cat(topo, "<groups>\n");
327107095abfSIvan Voras 	err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1);
32720a713948SAlexander Motin 	sbuf_cat(topo, "</groups>\n");
327307095abfSIvan Voras 
327407095abfSIvan Voras 	if (err == 0) {
3275b97fa22cSIan Lepore 		err = sbuf_finish(topo);
327607095abfSIvan Voras 	}
327707095abfSIvan Voras 	sbuf_delete(topo);
327807095abfSIvan Voras 	return (err);
327907095abfSIvan Voras }
3280b67cc292SDavid Xu 
328107095abfSIvan Voras #endif
328207095abfSIvan Voras 
3283579895dfSAlexander Motin static int
3284579895dfSAlexander Motin sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
3285579895dfSAlexander Motin {
3286579895dfSAlexander Motin 	int error, new_val, period;
3287579895dfSAlexander Motin 
3288579895dfSAlexander Motin 	period = 1000000 / realstathz;
3289579895dfSAlexander Motin 	new_val = period * sched_slice;
3290579895dfSAlexander Motin 	error = sysctl_handle_int(oidp, &new_val, 0, req);
3291579895dfSAlexander Motin 	if (error != 0 || req->newptr == NULL)
3292579895dfSAlexander Motin 		return (error);
3293579895dfSAlexander Motin 	if (new_val <= 0)
3294579895dfSAlexander Motin 		return (EINVAL);
329537f4e025SAlexander Motin 	sched_slice = imax(1, (new_val + period / 2) / period);
32965e5c3873SJeff Roberson 	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
329737f4e025SAlexander Motin 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
329837f4e025SAlexander Motin 	    realstathz);
3299579895dfSAlexander Motin 	return (0);
3300579895dfSAlexander Motin }
3301579895dfSAlexander Motin 
33027029da5cSPawel Biernacki SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
33037029da5cSPawel Biernacki     "Scheduler");
3304ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
3305e7d50326SJeff Roberson     "Scheduler name");
33067029da5cSPawel Biernacki SYSCTL_PROC(_kern_sched, OID_AUTO, quantum,
33077029da5cSPawel Biernacki     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
33087029da5cSPawel Biernacki     sysctl_kern_quantum, "I",
330937f4e025SAlexander Motin     "Quantum for timeshare threads in microseconds");
3310ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
331137f4e025SAlexander Motin     "Quantum for timeshare threads in stathz ticks");
33121c119e17SAlexander Motin SYSCTL_UINT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
3313ae7a6b38SJeff Roberson     "Interactivity score threshold");
331437f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW,
331537f4e025SAlexander Motin     &preempt_thresh, 0,
331637f4e025SAlexander Motin     "Maximal (lowest) priority for preemption");
331737f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0,
331837f4e025SAlexander Motin     "Assign static kernel priorities to sleeping threads");
331937f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0,
332037f4e025SAlexander Motin     "Number of times idle thread will spin waiting for new work");
332137f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW,
332237f4e025SAlexander Motin     &sched_idlespinthresh, 0,
332337f4e025SAlexander Motin     "Threshold before we will permit idle thread spinning");
33247b8bfa0dSJeff Roberson #ifdef SMP
3325ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
3326ae7a6b38SJeff Roberson     "Number of hz ticks to keep thread affinity for");
3327ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
3328ae7a6b38SJeff Roberson     "Enables the long-term load balancer");
33297fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
33307fcf154aSJeff Roberson     &balance_interval, 0,
3331579895dfSAlexander Motin     "Average period in stathz ticks to run the long-term balancer");
3332ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
3333ae7a6b38SJeff Roberson     "Attempts to steal work from other cores before idling");
333428994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
333537f4e025SAlexander Motin     "Minimum load on remote CPU before we'll steal");
333697e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, trysteal_limit, CTLFLAG_RW, &trysteal_limit,
333797e9382dSDon Lewis     0, "Topological distance limit for stealing threads in sched_switch()");
333897e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, always_steal, CTLFLAG_RW, &always_steal, 0,
333997e9382dSDon Lewis     "Always run the stealer from the idle thread");
334007095abfSIvan Voras SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
3341c69a1a50SMateusz Guzik     CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
334207095abfSIvan Voras     "XML dump of detected CPU topology");
33437b8bfa0dSJeff Roberson #endif
3344e7d50326SJeff Roberson 
334554b0e65fSJeff Roberson /* ps compat.  All cpu percentages from ULE are weighted. */
3346a5423ea3SJeff Roberson static int ccpu = 0;
3347b05ca429SPawel Biernacki SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0,
3348b05ca429SPawel Biernacki     "Decay factor used for updating %CPU in 4BSD scheduler");
3349