xref: /freebsd/sys/kern/sched_ule.c (revision 02e2d6b44594cda2a1436e48662fbca23953eb7d)
135e6168fSJeff Roberson /*-
2e7d50326SJeff Roberson  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
335e6168fSJeff Roberson  * All rights reserved.
435e6168fSJeff Roberson  *
535e6168fSJeff Roberson  * Redistribution and use in source and binary forms, with or without
635e6168fSJeff Roberson  * modification, are permitted provided that the following conditions
735e6168fSJeff Roberson  * are met:
835e6168fSJeff Roberson  * 1. Redistributions of source code must retain the above copyright
935e6168fSJeff Roberson  *    notice unmodified, this list of conditions, and the following
1035e6168fSJeff Roberson  *    disclaimer.
1135e6168fSJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
1235e6168fSJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
1335e6168fSJeff Roberson  *    documentation and/or other materials provided with the distribution.
1435e6168fSJeff Roberson  *
1535e6168fSJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1635e6168fSJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1735e6168fSJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1835e6168fSJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1935e6168fSJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2035e6168fSJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2135e6168fSJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2235e6168fSJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2335e6168fSJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2435e6168fSJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2535e6168fSJeff Roberson  */
2635e6168fSJeff Roberson 
27ae7a6b38SJeff Roberson /*
28ae7a6b38SJeff Roberson  * This file implements the ULE scheduler.  ULE supports independent CPU
29ae7a6b38SJeff Roberson  * run queues and fine-grained locking.  It has superior interactive
30ae7a6b38SJeff Roberson  * performance under load even on uniprocessor systems.
31ae7a6b38SJeff Roberson  *
32ae7a6b38SJeff Roberson  * etymology:
33a5423ea3SJeff Roberson  *   ULE is the last three letters in schedule.  It owes its name to a
34ae7a6b38SJeff Roberson  * generic user created for a scheduling system by Paul Mikesell at
35ae7a6b38SJeff Roberson  * Isilon Systems and a general lack of creativity on the part of the author.
36ae7a6b38SJeff Roberson  */
37ae7a6b38SJeff Roberson 
38677b542eSDavid E. O'Brien #include <sys/cdefs.h>
39677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
40677b542eSDavid E. O'Brien 
414da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
424da0d332SPeter Wemm #include "opt_sched.h"
439923b511SScott Long 
4435e6168fSJeff Roberson #include <sys/param.h>
4535e6168fSJeff Roberson #include <sys/systm.h>
462c3490b1SMarcel Moolenaar #include <sys/kdb.h>
4735e6168fSJeff Roberson #include <sys/kernel.h>
4835e6168fSJeff Roberson #include <sys/ktr.h>
4935e6168fSJeff Roberson #include <sys/lock.h>
5035e6168fSJeff Roberson #include <sys/mutex.h>
5135e6168fSJeff Roberson #include <sys/proc.h>
52245f3abfSJeff Roberson #include <sys/resource.h>
539bacd788SJeff Roberson #include <sys/resourcevar.h>
5435e6168fSJeff Roberson #include <sys/sched.h>
5535e6168fSJeff Roberson #include <sys/smp.h>
5635e6168fSJeff Roberson #include <sys/sx.h>
5735e6168fSJeff Roberson #include <sys/sysctl.h>
5835e6168fSJeff Roberson #include <sys/sysproto.h>
59f5c157d9SJohn Baldwin #include <sys/turnstile.h>
603db720fdSDavid Xu #include <sys/umtx.h>
6135e6168fSJeff Roberson #include <sys/vmmeter.h>
6235e6168fSJeff Roberson #ifdef KTRACE
6335e6168fSJeff Roberson #include <sys/uio.h>
6435e6168fSJeff Roberson #include <sys/ktrace.h>
6535e6168fSJeff Roberson #endif
6635e6168fSJeff Roberson 
67ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS
68ebccf1e3SJoseph Koshy #include <sys/pmckern.h>
69ebccf1e3SJoseph Koshy #endif
70ebccf1e3SJoseph Koshy 
7135e6168fSJeff Roberson #include <machine/cpu.h>
7222bf7d9aSJeff Roberson #include <machine/smp.h>
7335e6168fSJeff Roberson 
7402e2d6b4SJeff Roberson #if !defined(__i386__) && !defined(__amd64__)
7502e2d6b4SJeff Roberson #error "This architecture is not currently compatible with ULE"
767a5e5e2aSJeff Roberson #endif
777a5e5e2aSJeff Roberson 
78ae7a6b38SJeff Roberson #define	KTR_ULE	0
7914618990SJeff Roberson 
806b2f763fSJeff Roberson /*
81ae7a6b38SJeff Roberson  * Thread scheduler specific section.  All fields are protected
82ae7a6b38SJeff Roberson  * by the thread lock.
83ed062c8dSJulian Elischer  */
84ad1e7d28SJulian Elischer struct td_sched {
85ae7a6b38SJeff Roberson 	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
86ae7a6b38SJeff Roberson 	struct thread	*ts_thread;	/* Active associated thread. */
87ae7a6b38SJeff Roberson 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
88ae7a6b38SJeff Roberson 	short		ts_flags;	/* TSF_* flags. */
89ae7a6b38SJeff Roberson 	u_char		ts_rqindex;	/* Run queue index. */
90ad1e7d28SJulian Elischer 	u_char		ts_cpu;		/* CPU that we have affinity for. */
91ae7a6b38SJeff Roberson 	int		ts_slice;	/* Ticks of slice remaining. */
92ae7a6b38SJeff Roberson 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
93ae7a6b38SJeff Roberson 	u_int		ts_runtime;	/* Number of ticks we were running */
94ed062c8dSJulian Elischer 	/* The following variables are only used for pctcpu calculation */
95ad1e7d28SJulian Elischer 	int		ts_ltick;	/* Last tick that we were running on */
96ad1e7d28SJulian Elischer 	int		ts_ftick;	/* First tick that we were running on */
97ad1e7d28SJulian Elischer 	int		ts_ticks;	/* Tick count */
987b8bfa0dSJeff Roberson #ifdef SMP
997b8bfa0dSJeff Roberson 	int		ts_rltick;	/* Real last tick, for affinity. */
1007b8bfa0dSJeff Roberson #endif
101ed062c8dSJulian Elischer };
102ad1e7d28SJulian Elischer /* flags kept in ts_flags */
1037b8bfa0dSJeff Roberson #define	TSF_BOUND	0x0001		/* Thread cannot migrate. */
1047b8bfa0dSJeff Roberson #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
10535e6168fSJeff Roberson 
106ad1e7d28SJulian Elischer static struct td_sched td_sched0;
10735e6168fSJeff Roberson 
10835e6168fSJeff Roberson /*
109e7d50326SJeff Roberson  * Cpu percentage computation macros and defines.
110e1f89c22SJeff Roberson  *
111e7d50326SJeff Roberson  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
112e7d50326SJeff Roberson  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
1138ab80cf0SJeff Roberson  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
114e7d50326SJeff Roberson  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
115e7d50326SJeff Roberson  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
116e7d50326SJeff Roberson  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
11735e6168fSJeff Roberson  */
118e7d50326SJeff Roberson #define	SCHED_TICK_SECS		10
119e7d50326SJeff Roberson #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
1208ab80cf0SJeff Roberson #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
121e7d50326SJeff Roberson #define	SCHED_TICK_SHIFT	10
122e7d50326SJeff Roberson #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
123eddb4efaSJeff Roberson #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
12435e6168fSJeff Roberson 
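/*
 * Illustrative example (editorial note, not part of the original source;
 * hz = 1000 is an assumed configuration): SCHED_TICK_TARG is then 10000
 * hz ticks and SCHED_TICK_MAX is 11000.  ts_ticks accumulates in the
 * scaled domain, so a thread whose ts_ticks holds 512000 has
 * SCHED_TICK_HZ(ts) == 512000 >> 10 == 500 hz ticks of cpu time.  If its
 * recorded window is ts_ltick - ts_ftick == 1000, SCHED_TICK_TOTAL(ts)
 * is 1000 and the thread was on cpu for roughly half of the tracked time.
 */
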
12535e6168fSJeff Roberson /*
126e7d50326SJeff Roberson  * These macros determine priorities for non-interactive threads.  They are
127e7d50326SJeff Roberson  * assigned a priority based on their recent cpu utilization as expressed
128e7d50326SJeff Roberson  * by the ratio of ticks to the tick total.  NHALF priorities at the start
129e7d50326SJeff Roberson  * and end of the MIN to MAX timeshare range are only reachable with negative
130e7d50326SJeff Roberson  * or positive nice respectively.
131e7d50326SJeff Roberson  *
132e7d50326SJeff Roberson  * PRI_RANGE:	Priority range for utilization dependent priorities.
133e7d50326SJeff Roberson  * PRI_NRESV:	Number of nice values.
134e7d50326SJeff Roberson  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
135e7d50326SJeff Roberson  * PRI_NICE:	Determines the part of the priority inherited from nice.
136e7d50326SJeff Roberson  */
137e7d50326SJeff Roberson #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
138e7d50326SJeff Roberson #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
139e7d50326SJeff Roberson #define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
140e7d50326SJeff Roberson #define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
141dda713dfSJeff Roberson #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
142e7d50326SJeff Roberson #define	SCHED_PRI_TICKS(ts)						\
143e7d50326SJeff Roberson     (SCHED_TICK_HZ((ts)) /						\
1441e516cf5SJeff Roberson     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
145e7d50326SJeff Roberson #define	SCHED_PRI_NICE(nice)	(nice)
146e7d50326SJeff Roberson 
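/*
 * Worked example (editorial, not in the original source; assumes the
 * stock priority layout of this era, PRI_MIN_TIMESHARE == 160 and
 * PRI_MAX_TIMESHARE == 223, with PRIO_MIN/PRIO_MAX of -20/20):
 * SCHED_PRI_NRESV == 40 and SCHED_PRI_NHALF == 20, so SCHED_PRI_MIN ==
 * 180, SCHED_PRI_MAX == 203 and SCHED_PRI_RANGE == 23.  A thread that
 * ran for half of its window (SCHED_TICK_HZ == 500, SCHED_TICK_TOTAL ==
 * 1000) gets SCHED_PRI_TICKS == 500 / (roundup(1000, 23) / 23) ==
 * 500 / 44 == 11, roughly the middle of the utilization range, while
 * SCHED_PRI_NICE simply passes the nice value through.
 */
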
147e7d50326SJeff Roberson /*
148e7d50326SJeff Roberson  * These determine the interactivity of a process.  Interactivity differs from
149e7d50326SJeff Roberson  * cpu utilization in that it expresses the voluntary time slept vs. time run,
150e7d50326SJeff Roberson  * while cpu utilization includes all time not running.  This more accurately
151e7d50326SJeff Roberson  * models the intent of the thread.
15235e6168fSJeff Roberson  *
153407b0157SJeff Roberson  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
154407b0157SJeff Roberson  *		before throttling back.
155d322132cSJeff Roberson  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
156210491d3SJeff Roberson  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
157e1f89c22SJeff Roberson  * INTERACT_THRESH:	Threshold for placement on the current runq.
15835e6168fSJeff Roberson  */
159e7d50326SJeff Roberson #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
160e7d50326SJeff Roberson #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
161210491d3SJeff Roberson #define	SCHED_INTERACT_MAX	(100)
162210491d3SJeff Roberson #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
1634c9612c6SJeff Roberson #define	SCHED_INTERACT_THRESH	(30)
164e1f89c22SJeff Roberson 
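/*
 * Illustrative numbers (editorial, assuming hz = 1000): SCHED_SLP_RUN_MAX
 * is (1000 * 5) << 10 == 5120000 scaled ticks, i.e. about five seconds of
 * combined sleep + run history, and SCHED_SLP_RUN_FORK is about half a
 * second of history inherited by a child at fork.  The interactivity
 * score lives on a 0..SCHED_INTERACT_MAX (100) scale where smaller means
 * more interactive; threads scoring below SCHED_INTERACT_THRESH (30) are
 * treated as interactive when placed on a run queue.
 */
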
16535e6168fSJeff Roberson /*
166e7d50326SJeff Roberson  * tickincr:		Converts a stathz tick into a hz domain scaled by
167e7d50326SJeff Roberson  *			the shift factor.  Without the shift the error rate
168e7d50326SJeff Roberson  *			due to rounding would be unacceptably high.
169e7d50326SJeff Roberson  * realstathz:		stathz is sometimes 0, so we run off of hz instead.
170e7d50326SJeff Roberson  * sched_slice:		Runtime of each thread before rescheduling.
171ae7a6b38SJeff Roberson  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
17235e6168fSJeff Roberson  */
173e7d50326SJeff Roberson static int sched_interact = SCHED_INTERACT_THRESH;
174e7d50326SJeff Roberson static int realstathz;
175e7d50326SJeff Roberson static int tickincr;
176e7d50326SJeff Roberson static int sched_slice;
17702e2d6b4SJeff Roberson #ifdef PREEMPTION
17802e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION
17902e2d6b4SJeff Roberson static int preempt_thresh = PRI_MAX_IDLE;
18002e2d6b4SJeff Roberson #else
181ae7a6b38SJeff Roberson static int preempt_thresh = PRI_MIN_KERN;
18202e2d6b4SJeff Roberson #endif
18302e2d6b4SJeff Roberson #else
18402e2d6b4SJeff Roberson static int preempt_thresh = 0;
18502e2d6b4SJeff Roberson #endif
186ae7a6b38SJeff Roberson 
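/*
 * Editorial example of why the shift matters (assumes hz = 1000 and
 * stathz = 133, and that sched_initticks() derives tickincr as
 * (hz << SCHED_TICK_SHIFT) / realstathz): the unscaled ratio 1000 / 133
 * truncates to 7, an error of almost 7% per stathz tick, while the
 * scaled form (1000 << 10) / 133 == 7699 recovers 7699 / 1024 ~= 7.52,
 * a rounding error well under 0.1%.
 */
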
18735e6168fSJeff Roberson /*
188ae7a6b38SJeff Roberson  * tdq - per processor runqs and statistics.  All fields are protected by the
189ae7a6b38SJeff Roberson  * tdq_lock.  The load and lowpri may be accessed without the lock held to
190ae7a6b38SJeff Roberson  * avoid excess locking in sched_pickcpu().
19135e6168fSJeff Roberson  */
192ad1e7d28SJulian Elischer struct tdq {
193c47f202bSJeff Roberson 	struct mtx	*tdq_lock;		/* Pointer to group lock. */
194e7d50326SJeff Roberson 	struct runq	tdq_realtime;		/* real-time run queue. */
195ae7a6b38SJeff Roberson 	struct runq	tdq_timeshare;		/* timeshare run queue. */
196ae7a6b38SJeff Roberson 	struct runq	tdq_idle;		/* Queue of IDLE threads. */
197ae7a6b38SJeff Roberson 	int		tdq_load;		/* Aggregate load. */
198ed0e8f2fSJeff Roberson 	u_char		tdq_idx;		/* Current insert index. */
199ed0e8f2fSJeff Roberson 	u_char		tdq_ridx;		/* Current removal index. */
2005d7ef00cSJeff Roberson #ifdef SMP
201ae7a6b38SJeff Roberson 	u_char		tdq_lowpri;		/* Lowest priority thread. */
202ae7a6b38SJeff Roberson 	int		tdq_transferable;	/* Transferable thread count. */
203d2ad694cSJeff Roberson 	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
204d2ad694cSJeff Roberson 	struct tdq_group *tdq_group;		/* Our processor group. */
20533916c36SJeff Roberson #else
206d2ad694cSJeff Roberson 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
2075d7ef00cSJeff Roberson #endif
208ae7a6b38SJeff Roberson } __aligned(64);
20935e6168fSJeff Roberson 
2107b8bfa0dSJeff Roberson 
21180f86c9fSJeff Roberson #ifdef SMP
21280f86c9fSJeff Roberson /*
213ad1e7d28SJulian Elischer  * tdq groups are groups of processors which can cheaply share threads.  When
21480f86c9fSJeff Roberson  * one processor in the group goes idle it will check the runqs of the other
21580f86c9fSJeff Roberson  * processors in its group prior to halting and waiting for an interrupt.
21680f86c9fSJeff Roberson  * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
21780f86c9fSJeff Roberson  * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
21880f86c9fSJeff Roberson  * load balancer.
21980f86c9fSJeff Roberson  */
220ad1e7d28SJulian Elischer struct tdq_group {
221c47f202bSJeff Roberson 	struct mtx	tdg_lock;	/* Protects all fields below. */
222d2ad694cSJeff Roberson 	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
223d2ad694cSJeff Roberson 	cpumask_t 	tdg_cpumask;	/* Mask of cpus in this group. */
224d2ad694cSJeff Roberson 	cpumask_t 	tdg_idlemask;	/* Idle cpus in this group. */
225d2ad694cSJeff Roberson 	cpumask_t 	tdg_mask;	/* Bit mask for first cpu. */
226d2ad694cSJeff Roberson 	int		tdg_load;	/* Total load of this group. */
227d2ad694cSJeff Roberson 	int	tdg_transferable;	/* Transferable load of this group. */
228d2ad694cSJeff Roberson 	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
229c47f202bSJeff Roberson 	char		tdg_name[16];	/* lock name. */
230ae7a6b38SJeff Roberson } __aligned(64);
2317b8bfa0dSJeff Roberson 
232ae7a6b38SJeff Roberson #define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 300))
2337b8bfa0dSJeff Roberson #define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)
2347b8bfa0dSJeff Roberson 
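/*
 * Editorial example (assuming hz = 1000): SCHED_AFFINITY_DEFAULT is
 * max(1, 1000 / 300) == 3 ticks, so SCHED_AFFINITY(ts) is true only if
 * the thread last ran on ts_cpu (ts_rltick) within the last ~3ms; after
 * that the scheduler assumes any cache affinity has decayed.
 */
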
2357b8bfa0dSJeff Roberson /*
2367b8bfa0dSJeff Roberson  * Run-time tunables.
2377b8bfa0dSJeff Roberson  */
23828994a58SJeff Roberson static int rebalance = 1;
23928994a58SJeff Roberson static int balance_secs = 1;
24028994a58SJeff Roberson static int pick_pri = 1;
2417b8bfa0dSJeff Roberson static int affinity;
2427b8bfa0dSJeff Roberson static int tryself = 1;
243ae7a6b38SJeff Roberson static int steal_htt = 0;
24428994a58SJeff Roberson static int steal_idle = 1;
24528994a58SJeff Roberson static int steal_thresh = 2;
2467b20fb19SJeff Roberson static int topology = 0;
24780f86c9fSJeff Roberson 
24835e6168fSJeff Roberson /*
249d2ad694cSJeff Roberson  * One thread queue per processor.
25035e6168fSJeff Roberson  */
2517b8bfa0dSJeff Roberson static volatile cpumask_t tdq_idle;
252d2ad694cSJeff Roberson static int tdg_maxid;
253ad1e7d28SJulian Elischer static struct tdq	tdq_cpu[MAXCPU];
254ad1e7d28SJulian Elischer static struct tdq_group tdq_groups[MAXCPU];
255ae7a6b38SJeff Roberson static struct callout balco;
256ae7a6b38SJeff Roberson static struct callout gbalco;
257dc03363dSJeff Roberson 
258ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
259ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu[(x)])
260c47f202bSJeff Roberson #define	TDQ_ID(x)	((int)((x) - tdq_cpu))
261ad1e7d28SJulian Elischer #define	TDQ_GROUP(x)	(&tdq_groups[(x)])
262c47f202bSJeff Roberson #define	TDG_ID(x)	((int)((x) - tdq_groups))
26380f86c9fSJeff Roberson #else	/* !SMP */
264ad1e7d28SJulian Elischer static struct tdq	tdq_cpu;
265c47f202bSJeff Roberson static struct mtx	tdq_lock;
266dc03363dSJeff Roberson 
26736b36916SJeff Roberson #define	TDQ_ID(x)	(0)
268ad1e7d28SJulian Elischer #define	TDQ_SELF()	(&tdq_cpu)
269ad1e7d28SJulian Elischer #define	TDQ_CPU(x)	(&tdq_cpu)
2700a016a05SJeff Roberson #endif
27135e6168fSJeff Roberson 
272ae7a6b38SJeff Roberson #define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
273ae7a6b38SJeff Roberson #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
274ae7a6b38SJeff Roberson #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
275ae7a6b38SJeff Roberson #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
276c47f202bSJeff Roberson #define	TDQ_LOCKPTR(t)		((t)->tdq_lock)
277ae7a6b38SJeff Roberson 
2788460a577SJohn Birrell static void sched_priority(struct thread *);
27921381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char);
2808460a577SJohn Birrell static int sched_interact_score(struct thread *);
2818460a577SJohn Birrell static void sched_interact_update(struct thread *);
2828460a577SJohn Birrell static void sched_interact_fork(struct thread *);
283ad1e7d28SJulian Elischer static void sched_pctcpu_update(struct td_sched *);
28435e6168fSJeff Roberson 
2855d7ef00cSJeff Roberson /* Operations on per processor queues */
286ad1e7d28SJulian Elischer static struct td_sched * tdq_choose(struct tdq *);
287ad1e7d28SJulian Elischer static void tdq_setup(struct tdq *);
288ad1e7d28SJulian Elischer static void tdq_load_add(struct tdq *, struct td_sched *);
289ad1e7d28SJulian Elischer static void tdq_load_rem(struct tdq *, struct td_sched *);
290ad1e7d28SJulian Elischer static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
291ad1e7d28SJulian Elischer static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
292ad1e7d28SJulian Elischer void tdq_print(int cpu);
293e7d50326SJeff Roberson static void runq_print(struct runq *rq);
294ae7a6b38SJeff Roberson static void tdq_add(struct tdq *, struct thread *, int);
2955d7ef00cSJeff Roberson #ifdef SMP
296ae7a6b38SJeff Roberson static void tdq_move(struct tdq *, struct tdq *);
297ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *);
2987b8bfa0dSJeff Roberson static void tdq_notify(struct td_sched *);
299ad1e7d28SJulian Elischer static struct td_sched *tdq_steal(struct tdq *, int);
300ae7a6b38SJeff Roberson static struct td_sched *runq_steal(struct runq *);
301ae7a6b38SJeff Roberson static int sched_pickcpu(struct td_sched *, int);
302ae7a6b38SJeff Roberson static void sched_balance(void *);
303ae7a6b38SJeff Roberson static void sched_balance_groups(void *);
304ae7a6b38SJeff Roberson static void sched_balance_group(struct tdq_group *);
305ae7a6b38SJeff Roberson static void sched_balance_pair(struct tdq *, struct tdq *);
306ae7a6b38SJeff Roberson static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
307ae7a6b38SJeff Roberson static inline struct mtx *thread_block_switch(struct thread *);
308ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *);
309c47f202bSJeff Roberson static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
3101e516cf5SJeff Roberson 
3117b8bfa0dSJeff Roberson #define	THREAD_CAN_MIGRATE(td)	 ((td)->td_pinned == 0)
3125d7ef00cSJeff Roberson #endif
3135d7ef00cSJeff Roberson 
314e7d50326SJeff Roberson static void sched_setup(void *dummy);
315e7d50326SJeff Roberson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
316e7d50326SJeff Roberson 
317e7d50326SJeff Roberson static void sched_initticks(void *dummy);
318e7d50326SJeff Roberson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
319e7d50326SJeff Roberson 
320ae7a6b38SJeff Roberson /*
321ae7a6b38SJeff Roberson  * Print the threads waiting on a run-queue.
322ae7a6b38SJeff Roberson  */
323e7d50326SJeff Roberson static void
324e7d50326SJeff Roberson runq_print(struct runq *rq)
325e7d50326SJeff Roberson {
326e7d50326SJeff Roberson 	struct rqhead *rqh;
327e7d50326SJeff Roberson 	struct td_sched *ts;
328e7d50326SJeff Roberson 	int pri;
329e7d50326SJeff Roberson 	int j;
330e7d50326SJeff Roberson 	int i;
331e7d50326SJeff Roberson 
332e7d50326SJeff Roberson 	for (i = 0; i < RQB_LEN; i++) {
333e7d50326SJeff Roberson 		printf("\t\trunq bits %d 0x%zx\n",
334e7d50326SJeff Roberson 		    i, rq->rq_status.rqb_bits[i]);
335e7d50326SJeff Roberson 		for (j = 0; j < RQB_BPW; j++)
336e7d50326SJeff Roberson 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
337e7d50326SJeff Roberson 				pri = j + (i << RQB_L2BPW);
338e7d50326SJeff Roberson 				rqh = &rq->rq_queues[pri];
339e7d50326SJeff Roberson 				TAILQ_FOREACH(ts, rqh, ts_procq) {
340e7d50326SJeff Roberson 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
341e7d50326SJeff Roberson 					    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
342e7d50326SJeff Roberson 				}
343e7d50326SJeff Roberson 			}
344e7d50326SJeff Roberson 	}
345e7d50326SJeff Roberson }
346e7d50326SJeff Roberson 
347ae7a6b38SJeff Roberson /*
348ae7a6b38SJeff Roberson  * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
349ae7a6b38SJeff Roberson  */
35015dc847eSJeff Roberson void
351ad1e7d28SJulian Elischer tdq_print(int cpu)
35215dc847eSJeff Roberson {
353ad1e7d28SJulian Elischer 	struct tdq *tdq;
35415dc847eSJeff Roberson 
355ad1e7d28SJulian Elischer 	tdq = TDQ_CPU(cpu);
35615dc847eSJeff Roberson 
357c47f202bSJeff Roberson 	printf("tdq %d:\n", TDQ_ID(tdq));
358ae7a6b38SJeff Roberson 	printf("\tlockptr         %p\n", TDQ_LOCKPTR(tdq));
359d2ad694cSJeff Roberson 	printf("\tload:           %d\n", tdq->tdq_load);
360e7d50326SJeff Roberson 	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
3613f872f85SJeff Roberson 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
362e7d50326SJeff Roberson 	printf("\trealtime runq:\n");
363e7d50326SJeff Roberson 	runq_print(&tdq->tdq_realtime);
364e7d50326SJeff Roberson 	printf("\ttimeshare runq:\n");
365e7d50326SJeff Roberson 	runq_print(&tdq->tdq_timeshare);
366e7d50326SJeff Roberson 	printf("\tidle runq:\n");
367e7d50326SJeff Roberson 	runq_print(&tdq->tdq_idle);
368ef1134c9SJeff Roberson #ifdef SMP
369d2ad694cSJeff Roberson 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
370ae7a6b38SJeff Roberson 	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
371c47f202bSJeff Roberson 	printf("\tgroup:             %d\n", TDG_ID(tdq->tdq_group));
372c47f202bSJeff Roberson 	printf("\tLock name:         %s\n", tdq->tdq_group->tdg_name);
373ef1134c9SJeff Roberson #endif
37415dc847eSJeff Roberson }
37515dc847eSJeff Roberson 
376ae7a6b38SJeff Roberson #define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
377ae7a6b38SJeff Roberson /*
378ae7a6b38SJeff Roberson  * Add a thread to the actual run-queue.  Keeps transferable counts up to
379ae7a6b38SJeff Roberson  * date with what is actually on the run-queue.  Selects the correct
380ae7a6b38SJeff Roberson  * queue position for timeshare threads.
381ae7a6b38SJeff Roberson  */
382155b9987SJeff Roberson static __inline void
383ad1e7d28SJulian Elischer tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
384155b9987SJeff Roberson {
385ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
386ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
387155b9987SJeff Roberson #ifdef SMP
388e7d50326SJeff Roberson 	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
389d2ad694cSJeff Roberson 		tdq->tdq_transferable++;
390d2ad694cSJeff Roberson 		tdq->tdq_group->tdg_transferable++;
391ad1e7d28SJulian Elischer 		ts->ts_flags |= TSF_XFERABLE;
39280f86c9fSJeff Roberson 	}
393155b9987SJeff Roberson #endif
394e7d50326SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
395ed0e8f2fSJeff Roberson 		u_char pri;
396e7d50326SJeff Roberson 
397e7d50326SJeff Roberson 		pri = ts->ts_thread->td_priority;
398e7d50326SJeff Roberson 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
399e7d50326SJeff Roberson 			("Invalid priority %d on timeshare runq", pri));
400e7d50326SJeff Roberson 		/*
401e7d50326SJeff Roberson 		 * This queue contains only priorities between MIN and MAX
402e7d50326SJeff Roberson 		 * timeshare.  Use the whole queue to represent these values.
403e7d50326SJeff Roberson 		 */
404c47f202bSJeff Roberson 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
405e7d50326SJeff Roberson 			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
406e7d50326SJeff Roberson 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
4073f872f85SJeff Roberson 			/*
4083f872f85SJeff Roberson 			 * This effectively shortens the queue by one so we
4093f872f85SJeff Roberson 			 * can have a one slot difference between idx and
4103f872f85SJeff Roberson 			 * ridx while we wait for threads to drain.
4113f872f85SJeff Roberson 			 */
4123f872f85SJeff Roberson 			if (tdq->tdq_ridx != tdq->tdq_idx &&
4133f872f85SJeff Roberson 			    pri == tdq->tdq_ridx)
4144499aff6SJeff Roberson 				pri = (unsigned char)(pri - 1) % RQ_NQS;
415e7d50326SJeff Roberson 		} else
4163f872f85SJeff Roberson 			pri = tdq->tdq_ridx;
417e7d50326SJeff Roberson 		runq_add_pri(ts->ts_runq, ts, pri, flags);
418e7d50326SJeff Roberson 	} else
419ad1e7d28SJulian Elischer 		runq_add(ts->ts_runq, ts, flags);
420155b9987SJeff Roberson }
421155b9987SJeff Roberson 
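/*
 * Editorial walk-through of the timeshare insertion above (assumes the
 * stock 64-entry run queue, RQ_NQS == 64, and a 64-priority timeshare
 * range so TS_RQ_PPQ == 1): a thread at priority 190 with
 * PRI_MIN_TIMESHARE == 160 maps to slot (190 - 160) / 1 == 30; with
 * tdq_idx == 10 it is inserted at (30 + 10) % 64 == 40, so better
 * priorities land closer to the rotating head tdq_ridx and run sooner.
 * SRQ_BORROWING/SRQ_PREEMPTED insertions bypass the calculation and go
 * straight to tdq_ridx so they are picked up again almost immediately.
 */
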
422ae7a6b38SJeff Roberson /*
423ae7a6b38SJeff Roberson  * Remove a thread from a run-queue.  This typically happens when a thread
424ae7a6b38SJeff Roberson  * is selected to run.  Running threads are not on the queue and the
425ae7a6b38SJeff Roberson  * transferable count does not reflect them.
426ae7a6b38SJeff Roberson  */
427155b9987SJeff Roberson static __inline void
428ad1e7d28SJulian Elischer tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
429155b9987SJeff Roberson {
430ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
431ae7a6b38SJeff Roberson 	KASSERT(ts->ts_runq != NULL,
432ae7a6b38SJeff Roberson 	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
433155b9987SJeff Roberson #ifdef SMP
434ad1e7d28SJulian Elischer 	if (ts->ts_flags & TSF_XFERABLE) {
435d2ad694cSJeff Roberson 		tdq->tdq_transferable--;
436d2ad694cSJeff Roberson 		tdq->tdq_group->tdg_transferable--;
437ad1e7d28SJulian Elischer 		ts->ts_flags &= ~TSF_XFERABLE;
43880f86c9fSJeff Roberson 	}
439155b9987SJeff Roberson #endif
4403f872f85SJeff Roberson 	if (ts->ts_runq == &tdq->tdq_timeshare) {
4413f872f85SJeff Roberson 		if (tdq->tdq_idx != tdq->tdq_ridx)
4423f872f85SJeff Roberson 			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
443e7d50326SJeff Roberson 		else
4443f872f85SJeff Roberson 			runq_remove_idx(ts->ts_runq, ts, NULL);
4458ab80cf0SJeff Roberson 		/*
4468ab80cf0SJeff Roberson 		 * For timeshare threads we update the priority here so
4478ab80cf0SJeff Roberson 		 * the priority reflects the time we've been sleeping.
4488ab80cf0SJeff Roberson 		 */
4498ab80cf0SJeff Roberson 		ts->ts_ltick = ticks;
4508ab80cf0SJeff Roberson 		sched_pctcpu_update(ts);
4518ab80cf0SJeff Roberson 		sched_priority(ts->ts_thread);
4523f872f85SJeff Roberson 	} else
453ad1e7d28SJulian Elischer 		runq_remove(ts->ts_runq, ts);
454155b9987SJeff Roberson }
455155b9987SJeff Roberson 
456ae7a6b38SJeff Roberson /*
457ae7a6b38SJeff Roberson  * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
458ae7a6b38SJeff Roberson  * for this thread to the referenced thread queue.
459ae7a6b38SJeff Roberson  */
460a8949de2SJeff Roberson static void
461ad1e7d28SJulian Elischer tdq_load_add(struct tdq *tdq, struct td_sched *ts)
4625d7ef00cSJeff Roberson {
463ef1134c9SJeff Roberson 	int class;
464ae7a6b38SJeff Roberson 
465ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
466ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
467ad1e7d28SJulian Elischer 	class = PRI_BASE(ts->ts_thread->td_pri_class);
468d2ad694cSJeff Roberson 	tdq->tdq_load++;
469c47f202bSJeff Roberson 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
4707b8bfa0dSJeff Roberson 	if (class != PRI_ITHD &&
4717b8bfa0dSJeff Roberson 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
47233916c36SJeff Roberson #ifdef SMP
473d2ad694cSJeff Roberson 		tdq->tdq_group->tdg_load++;
47433916c36SJeff Roberson #else
475d2ad694cSJeff Roberson 		tdq->tdq_sysload++;
476cac77d04SJeff Roberson #endif
4775d7ef00cSJeff Roberson }
47815dc847eSJeff Roberson 
479ae7a6b38SJeff Roberson /*
480ae7a6b38SJeff Roberson  * Remove the load from a thread that is transitioning to a sleep state or
481ae7a6b38SJeff Roberson  * exiting.
482ae7a6b38SJeff Roberson  */
483a8949de2SJeff Roberson static void
484ad1e7d28SJulian Elischer tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
4855d7ef00cSJeff Roberson {
486ef1134c9SJeff Roberson 	int class;
487ae7a6b38SJeff Roberson 
488ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
489ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
490ad1e7d28SJulian Elischer 	class = PRI_BASE(ts->ts_thread->td_pri_class);
4917b8bfa0dSJeff Roberson 	if (class != PRI_ITHD &&
4927b8bfa0dSJeff Roberson 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
49333916c36SJeff Roberson #ifdef SMP
494d2ad694cSJeff Roberson 		tdq->tdq_group->tdg_load--;
49533916c36SJeff Roberson #else
496d2ad694cSJeff Roberson 		tdq->tdq_sysload--;
497cac77d04SJeff Roberson #endif
498ae7a6b38SJeff Roberson 	KASSERT(tdq->tdq_load != 0,
499c47f202bSJeff Roberson 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
500d2ad694cSJeff Roberson 	tdq->tdq_load--;
501d2ad694cSJeff Roberson 	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
502ad1e7d28SJulian Elischer 	ts->ts_runq = NULL;
50315dc847eSJeff Roberson }
50415dc847eSJeff Roberson 
5055d7ef00cSJeff Roberson #ifdef SMP
506356500a3SJeff Roberson /*
507155b9987SJeff Roberson  * sched_balance is a simple CPU load balancing algorithm.  It operates by
508356500a3SJeff Roberson  * finding the least loaded and most loaded cpu and equalizing their load
509356500a3SJeff Roberson  * by migrating some processes.
510356500a3SJeff Roberson  *
511356500a3SJeff Roberson  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
512356500a3SJeff Roberson  * installations will only have 2 cpus.  Secondly, load balancing too much at
513356500a3SJeff Roberson  * once can have an unpleasant effect on the system.  The scheduler rarely has
514356500a3SJeff Roberson  * enough information to make perfect decisions.  So this algorithm chooses
515ae7a6b38SJeff Roberson  * simplicity and more gradual effects on load in larger systems.
516356500a3SJeff Roberson  *
517356500a3SJeff Roberson  */
51822bf7d9aSJeff Roberson static void
519ae7a6b38SJeff Roberson sched_balance(void *arg)
520356500a3SJeff Roberson {
521ad1e7d28SJulian Elischer 	struct tdq_group *high;
522ad1e7d28SJulian Elischer 	struct tdq_group *low;
523d2ad694cSJeff Roberson 	struct tdq_group *tdg;
524cac77d04SJeff Roberson 	int cnt;
525356500a3SJeff Roberson 	int i;
526356500a3SJeff Roberson 
52728994a58SJeff Roberson 	callout_reset(&balco, max(hz / 2, random() % (hz * balance_secs)),
528ae7a6b38SJeff Roberson 	    sched_balance, NULL);
529ae7a6b38SJeff Roberson 	if (smp_started == 0 || rebalance == 0)
530598b368dSJeff Roberson 		return;
531cac77d04SJeff Roberson 	low = high = NULL;
532d2ad694cSJeff Roberson 	i = random() % (tdg_maxid + 1);
533d2ad694cSJeff Roberson 	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
534d2ad694cSJeff Roberson 		tdg = TDQ_GROUP(i);
535cac77d04SJeff Roberson 		/*
536cac77d04SJeff Roberson 		 * Find the CPU with the highest load that has some
537cac77d04SJeff Roberson 		 * threads to transfer.
538cac77d04SJeff Roberson 		 */
539d2ad694cSJeff Roberson 		if ((high == NULL || tdg->tdg_load > high->tdg_load)
540d2ad694cSJeff Roberson 		    && tdg->tdg_transferable)
541d2ad694cSJeff Roberson 			high = tdg;
542d2ad694cSJeff Roberson 		if (low == NULL || tdg->tdg_load < low->tdg_load)
543d2ad694cSJeff Roberson 			low = tdg;
544d2ad694cSJeff Roberson 		if (++i > tdg_maxid)
545cac77d04SJeff Roberson 			i = 0;
546cac77d04SJeff Roberson 	}
547cac77d04SJeff Roberson 	if (low != NULL && high != NULL && high != low)
548d2ad694cSJeff Roberson 		sched_balance_pair(LIST_FIRST(&high->tdg_members),
549d2ad694cSJeff Roberson 		    LIST_FIRST(&low->tdg_members));
550cac77d04SJeff Roberson }
55186f8ae96SJeff Roberson 
552ae7a6b38SJeff Roberson /*
553ae7a6b38SJeff Roberson  * Balance load between CPUs in a group.  Will only migrate within the group.
554ae7a6b38SJeff Roberson  */
555cac77d04SJeff Roberson static void
556ae7a6b38SJeff Roberson sched_balance_groups(void *arg)
557cac77d04SJeff Roberson {
558cac77d04SJeff Roberson 	int i;
559cac77d04SJeff Roberson 
56028994a58SJeff Roberson 	callout_reset(&gbalco, max(hz / 2, random() % (hz * balance_secs)),
561ae7a6b38SJeff Roberson 	    sched_balance_groups, NULL);
562ae7a6b38SJeff Roberson 	if (smp_started == 0 || rebalance == 0)
563ae7a6b38SJeff Roberson 		return;
564d2ad694cSJeff Roberson 	for (i = 0; i <= tdg_maxid; i++)
565ad1e7d28SJulian Elischer 		sched_balance_group(TDQ_GROUP(i));
566356500a3SJeff Roberson }
567cac77d04SJeff Roberson 
568ae7a6b38SJeff Roberson /*
569ae7a6b38SJeff Roberson  * Finds the greatest imbalance between two tdqs in a group.
570ae7a6b38SJeff Roberson  */
571cac77d04SJeff Roberson static void
572d2ad694cSJeff Roberson sched_balance_group(struct tdq_group *tdg)
573cac77d04SJeff Roberson {
574ad1e7d28SJulian Elischer 	struct tdq *tdq;
575ad1e7d28SJulian Elischer 	struct tdq *high;
576ad1e7d28SJulian Elischer 	struct tdq *low;
577cac77d04SJeff Roberson 	int load;
578cac77d04SJeff Roberson 
579d2ad694cSJeff Roberson 	if (tdg->tdg_transferable == 0)
580cac77d04SJeff Roberson 		return;
581cac77d04SJeff Roberson 	low = NULL;
582cac77d04SJeff Roberson 	high = NULL;
583d2ad694cSJeff Roberson 	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
584d2ad694cSJeff Roberson 		load = tdq->tdq_load;
585d2ad694cSJeff Roberson 		if (high == NULL || load > high->tdq_load)
586ad1e7d28SJulian Elischer 			high = tdq;
587d2ad694cSJeff Roberson 		if (low == NULL || load < low->tdq_load)
588ad1e7d28SJulian Elischer 			low = tdq;
589356500a3SJeff Roberson 	}
590cac77d04SJeff Roberson 	if (high != NULL && low != NULL && high != low)
591cac77d04SJeff Roberson 		sched_balance_pair(high, low);
592356500a3SJeff Roberson }
593cac77d04SJeff Roberson 
594ae7a6b38SJeff Roberson /*
595ae7a6b38SJeff Roberson  * Lock two thread queues using their address to maintain lock order.
596ae7a6b38SJeff Roberson  */
597ae7a6b38SJeff Roberson static void
598ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two)
599ae7a6b38SJeff Roberson {
600ae7a6b38SJeff Roberson 	if (one < two) {
601ae7a6b38SJeff Roberson 		TDQ_LOCK(one);
602ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
603ae7a6b38SJeff Roberson 	} else {
604ae7a6b38SJeff Roberson 		TDQ_LOCK(two);
605ae7a6b38SJeff Roberson 		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
606ae7a6b38SJeff Roberson 	}
607ae7a6b38SJeff Roberson }
608ae7a6b38SJeff Roberson 
609ae7a6b38SJeff Roberson /*
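/*
 * Editorial note on the ordering above: because tdq_cpu[] is a static
 * array, comparing tdq addresses gives every CPU the same global lock
 * order, e.g. tdq_lock_pair(TDQ_CPU(3), TDQ_CPU(1)) still takes
 * tdq_cpu[1]'s lock first, so two balancers locking the same pair from
 * opposite ends cannot deadlock.  MTX_DUPOK on the second acquisition
 * keeps WITNESS from objecting to holding two locks of the same class;
 * when both tdqs belong to one group they share a single group lock,
 * which is why tdq_move() later drops a recursive hold on "from".
 */
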
610ae7a6b38SJeff Roberson  * Transfer load between two imbalanced thread queues.
611ae7a6b38SJeff Roberson  */
612cac77d04SJeff Roberson static void
613ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low)
614cac77d04SJeff Roberson {
615cac77d04SJeff Roberson 	int transferable;
616cac77d04SJeff Roberson 	int high_load;
617cac77d04SJeff Roberson 	int low_load;
618cac77d04SJeff Roberson 	int move;
619cac77d04SJeff Roberson 	int diff;
620cac77d04SJeff Roberson 	int i;
621cac77d04SJeff Roberson 
622ae7a6b38SJeff Roberson 	tdq_lock_pair(high, low);
62380f86c9fSJeff Roberson 	/*
62480f86c9fSJeff Roberson 	 * If we're transferring within a group we have to use this specific
625ad1e7d28SJulian Elischer 	 * tdq's transferable count, otherwise we can steal from other members
62680f86c9fSJeff Roberson 	 * of the group.
62780f86c9fSJeff Roberson 	 */
628d2ad694cSJeff Roberson 	if (high->tdq_group == low->tdq_group) {
629d2ad694cSJeff Roberson 		transferable = high->tdq_transferable;
630d2ad694cSJeff Roberson 		high_load = high->tdq_load;
631d2ad694cSJeff Roberson 		low_load = low->tdq_load;
632cac77d04SJeff Roberson 	} else {
633d2ad694cSJeff Roberson 		transferable = high->tdq_group->tdg_transferable;
634d2ad694cSJeff Roberson 		high_load = high->tdq_group->tdg_load;
635d2ad694cSJeff Roberson 		low_load = low->tdq_group->tdg_load;
636cac77d04SJeff Roberson 	}
637155b9987SJeff Roberson 	/*
638155b9987SJeff Roberson 	 * Determine what the imbalance is and then adjust that to how many
639d2ad694cSJeff Roberson 	 * threads we actually have to give up (transferable).
640155b9987SJeff Roberson 	 */
641ae7a6b38SJeff Roberson 	if (transferable != 0) {
642cac77d04SJeff Roberson 		diff = high_load - low_load;
643356500a3SJeff Roberson 		move = diff / 2;
644356500a3SJeff Roberson 		if (diff & 0x1)
645356500a3SJeff Roberson 			move++;
64680f86c9fSJeff Roberson 		move = min(move, transferable);
647356500a3SJeff Roberson 		for (i = 0; i < move; i++)
648ae7a6b38SJeff Roberson 			tdq_move(high, low);
649a5423ea3SJeff Roberson 		/*
650a5423ea3SJeff Roberson 		 * IPI the target cpu to force it to reschedule with the new
651a5423ea3SJeff Roberson 		 * workload.
652a5423ea3SJeff Roberson 		 */
653a5423ea3SJeff Roberson 		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
654ae7a6b38SJeff Roberson 	}
655ae7a6b38SJeff Roberson 	TDQ_UNLOCK(high);
656ae7a6b38SJeff Roberson 	TDQ_UNLOCK(low);
657356500a3SJeff Roberson 	return;
658356500a3SJeff Roberson }
659356500a3SJeff Roberson 
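/*
 * Editorial example of the imbalance math above: with high_load == 7 and
 * low_load == 2, diff == 5 and move rounds up to 3; move is then clamped
 * to the transferable count, tdq_move() is invoked that many times, and
 * the low CPU is sent an IPI_PREEMPT so it notices its new work at once.
 */
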
660ae7a6b38SJeff Roberson /*
661ae7a6b38SJeff Roberson  * Move a thread from one thread queue to another.
662ae7a6b38SJeff Roberson  */
66322bf7d9aSJeff Roberson static void
664ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to)
665356500a3SJeff Roberson {
666ad1e7d28SJulian Elischer 	struct td_sched *ts;
667ae7a6b38SJeff Roberson 	struct thread *td;
668ae7a6b38SJeff Roberson 	struct tdq *tdq;
669ae7a6b38SJeff Roberson 	int cpu;
670356500a3SJeff Roberson 
671ad1e7d28SJulian Elischer 	tdq = from;
672ae7a6b38SJeff Roberson 	cpu = TDQ_ID(to);
673ad1e7d28SJulian Elischer 	ts = tdq_steal(tdq, 1);
674ad1e7d28SJulian Elischer 	if (ts == NULL) {
675d2ad694cSJeff Roberson 		struct tdq_group *tdg;
67680f86c9fSJeff Roberson 
677d2ad694cSJeff Roberson 		tdg = tdq->tdq_group;
678d2ad694cSJeff Roberson 		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
679d2ad694cSJeff Roberson 			if (tdq == from || tdq->tdq_transferable == 0)
68080f86c9fSJeff Roberson 				continue;
681ad1e7d28SJulian Elischer 			ts = tdq_steal(tdq, 1);
68280f86c9fSJeff Roberson 			break;
68380f86c9fSJeff Roberson 		}
684ad1e7d28SJulian Elischer 		if (ts == NULL)
685ae7a6b38SJeff Roberson 			return;
68680f86c9fSJeff Roberson 	}
687ad1e7d28SJulian Elischer 	if (tdq == to)
68880f86c9fSJeff Roberson 		return;
689ae7a6b38SJeff Roberson 	td = ts->ts_thread;
690ae7a6b38SJeff Roberson 	/*
691ae7a6b38SJeff Roberson 	 * Although the run queue is locked the thread may be blocked.  Lock
692ae7a6b38SJeff Roberson 	 * it to clear this.
693ae7a6b38SJeff Roberson 	 */
694ae7a6b38SJeff Roberson 	thread_lock(td);
695ae7a6b38SJeff Roberson 	/* Drop recursive lock on from. */
696ae7a6b38SJeff Roberson 	TDQ_UNLOCK(from);
697ae7a6b38SJeff Roberson 	sched_rem(td);
6987b8bfa0dSJeff Roberson 	ts->ts_cpu = cpu;
699ae7a6b38SJeff Roberson 	td->td_lock = TDQ_LOCKPTR(to);
700ae7a6b38SJeff Roberson 	tdq_add(to, td, SRQ_YIELDING);
701356500a3SJeff Roberson }
70222bf7d9aSJeff Roberson 
703ae7a6b38SJeff Roberson /*
704ae7a6b38SJeff Roberson  * This tdq has idled.  Try to steal a thread from another cpu and switch
705ae7a6b38SJeff Roberson  * to it.
706ae7a6b38SJeff Roberson  */
70780f86c9fSJeff Roberson static int
708ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq)
70922bf7d9aSJeff Roberson {
710d2ad694cSJeff Roberson 	struct tdq_group *tdg;
711ad1e7d28SJulian Elischer 	struct tdq *steal;
712ad1e7d28SJulian Elischer 	struct td_sched *ts;
713ae7a6b38SJeff Roberson 	struct thread *td;
714ae7a6b38SJeff Roberson 	int highload;
715ae7a6b38SJeff Roberson 	int highcpu;
716ae7a6b38SJeff Roberson 	int load;
717ae7a6b38SJeff Roberson 	int cpu;
71880f86c9fSJeff Roberson 
719ae7a6b38SJeff Roberson 	/* We don't want to be preempted while we're iterating over tdqs */
720ae7a6b38SJeff Roberson 	spinlock_enter();
721d2ad694cSJeff Roberson 	tdg = tdq->tdq_group;
72280f86c9fSJeff Roberson 	/*
723d2ad694cSJeff Roberson 	 * If we're in a cpu group, try and steal threads from another cpu in
72480f86c9fSJeff Roberson 	 * the group before idling.
72580f86c9fSJeff Roberson 	 */
7267b8bfa0dSJeff Roberson 	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
727d2ad694cSJeff Roberson 		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
728d2ad694cSJeff Roberson 			if (steal == tdq || steal->tdq_transferable == 0)
72980f86c9fSJeff Roberson 				continue;
730ae7a6b38SJeff Roberson 			TDQ_LOCK(steal);
731ad1e7d28SJulian Elischer 			ts = tdq_steal(steal, 0);
7327b8bfa0dSJeff Roberson 			if (ts)
7337b8bfa0dSJeff Roberson 				goto steal;
734ae7a6b38SJeff Roberson 			TDQ_UNLOCK(steal);
7357b8bfa0dSJeff Roberson 		}
7367b8bfa0dSJeff Roberson 	}
737ae7a6b38SJeff Roberson 	for (;;) {
738ae7a6b38SJeff Roberson 		if (steal_idle == 0)
7397b8bfa0dSJeff Roberson 			break;
740ae7a6b38SJeff Roberson 		highcpu = 0;
741ae7a6b38SJeff Roberson 		highload = 0;
742ae7a6b38SJeff Roberson 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
743ae7a6b38SJeff Roberson 			if (CPU_ABSENT(cpu))
744ae7a6b38SJeff Roberson 				continue;
7457b8bfa0dSJeff Roberson 			steal = TDQ_CPU(cpu);
746ae7a6b38SJeff Roberson 			load = TDQ_CPU(cpu)->tdq_transferable;
747ae7a6b38SJeff Roberson 			if (load < highload)
7487b8bfa0dSJeff Roberson 				continue;
749ae7a6b38SJeff Roberson 			highload = load;
750ae7a6b38SJeff Roberson 			highcpu = cpu;
751ae7a6b38SJeff Roberson 		}
75228994a58SJeff Roberson 		if (highload < steal_thresh)
753ae7a6b38SJeff Roberson 			break;
754ae7a6b38SJeff Roberson 		steal = TDQ_CPU(highcpu);
755ae7a6b38SJeff Roberson 		TDQ_LOCK(steal);
75628994a58SJeff Roberson 		if (steal->tdq_transferable >= steal_thresh &&
757ae7a6b38SJeff Roberson 		    (ts = tdq_steal(steal, 1)) != NULL)
7587b8bfa0dSJeff Roberson 			goto steal;
759ae7a6b38SJeff Roberson 		TDQ_UNLOCK(steal);
760ae7a6b38SJeff Roberson 		break;
76180f86c9fSJeff Roberson 	}
762ae7a6b38SJeff Roberson 	spinlock_exit();
76380f86c9fSJeff Roberson 	return (1);
7647b8bfa0dSJeff Roberson steal:
765ae7a6b38SJeff Roberson 	td = ts->ts_thread;
766ae7a6b38SJeff Roberson 	thread_lock(td);
767ae7a6b38SJeff Roberson 	spinlock_exit();
768ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(steal));
769ae7a6b38SJeff Roberson 	TDQ_UNLOCK(steal);
770ae7a6b38SJeff Roberson 	sched_rem(td);
771ae7a6b38SJeff Roberson 	sched_setcpu(ts, PCPU_GET(cpuid), SRQ_YIELDING);
772ae7a6b38SJeff Roberson 	tdq_add(tdq, td, SRQ_YIELDING);
773ae7a6b38SJeff Roberson 	MPASS(td->td_lock == curthread->td_lock);
774ae7a6b38SJeff Roberson 	mi_switch(SW_VOL, NULL);
775ae7a6b38SJeff Roberson 	thread_unlock(curthread);
7767b8bfa0dSJeff Roberson 
7777b8bfa0dSJeff Roberson 	return (0);
77822bf7d9aSJeff Roberson }
77922bf7d9aSJeff Roberson 
780ae7a6b38SJeff Roberson /*
781ae7a6b38SJeff Roberson  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
782ae7a6b38SJeff Roberson  */
78322bf7d9aSJeff Roberson static void
7847b8bfa0dSJeff Roberson tdq_notify(struct td_sched *ts)
78522bf7d9aSJeff Roberson {
786fc3a97dcSJeff Roberson 	struct thread *ctd;
78722bf7d9aSJeff Roberson 	struct pcpu *pcpu;
788fc3a97dcSJeff Roberson 	int cpri;
789fc3a97dcSJeff Roberson 	int pri;
7907b8bfa0dSJeff Roberson 	int cpu;
79122bf7d9aSJeff Roberson 
7927b8bfa0dSJeff Roberson 	cpu = ts->ts_cpu;
793fc3a97dcSJeff Roberson 	pri = ts->ts_thread->td_priority;
79422bf7d9aSJeff Roberson 	pcpu = pcpu_find(cpu);
795fc3a97dcSJeff Roberson 	ctd = pcpu->pc_curthread;
796fc3a97dcSJeff Roberson 	cpri = ctd->td_priority;
7976b2f763fSJeff Roberson 
7986b2f763fSJeff Roberson 	/*
7996b2f763fSJeff Roberson 	 * If our priority is not better than the current priority there is
8006b2f763fSJeff Roberson 	 * nothing to do.
8016b2f763fSJeff Roberson 	 */
802fc3a97dcSJeff Roberson 	if (pri > cpri)
8036b2f763fSJeff Roberson 		return;
8047b8bfa0dSJeff Roberson 	/*
805fc3a97dcSJeff Roberson 	 * Always IPI idle.
8067b8bfa0dSJeff Roberson 	 */
807fc3a97dcSJeff Roberson 	if (cpri > PRI_MIN_IDLE)
808fc3a97dcSJeff Roberson 		goto sendipi;
809fc3a97dcSJeff Roberson 	/*
810fc3a97dcSJeff Roberson 	 * If we're realtime or better and there is timeshare or worse running
811fc3a97dcSJeff Roberson 	 * send an IPI.
812fc3a97dcSJeff Roberson 	 */
813fc3a97dcSJeff Roberson 	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
814fc3a97dcSJeff Roberson 		goto sendipi;
815fc3a97dcSJeff Roberson 	/*
816fc3a97dcSJeff Roberson 	 * Otherwise only IPI if we exceed the threshold.
817fc3a97dcSJeff Roberson 	 */
818ae7a6b38SJeff Roberson 	if (pri > preempt_thresh)
8197b8bfa0dSJeff Roberson 		return;
820fc3a97dcSJeff Roberson sendipi:
821fc3a97dcSJeff Roberson 	ctd->td_flags |= TDF_NEEDRESCHED;
82214618990SJeff Roberson 	ipi_selected(1 << cpu, IPI_PREEMPT);
82322bf7d9aSJeff Roberson }
82422bf7d9aSJeff Roberson 
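/*
 * Editorial summary of the notify policy above as concrete scenarios:
 * a wakeup aimed at an idle CPU (cpri in the idle range) always IPIs;
 * a realtime-or-better thread landing on a CPU running timeshare work
 * also IPIs regardless of preempt_thresh; anything else only IPIs when
 * the new priority is at or better than preempt_thresh, otherwise the
 * thread simply waits on the run queue until the next reschedule.
 */
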
825ae7a6b38SJeff Roberson /*
826ae7a6b38SJeff Roberson  * Steals load from a timeshare queue.  Honors the rotating queue head
827ae7a6b38SJeff Roberson  * index.
828ae7a6b38SJeff Roberson  */
829ae7a6b38SJeff Roberson static struct td_sched *
830ae7a6b38SJeff Roberson runq_steal_from(struct runq *rq, u_char start)
831ae7a6b38SJeff Roberson {
832ae7a6b38SJeff Roberson 	struct td_sched *ts;
833ae7a6b38SJeff Roberson 	struct rqbits *rqb;
834ae7a6b38SJeff Roberson 	struct rqhead *rqh;
835ae7a6b38SJeff Roberson 	int first;
836ae7a6b38SJeff Roberson 	int bit;
837ae7a6b38SJeff Roberson 	int pri;
838ae7a6b38SJeff Roberson 	int i;
839ae7a6b38SJeff Roberson 
840ae7a6b38SJeff Roberson 	rqb = &rq->rq_status;
841ae7a6b38SJeff Roberson 	bit = start & (RQB_BPW -1);
842ae7a6b38SJeff Roberson 	pri = 0;
843ae7a6b38SJeff Roberson 	first = 0;
844ae7a6b38SJeff Roberson again:
845ae7a6b38SJeff Roberson 	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
846ae7a6b38SJeff Roberson 		if (rqb->rqb_bits[i] == 0)
847ae7a6b38SJeff Roberson 			continue;
848ae7a6b38SJeff Roberson 		if (bit != 0) {
849ae7a6b38SJeff Roberson 			for (pri = bit; pri < RQB_BPW; pri++)
850ae7a6b38SJeff Roberson 				if (rqb->rqb_bits[i] & (1ul << pri))
851ae7a6b38SJeff Roberson 					break;
852ae7a6b38SJeff Roberson 			if (pri >= RQB_BPW)
853ae7a6b38SJeff Roberson 				continue;
854ae7a6b38SJeff Roberson 		} else
855ae7a6b38SJeff Roberson 			pri = RQB_FFS(rqb->rqb_bits[i]);
856ae7a6b38SJeff Roberson 		pri += (i << RQB_L2BPW);
857ae7a6b38SJeff Roberson 		rqh = &rq->rq_queues[pri];
858ae7a6b38SJeff Roberson 		TAILQ_FOREACH(ts, rqh, ts_procq) {
859ae7a6b38SJeff Roberson 			if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
860ae7a6b38SJeff Roberson 				return (ts);
861ae7a6b38SJeff Roberson 			first = 1;
862ae7a6b38SJeff Roberson 		}
863ae7a6b38SJeff Roberson 	}
864ae7a6b38SJeff Roberson 	if (start != 0) {
865ae7a6b38SJeff Roberson 		start = 0;
866ae7a6b38SJeff Roberson 		goto again;
867ae7a6b38SJeff Roberson 	}
868ae7a6b38SJeff Roberson 
869ae7a6b38SJeff Roberson 	return (NULL);
870ae7a6b38SJeff Roberson }
871ae7a6b38SJeff Roberson 
872ae7a6b38SJeff Roberson /*
873ae7a6b38SJeff Roberson  * Steals load from a standard linear queue.
874ae7a6b38SJeff Roberson  */
875ad1e7d28SJulian Elischer static struct td_sched *
87622bf7d9aSJeff Roberson runq_steal(struct runq *rq)
87722bf7d9aSJeff Roberson {
87822bf7d9aSJeff Roberson 	struct rqhead *rqh;
87922bf7d9aSJeff Roberson 	struct rqbits *rqb;
880ad1e7d28SJulian Elischer 	struct td_sched *ts;
88122bf7d9aSJeff Roberson 	int word;
88222bf7d9aSJeff Roberson 	int bit;
88322bf7d9aSJeff Roberson 
88422bf7d9aSJeff Roberson 	rqb = &rq->rq_status;
88522bf7d9aSJeff Roberson 	for (word = 0; word < RQB_LEN; word++) {
88622bf7d9aSJeff Roberson 		if (rqb->rqb_bits[word] == 0)
88722bf7d9aSJeff Roberson 			continue;
88822bf7d9aSJeff Roberson 		for (bit = 0; bit < RQB_BPW; bit++) {
889a2640c9bSPeter Wemm 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
89022bf7d9aSJeff Roberson 				continue;
89122bf7d9aSJeff Roberson 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
89228994a58SJeff Roberson 			TAILQ_FOREACH(ts, rqh, ts_procq)
89328994a58SJeff Roberson 				if (THREAD_CAN_MIGRATE(ts->ts_thread))
894ad1e7d28SJulian Elischer 					return (ts);
89522bf7d9aSJeff Roberson 		}
89622bf7d9aSJeff Roberson 	}
89722bf7d9aSJeff Roberson 	return (NULL);
89822bf7d9aSJeff Roberson }
89922bf7d9aSJeff Roberson 
900ae7a6b38SJeff Roberson /*
901ae7a6b38SJeff Roberson  * Attempt to steal a thread in priority order from a thread queue.
902ae7a6b38SJeff Roberson  */
903ad1e7d28SJulian Elischer static struct td_sched *
904ad1e7d28SJulian Elischer tdq_steal(struct tdq *tdq, int stealidle)
90522bf7d9aSJeff Roberson {
906ad1e7d28SJulian Elischer 	struct td_sched *ts;
90722bf7d9aSJeff Roberson 
908ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
909e7d50326SJeff Roberson 	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
910ad1e7d28SJulian Elischer 		return (ts);
911ae7a6b38SJeff Roberson 	if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
912ad1e7d28SJulian Elischer 		return (ts);
91380f86c9fSJeff Roberson 	if (stealidle)
914d2ad694cSJeff Roberson 		return (runq_steal(&tdq->tdq_idle));
91580f86c9fSJeff Roberson 	return (NULL);
91622bf7d9aSJeff Roberson }
91780f86c9fSJeff Roberson 
918ae7a6b38SJeff Roberson /*
919ae7a6b38SJeff Roberson  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
920ae7a6b38SJeff Roberson  * current lock and returns with the assigned queue locked.  If this is
921ae7a6b38SJeff Roberson  * via sched_switch() we leave the thread in a blocked state as an
922ae7a6b38SJeff Roberson  * optimization.
923ae7a6b38SJeff Roberson  */
924ae7a6b38SJeff Roberson static inline struct tdq *
925ae7a6b38SJeff Roberson sched_setcpu(struct td_sched *ts, int cpu, int flags)
92680f86c9fSJeff Roberson {
927ae7a6b38SJeff Roberson 	struct thread *td;
928ae7a6b38SJeff Roberson 	struct tdq *tdq;
92980f86c9fSJeff Roberson 
930ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
931ae7a6b38SJeff Roberson 
932ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
933ae7a6b38SJeff Roberson 	td = ts->ts_thread;
934ae7a6b38SJeff Roberson 	ts->ts_cpu = cpu;
935c47f202bSJeff Roberson 
936c47f202bSJeff Roberson 	/* If the lock matches just return the queue. */
937ae7a6b38SJeff Roberson 	if (td->td_lock == TDQ_LOCKPTR(tdq))
938ae7a6b38SJeff Roberson 		return (tdq);
939ae7a6b38SJeff Roberson #ifdef notyet
94080f86c9fSJeff Roberson 	/*
941a5423ea3SJeff Roberson 	 * If the thread isn't running, its lockptr is a
942ae7a6b38SJeff Roberson 	 * turnstile or a sleepqueue.  We can just lock_set without
943ae7a6b38SJeff Roberson 	 * blocking.
944670c524fSJeff Roberson 	 */
945ae7a6b38SJeff Roberson 	if (TD_CAN_RUN(td)) {
946ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
947ae7a6b38SJeff Roberson 		thread_lock_set(td, TDQ_LOCKPTR(tdq));
948ae7a6b38SJeff Roberson 		return (tdq);
949ae7a6b38SJeff Roberson 	}
950ae7a6b38SJeff Roberson #endif
95180f86c9fSJeff Roberson 	/*
952ae7a6b38SJeff Roberson 	 * The hard case, migration, we need to block the thread first to
953ae7a6b38SJeff Roberson 	 * prevent order reversals with other cpus locks.
9547b8bfa0dSJeff Roberson 	 */
955ae7a6b38SJeff Roberson 	thread_lock_block(td);
956ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
957ae7a6b38SJeff Roberson 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
958ae7a6b38SJeff Roberson 	return (tdq);
95980f86c9fSJeff Roberson }
9602454aaf5SJeff Roberson 
961ae7a6b38SJeff Roberson /*
962ae7a6b38SJeff Roberson  * Find the thread queue running the lowest priority thread.
963ae7a6b38SJeff Roberson  */
9647b8bfa0dSJeff Roberson static int
965ae7a6b38SJeff Roberson tdq_lowestpri(void)
9667b8bfa0dSJeff Roberson {
967ae7a6b38SJeff Roberson 	struct tdq *tdq;
9687b8bfa0dSJeff Roberson 	int lowpri;
9697b8bfa0dSJeff Roberson 	int lowcpu;
9707b8bfa0dSJeff Roberson 	int lowload;
9717b8bfa0dSJeff Roberson 	int load;
972ae7a6b38SJeff Roberson 	int cpu;
973ae7a6b38SJeff Roberson 	int pri;
974ae7a6b38SJeff Roberson 
975ae7a6b38SJeff Roberson 	lowload = 0;
976ae7a6b38SJeff Roberson 	lowpri = lowcpu = 0;
977ae7a6b38SJeff Roberson 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
978ae7a6b38SJeff Roberson 		if (CPU_ABSENT(cpu))
979ae7a6b38SJeff Roberson 			continue;
980ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(cpu);
981ae7a6b38SJeff Roberson 		pri = tdq->tdq_lowpri;
982ae7a6b38SJeff Roberson 		load = TDQ_CPU(cpu)->tdq_load;
983ae7a6b38SJeff Roberson 		CTR4(KTR_ULE,
984ae7a6b38SJeff Roberson 		    "cpu %d pri %d lowcpu %d lowpri %d",
985ae7a6b38SJeff Roberson 		    cpu, pri, lowcpu, lowpri);
986ae7a6b38SJeff Roberson 		if (pri < lowpri)
987ae7a6b38SJeff Roberson 			continue;
988ae7a6b38SJeff Roberson 		if (lowpri && lowpri == pri && load > lowload)
989ae7a6b38SJeff Roberson 			continue;
990ae7a6b38SJeff Roberson 		lowpri = pri;
991ae7a6b38SJeff Roberson 		lowcpu = cpu;
992ae7a6b38SJeff Roberson 		lowload = load;
993ae7a6b38SJeff Roberson 	}
994ae7a6b38SJeff Roberson 
995ae7a6b38SJeff Roberson 	return (lowcpu);
996ae7a6b38SJeff Roberson }
997ae7a6b38SJeff Roberson 
998ae7a6b38SJeff Roberson /*
999ae7a6b38SJeff Roberson  * Find the thread queue with the least load.
1000ae7a6b38SJeff Roberson  */
1001ae7a6b38SJeff Roberson static int
1002ae7a6b38SJeff Roberson tdq_lowestload(void)
1003ae7a6b38SJeff Roberson {
1004ae7a6b38SJeff Roberson 	struct tdq *tdq;
1005ae7a6b38SJeff Roberson 	int lowload;
1006ae7a6b38SJeff Roberson 	int lowpri;
1007ae7a6b38SJeff Roberson 	int lowcpu;
1008ae7a6b38SJeff Roberson 	int load;
1009ae7a6b38SJeff Roberson 	int cpu;
1010ae7a6b38SJeff Roberson 	int pri;
1011ae7a6b38SJeff Roberson 
1012ae7a6b38SJeff Roberson 	lowcpu = 0;
1013ae7a6b38SJeff Roberson 	lowload = TDQ_CPU(0)->tdq_load;
1014ae7a6b38SJeff Roberson 	lowpri = TDQ_CPU(0)->tdq_lowpri;
1015ae7a6b38SJeff Roberson 	for (cpu = 1; cpu <= mp_maxid; cpu++) {
1016ae7a6b38SJeff Roberson 		if (CPU_ABSENT(cpu))
1017ae7a6b38SJeff Roberson 			continue;
1018ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(cpu);
1019ae7a6b38SJeff Roberson 		load = tdq->tdq_load;
1020ae7a6b38SJeff Roberson 		pri = tdq->tdq_lowpri;
1021ae7a6b38SJeff Roberson 		CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
1022ae7a6b38SJeff Roberson 		    cpu, load, lowcpu, lowload);
1023ae7a6b38SJeff Roberson 		if (load > lowload)
1024ae7a6b38SJeff Roberson 			continue;
1025ae7a6b38SJeff Roberson 		if (load == lowload && pri < lowpri)
1026ae7a6b38SJeff Roberson 			continue;
1027ae7a6b38SJeff Roberson 		lowcpu = cpu;
1028ae7a6b38SJeff Roberson 		lowload = load;
1029ae7a6b38SJeff Roberson 		lowpri = pri;
1030ae7a6b38SJeff Roberson 	}
1031ae7a6b38SJeff Roberson 
1032ae7a6b38SJeff Roberson 	return (lowcpu);
1033ae7a6b38SJeff Roberson }
1034ae7a6b38SJeff Roberson 
1035ae7a6b38SJeff Roberson /*
1036ae7a6b38SJeff Roberson  * Pick the destination cpu for sched_add().  Respects affinity and makes
1037ae7a6b38SJeff Roberson  * a determination based on load or priority of available processors.
1038ae7a6b38SJeff Roberson  */
1039ae7a6b38SJeff Roberson static int
1040ae7a6b38SJeff Roberson sched_pickcpu(struct td_sched *ts, int flags)
1041ae7a6b38SJeff Roberson {
1042ae7a6b38SJeff Roberson 	struct tdq *tdq;
10437b8bfa0dSJeff Roberson 	int self;
10447b8bfa0dSJeff Roberson 	int pri;
10457b8bfa0dSJeff Roberson 	int cpu;
10467b8bfa0dSJeff Roberson 
1047ae7a6b38SJeff Roberson 	cpu = self = PCPU_GET(cpuid);
10487b8bfa0dSJeff Roberson 	if (smp_started == 0)
10497b8bfa0dSJeff Roberson 		return (self);
105028994a58SJeff Roberson 	/*
105128994a58SJeff Roberson 	 * Don't migrate a running thread from sched_switch().
105228994a58SJeff Roberson 	 */
105328994a58SJeff Roberson 	if (flags & SRQ_OURSELF) {
105428994a58SJeff Roberson 		CTR1(KTR_ULE, "YIELDING %d",
105528994a58SJeff Roberson 		    curthread->td_priority);
105628994a58SJeff Roberson 		return (self);
105728994a58SJeff Roberson 	}
10587b8bfa0dSJeff Roberson 	pri = ts->ts_thread->td_priority;
1059ae7a6b38SJeff Roberson 	cpu = ts->ts_cpu;
10607b8bfa0dSJeff Roberson 	/*
10617b8bfa0dSJeff Roberson 	 * Regardless of affinity, if the last cpu is idle, send it there.
10627b8bfa0dSJeff Roberson 	 */
1063ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpu);
1064ae7a6b38SJeff Roberson 	if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
106514618990SJeff Roberson 		CTR5(KTR_ULE,
10667b8bfa0dSJeff Roberson 		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
10677b8bfa0dSJeff Roberson 		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1068ae7a6b38SJeff Roberson 		    tdq->tdq_lowpri);
10697b8bfa0dSJeff Roberson 		return (ts->ts_cpu);
10707b8bfa0dSJeff Roberson 	}
10717b8bfa0dSJeff Roberson 	/*
10727b8bfa0dSJeff Roberson 	 * If we have affinity, try to place it on the cpu we last ran on.
10737b8bfa0dSJeff Roberson 	 */
1074ae7a6b38SJeff Roberson 	if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
107514618990SJeff Roberson 		CTR5(KTR_ULE,
10767b8bfa0dSJeff Roberson 		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
10777b8bfa0dSJeff Roberson 		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1078ae7a6b38SJeff Roberson 		    tdq->tdq_lowpri);
10797b8bfa0dSJeff Roberson 		return (ts->ts_cpu);
10807b8bfa0dSJeff Roberson 	}
10817b8bfa0dSJeff Roberson 	/*
10827b8bfa0dSJeff Roberson 	 * Look for an idle group.
10837b8bfa0dSJeff Roberson 	 */
108414618990SJeff Roberson 	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
10857b8bfa0dSJeff Roberson 	cpu = ffs(tdq_idle);
10867b8bfa0dSJeff Roberson 	if (cpu)
1087ae7a6b38SJeff Roberson 		return (--cpu);
108828994a58SJeff Roberson 	/*
108928994a58SJeff Roberson 	 * If there are no idle cores, see if we can run the thread locally.  This
109028994a58SJeff Roberson 	 * may improve locality among sleepers and wakers when there is shared data.
109128994a58SJeff Roberson 	 */
109228994a58SJeff Roberson 	if (tryself && pri < curthread->td_priority) {
109328994a58SJeff Roberson 		CTR1(KTR_ULE, "tryself %d",
10947b8bfa0dSJeff Roberson 		    curthread->td_priority);
10957b8bfa0dSJeff Roberson 		return (self);
10967b8bfa0dSJeff Roberson 	}
10977b8bfa0dSJeff Roberson 	/*
10987b8bfa0dSJeff Roberson  	 * Now search for the cpu running the lowest priority thread with
10997b8bfa0dSJeff Roberson 	 * the least load.
11007b8bfa0dSJeff Roberson 	 */
1101ae7a6b38SJeff Roberson 	if (pick_pri)
1102ae7a6b38SJeff Roberson 		cpu = tdq_lowestpri();
1103ae7a6b38SJeff Roberson 	else
1104ae7a6b38SJeff Roberson 		cpu = tdq_lowestload();
1105ae7a6b38SJeff Roberson 	return (cpu);
110680f86c9fSJeff Roberson }
110780f86c9fSJeff Roberson 
110822bf7d9aSJeff Roberson #endif	/* SMP */
110922bf7d9aSJeff Roberson 
111022bf7d9aSJeff Roberson /*
111122bf7d9aSJeff Roberson  * Pick the highest priority task we have and return it.
11120c0a98b2SJeff Roberson  */
1113ad1e7d28SJulian Elischer static struct td_sched *
1114ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq)
11155d7ef00cSJeff Roberson {
1116ad1e7d28SJulian Elischer 	struct td_sched *ts;
11175d7ef00cSJeff Roberson 
1118ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1119e7d50326SJeff Roberson 	ts = runq_choose(&tdq->tdq_realtime);
1120dda713dfSJeff Roberson 	if (ts != NULL)
1121e7d50326SJeff Roberson 		return (ts);
11223f872f85SJeff Roberson 	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1123e7d50326SJeff Roberson 	if (ts != NULL) {
1124dda713dfSJeff Roberson 		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1125e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on timeshare queue %d",
1126e7d50326SJeff Roberson 		    ts->ts_thread->td_priority));
1127ad1e7d28SJulian Elischer 		return (ts);
112815dc847eSJeff Roberson 	}
112915dc847eSJeff Roberson 
1130e7d50326SJeff Roberson 	ts = runq_choose(&tdq->tdq_idle);
1131e7d50326SJeff Roberson 	if (ts != NULL) {
1132e7d50326SJeff Roberson 		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1133e7d50326SJeff Roberson 		    ("tdq_choose: Invalid priority on idle queue %d",
1134e7d50326SJeff Roberson 		    ts->ts_thread->td_priority));
1135e7d50326SJeff Roberson 		return (ts);
1136e7d50326SJeff Roberson 	}
1137e7d50326SJeff Roberson 
1138e7d50326SJeff Roberson 	return (NULL);
1139245f3abfSJeff Roberson }
11400a016a05SJeff Roberson 
1141ae7a6b38SJeff Roberson /*
1142ae7a6b38SJeff Roberson  * Initialize a thread queue.
1143ae7a6b38SJeff Roberson  */
11440a016a05SJeff Roberson static void
1145ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq)
11460a016a05SJeff Roberson {
1147ae7a6b38SJeff Roberson 
1148c47f202bSJeff Roberson 	if (bootverbose)
1149c47f202bSJeff Roberson 		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1150e7d50326SJeff Roberson 	runq_init(&tdq->tdq_realtime);
1151e7d50326SJeff Roberson 	runq_init(&tdq->tdq_timeshare);
1152d2ad694cSJeff Roberson 	runq_init(&tdq->tdq_idle);
1153d2ad694cSJeff Roberson 	tdq->tdq_load = 0;
11540a016a05SJeff Roberson }
11550a016a05SJeff Roberson 
1156c47f202bSJeff Roberson #ifdef SMP
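/*
 * Initialize a thread queue group.  Each group of cpus shares a single
 * run-queue spin lock; set up that lock, the member list, and the
 * load/transferable/cpu mask accounting.
 */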
1157c47f202bSJeff Roberson static void
1158c47f202bSJeff Roberson tdg_setup(struct tdq_group *tdg)
1159c47f202bSJeff Roberson {
1160c47f202bSJeff Roberson 	if (bootverbose)
1161c47f202bSJeff Roberson 		printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
1162c47f202bSJeff Roberson 	snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
1163c47f202bSJeff Roberson 	    "sched lock %d", (int)TDG_ID(tdg));
1164c47f202bSJeff Roberson 	mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
1165c47f202bSJeff Roberson 	    MTX_SPIN | MTX_RECURSE);
1166c47f202bSJeff Roberson 	LIST_INIT(&tdg->tdg_members);
1167c47f202bSJeff Roberson 	tdg->tdg_load = 0;
1168c47f202bSJeff Roberson 	tdg->tdg_transferable = 0;
1169c47f202bSJeff Roberson 	tdg->tdg_cpus = 0;
1170c47f202bSJeff Roberson 	tdg->tdg_mask = 0;
1171c47f202bSJeff Roberson 	tdg->tdg_cpumask = 0;
1172c47f202bSJeff Roberson 	tdg->tdg_idlemask = 0;
1173c47f202bSJeff Roberson }
1174c47f202bSJeff Roberson 
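/*
 * Add a thread queue to a group: account for the new cpu in the group's
 * masks and count, point the queue at the group's shared lock, and link
 * it into the member list.
 */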
1175c47f202bSJeff Roberson static void
1176c47f202bSJeff Roberson tdg_add(struct tdq_group *tdg, struct tdq *tdq)
1177c47f202bSJeff Roberson {
1178c47f202bSJeff Roberson 	if (tdg->tdg_mask == 0)
1179c47f202bSJeff Roberson 		tdg->tdg_mask |= 1 << TDQ_ID(tdq);
1180c47f202bSJeff Roberson 	tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
1181c47f202bSJeff Roberson 	tdg->tdg_cpus++;
1182c47f202bSJeff Roberson 	tdq->tdq_group = tdg;
1183c47f202bSJeff Roberson 	tdq->tdq_lock = &tdg->tdg_lock;
1184c47f202bSJeff Roberson 	LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1185c47f202bSJeff Roberson 	if (bootverbose)
1186c47f202bSJeff Roberson 		printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
1187c47f202bSJeff Roberson 		    TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
1188c47f202bSJeff Roberson }
1189c47f202bSJeff Roberson 
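/*
 * Build tdq groups from the MD smp_topology description, one group per
 * cpu_group, initializing and adding each member cpu's tdq.  If any group
 * spans more than one cpu, kick off group load balancing.
 */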
1190c47f202bSJeff Roberson static void
1191c47f202bSJeff Roberson sched_setup_topology(void)
1192c47f202bSJeff Roberson {
1193c47f202bSJeff Roberson 	struct tdq_group *tdg;
1194c47f202bSJeff Roberson 	struct cpu_group *cg;
1195c47f202bSJeff Roberson 	int balance_groups;
1196c47f202bSJeff Roberson 	struct tdq *tdq;
1197c47f202bSJeff Roberson 	int i;
1198c47f202bSJeff Roberson 	int j;
1199c47f202bSJeff Roberson 
1200c47f202bSJeff Roberson 	topology = 1;
1201c47f202bSJeff Roberson 	balance_groups = 0;
1202c47f202bSJeff Roberson 	for (i = 0; i < smp_topology->ct_count; i++) {
1203c47f202bSJeff Roberson 		cg = &smp_topology->ct_group[i];
1204c47f202bSJeff Roberson 		tdg = &tdq_groups[i];
1205c47f202bSJeff Roberson 		/*
1206c47f202bSJeff Roberson 		 * Initialize the group.
1207c47f202bSJeff Roberson 		 */
1208c47f202bSJeff Roberson 		tdg_setup(tdg);
1209c47f202bSJeff Roberson 		/*
1210c47f202bSJeff Roberson 		 * Find all of the group members and add them.
1211c47f202bSJeff Roberson 		 */
1212c47f202bSJeff Roberson 		for (j = 0; j < MAXCPU; j++) {
1213c47f202bSJeff Roberson 			if ((cg->cg_mask & (1 << j)) != 0) {
1214c47f202bSJeff Roberson 				tdq = TDQ_CPU(j);
1215c47f202bSJeff Roberson 				tdq_setup(tdq);
1216c47f202bSJeff Roberson 				tdg_add(tdg, tdq);
1217c47f202bSJeff Roberson 			}
1218c47f202bSJeff Roberson 		}
1219c47f202bSJeff Roberson 		if (tdg->tdg_cpus > 1)
1220c47f202bSJeff Roberson 			balance_groups = 1;
1221c47f202bSJeff Roberson 	}
1222c47f202bSJeff Roberson 	tdg_maxid = smp_topology->ct_count - 1;
1223c47f202bSJeff Roberson 	if (balance_groups)
1224c47f202bSJeff Roberson 		sched_balance_groups(NULL);
1225c47f202bSJeff Roberson }
1226c47f202bSJeff Roberson 
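/*
 * Fallback when no topology is supplied: give each present cpu its own
 * single-member group.
 */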
1227c47f202bSJeff Roberson static void
1228c47f202bSJeff Roberson sched_setup_smp(void)
1229c47f202bSJeff Roberson {
1230c47f202bSJeff Roberson 	struct tdq_group *tdg;
1231c47f202bSJeff Roberson 	struct tdq *tdq;
1232c47f202bSJeff Roberson 	int cpus;
1233c47f202bSJeff Roberson 	int i;
1234c47f202bSJeff Roberson 
1235c47f202bSJeff Roberson 	for (cpus = 0, i = 0; i < MAXCPU; i++) {
1236c47f202bSJeff Roberson 		if (CPU_ABSENT(i))
1237c47f202bSJeff Roberson 			continue;
1238c47f202bSJeff Roberson 		tdq = &tdq_cpu[i];
1239c47f202bSJeff Roberson 		tdg = &tdq_groups[i];
1240c47f202bSJeff Roberson 		/*
1241c47f202bSJeff Roberson 		 * Setup a tdq group with one member.
1242c47f202bSJeff Roberson 		 */
1243c47f202bSJeff Roberson 		tdg_setup(tdg);
1244c47f202bSJeff Roberson 		tdq_setup(tdq);
1245c47f202bSJeff Roberson 		tdg_add(tdg, tdq);
1246c47f202bSJeff Roberson 		cpus++;
1247c47f202bSJeff Roberson 	}
1248c47f202bSJeff Roberson 	tdg_maxid = cpus - 1;
1249c47f202bSJeff Roberson }
1250c47f202bSJeff Roberson 
1251c47f202bSJeff Roberson /*
1252c47f202bSJeff Roberson  * Fake a topology with one group containing all CPUs.
1253c47f202bSJeff Roberson  */
1254c47f202bSJeff Roberson static void
1255c47f202bSJeff Roberson sched_fake_topo(void)
1256c47f202bSJeff Roberson {
1257c47f202bSJeff Roberson #ifdef SCHED_FAKE_TOPOLOGY
1258c47f202bSJeff Roberson 	static struct cpu_top top;
1259c47f202bSJeff Roberson 	static struct cpu_group group;
1260c47f202bSJeff Roberson 
1261c47f202bSJeff Roberson 	top.ct_count = 1;
1262c47f202bSJeff Roberson 	top.ct_group = &group;
1263c47f202bSJeff Roberson 	group.cg_mask = all_cpus;
1264c47f202bSJeff Roberson 	group.cg_count = mp_ncpus;
1265c47f202bSJeff Roberson 	group.cg_children = 0;
1266c47f202bSJeff Roberson 	smp_topology = &top;
1267c47f202bSJeff Roberson #endif
1268c47f202bSJeff Roberson }
1269c47f202bSJeff Roberson #endif
1270c47f202bSJeff Roberson 
1271ae7a6b38SJeff Roberson /*
1272ae7a6b38SJeff Roberson  * Setup the thread queues and initialize the topology based on MD
1273ae7a6b38SJeff Roberson  * information.
1274ae7a6b38SJeff Roberson  */
127535e6168fSJeff Roberson static void
127635e6168fSJeff Roberson sched_setup(void *dummy)
127735e6168fSJeff Roberson {
1278ae7a6b38SJeff Roberson 	struct tdq *tdq;
1279c47f202bSJeff Roberson 
1280c47f202bSJeff Roberson 	tdq = TDQ_SELF();
12810ec896fdSJeff Roberson #ifdef SMP
1282cac77d04SJeff Roberson 	/*
1283ae7a6b38SJeff Roberson 	 * Initialize long-term cpu balancing algorithm.
1284cac77d04SJeff Roberson 	 */
1285ae7a6b38SJeff Roberson 	callout_init(&balco, CALLOUT_MPSAFE);
1286ae7a6b38SJeff Roberson 	callout_init(&gbalco, CALLOUT_MPSAFE);
1287c47f202bSJeff Roberson 	sched_fake_topo();
1288c47f202bSJeff Roberson 	/*
1289c47f202bSJeff Roberson 	 * Setup tdqs based on a topology configuration or vanilla SMP based
1290c47f202bSJeff Roberson 	 * on mp_maxid.
1291c47f202bSJeff Roberson 	 */
1292c47f202bSJeff Roberson 	if (smp_topology == NULL)
1293c47f202bSJeff Roberson 		sched_setup_smp();
1294c47f202bSJeff Roberson 	else
1295c47f202bSJeff Roberson 		sched_setup_topology();
1296ae7a6b38SJeff Roberson 	sched_balance(NULL);
1297749d01b0SJeff Roberson #else
1298c47f202bSJeff Roberson 	tdq_setup(tdq);
1299c47f202bSJeff Roberson 	mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1300c47f202bSJeff Roberson 	tdq->tdq_lock = &tdq_lock;
1301356500a3SJeff Roberson #endif
1302ae7a6b38SJeff Roberson 	/*
1303ae7a6b38SJeff Roberson 	 * To avoid divide-by-zero, set realstathz to a dummy value in case
1304ae7a6b38SJeff Roberson 	 * sched_clock() is called before sched_initticks().
1305ae7a6b38SJeff Roberson 	 */
1306ae7a6b38SJeff Roberson 	realstathz = hz;
1307ae7a6b38SJeff Roberson 	sched_slice = (realstathz/10);	/* ~100ms */
1308ae7a6b38SJeff Roberson 	tickincr = 1 << SCHED_TICK_SHIFT;
1309ae7a6b38SJeff Roberson 
1310ae7a6b38SJeff Roberson 	/* Add thread0's load since it's running. */
1311ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
1312c47f202bSJeff Roberson 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1313ae7a6b38SJeff Roberson 	tdq_load_add(tdq, &td_sched0);
1314ae7a6b38SJeff Roberson 	TDQ_UNLOCK(tdq);
131535e6168fSJeff Roberson }
131635e6168fSJeff Roberson 
1317ae7a6b38SJeff Roberson /*
1318ae7a6b38SJeff Roberson  * This routine determines the tickincr after stathz and hz are setup.
1319ae7a6b38SJeff Roberson  */
1320a1d4fe69SDavid Xu /* ARGSUSED */
1321a1d4fe69SDavid Xu static void
1322a1d4fe69SDavid Xu sched_initticks(void *dummy)
1323a1d4fe69SDavid Xu {
1324ae7a6b38SJeff Roberson 	int incr;
1325ae7a6b38SJeff Roberson 
1326a1d4fe69SDavid Xu 	realstathz = stathz ? stathz : hz;
132714618990SJeff Roberson 	sched_slice = (realstathz/10);	/* ~100ms */
1328a1d4fe69SDavid Xu 
1329a1d4fe69SDavid Xu 	/*
1330e7d50326SJeff Roberson 	 * tickincr is shifted out by 10 to avoid rounding errors due to
13313f872f85SJeff Roberson 	 * hz not being evenly divisible by stathz on all platforms.
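	 * As a hedged example (assuming the common, but not universal, values
	 * hz = 1000 and stathz = 128): incr = (1000 << 10) / 128 = 8000, so
	 * each stathz tick charges 8000 units of scaled run time.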
1332e7d50326SJeff Roberson 	 */
1333ae7a6b38SJeff Roberson 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1334e7d50326SJeff Roberson 	/*
1335e7d50326SJeff Roberson 	 * This does not work for values of stathz that are more than
1336e7d50326SJeff Roberson 	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
1337a1d4fe69SDavid Xu 	 */
1338ae7a6b38SJeff Roberson 	if (incr == 0)
1339ae7a6b38SJeff Roberson 		incr = 1;
1340ae7a6b38SJeff Roberson 	tickincr = incr;
13417b8bfa0dSJeff Roberson #ifdef SMP
13429862717aSJeff Roberson 	/*
13439862717aSJeff Roberson 	 * Set steal_thresh to log2(mp_ncpus) but no greater than 4.  This
13449862717aSJeff Roberson 	 * prevents excess thrashing on large machines and excess idle on
13459862717aSJeff Roberson 	 * smaller machines.
13469862717aSJeff Roberson 	 */
13479862717aSJeff Roberson 	steal_thresh = min(ffs(mp_ncpus) - 1, 4);
13487b8bfa0dSJeff Roberson 	affinity = SCHED_AFFINITY_DEFAULT;
13497b8bfa0dSJeff Roberson #endif
1350a1d4fe69SDavid Xu }
1351a1d4fe69SDavid Xu 
1352a1d4fe69SDavid Xu 
135335e6168fSJeff Roberson /*
1354ae7a6b38SJeff Roberson  * This is the core of the interactivity algorithm.  Determines a score based
1355ae7a6b38SJeff Roberson  * on past behavior.  It is the ratio of sleep time to run time scaled to
1356ae7a6b38SJeff Roberson  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1357ae7a6b38SJeff Roberson  * differs from the cpu usage because it does not account for time spent
1358ae7a6b38SJeff Roberson  * waiting on a run-queue.  Would be prettier if we had floating point.
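 * As a hedged worked example: a thread that has slept three times as long
 * as it has run takes the slptime > runtime branch below and scores about
 * 50 / 3 = 16 on the 0..100 scale, well on the interactive (low) side of
 * the midpoint.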
1359ae7a6b38SJeff Roberson  */
1360ae7a6b38SJeff Roberson static int
1361ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1362ae7a6b38SJeff Roberson {
1363ae7a6b38SJeff Roberson 	struct td_sched *ts;
1364ae7a6b38SJeff Roberson 	int div;
1365ae7a6b38SJeff Roberson 
1366ae7a6b38SJeff Roberson 	ts = td->td_sched;
1367ae7a6b38SJeff Roberson 	/*
1368ae7a6b38SJeff Roberson 	 * The score is only needed if this is likely to be an interactive
1369ae7a6b38SJeff Roberson 	 * task.  Don't go through the expense of computing it if there's
1370ae7a6b38SJeff Roberson 	 * no chance.
1371ae7a6b38SJeff Roberson 	 */
1372ae7a6b38SJeff Roberson 	if (sched_interact <= SCHED_INTERACT_HALF &&
1373ae7a6b38SJeff Roberson 		ts->ts_runtime >= ts->ts_slptime)
1374ae7a6b38SJeff Roberson 			return (SCHED_INTERACT_HALF);
1375ae7a6b38SJeff Roberson 
1376ae7a6b38SJeff Roberson 	if (ts->ts_runtime > ts->ts_slptime) {
1377ae7a6b38SJeff Roberson 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1378ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF +
1379ae7a6b38SJeff Roberson 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1380ae7a6b38SJeff Roberson 	}
1381ae7a6b38SJeff Roberson 	if (ts->ts_slptime > ts->ts_runtime) {
1382ae7a6b38SJeff Roberson 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1383ae7a6b38SJeff Roberson 		return (ts->ts_runtime / div);
1384ae7a6b38SJeff Roberson 	}
1385ae7a6b38SJeff Roberson 	/* runtime == slptime */
1386ae7a6b38SJeff Roberson 	if (ts->ts_runtime)
1387ae7a6b38SJeff Roberson 		return (SCHED_INTERACT_HALF);
1388ae7a6b38SJeff Roberson 
1389ae7a6b38SJeff Roberson 	/*
1390ae7a6b38SJeff Roberson 	 * This can happen if slptime and runtime are 0.
1391ae7a6b38SJeff Roberson 	 */
1392ae7a6b38SJeff Roberson 	return (0);
1393ae7a6b38SJeff Roberson 
1394ae7a6b38SJeff Roberson }
1395ae7a6b38SJeff Roberson 
1396ae7a6b38SJeff Roberson /*
139735e6168fSJeff Roberson  * Scale the scheduling priority according to the "interactivity" of this
139835e6168fSJeff Roberson  * process.
139935e6168fSJeff Roberson  */
140015dc847eSJeff Roberson static void
14018460a577SJohn Birrell sched_priority(struct thread *td)
140235e6168fSJeff Roberson {
1403e7d50326SJeff Roberson 	int score;
140435e6168fSJeff Roberson 	int pri;
140535e6168fSJeff Roberson 
14068460a577SJohn Birrell 	if (td->td_pri_class != PRI_TIMESHARE)
140715dc847eSJeff Roberson 		return;
1408e7d50326SJeff Roberson 	/*
1409e7d50326SJeff Roberson 	 * If the score is interactive we place the thread in the realtime
1410e7d50326SJeff Roberson 	 * queue with a priority that is less than kernel and interrupt
1411e7d50326SJeff Roberson 	 * priorities.  These threads are not subject to nice restrictions.
1412e7d50326SJeff Roberson 	 *
1413ae7a6b38SJeff Roberson 	 * Scores greater than this are placed on the normal timeshare queue
1414e7d50326SJeff Roberson 	 * where the priority is partially decided by the most recent cpu
1415e7d50326SJeff Roberson 	 * utilization and the rest is decided by nice value.
1416a5423ea3SJeff Roberson 	 *
1417a5423ea3SJeff Roberson 	 * The nice value of the process has a linear effect on the calculated
1418a5423ea3SJeff Roberson 	 * score.  Negative nice values make it easier for a thread to be
1419a5423ea3SJeff Roberson 	 * considered interactive.
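	 * As a hedged illustration, assuming the default interactivity
	 * threshold of 30 for sched_interact: a score of 15 maps halfway
	 * into the realtime range below, while a score of 30 or more falls
	 * through to the timeshare calculation.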
1420e7d50326SJeff Roberson 	 */
1421e270652bSJeff Roberson 	score = imax(0, sched_interact_score(td) - td->td_proc->p_nice);
1422e7d50326SJeff Roberson 	if (score < sched_interact) {
1423e7d50326SJeff Roberson 		pri = PRI_MIN_REALTIME;
1424e7d50326SJeff Roberson 		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1425e7d50326SJeff Roberson 		    * score;
1426e7d50326SJeff Roberson 		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
14279a93305aSJeff Roberson 		    ("sched_priority: invalid interactive priority %d score %d",
14289a93305aSJeff Roberson 		    pri, score));
1429e7d50326SJeff Roberson 	} else {
1430e7d50326SJeff Roberson 		pri = SCHED_PRI_MIN;
1431e7d50326SJeff Roberson 		if (td->td_sched->ts_ticks)
1432e7d50326SJeff Roberson 			pri += SCHED_PRI_TICKS(td->td_sched);
1433e7d50326SJeff Roberson 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1434ae7a6b38SJeff Roberson 		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1435ae7a6b38SJeff Roberson 		    ("sched_priority: invalid priority %d: nice %d, "
1436ae7a6b38SJeff Roberson 		    "ticks %d ftick %d ltick %d tick pri %d",
1437ae7a6b38SJeff Roberson 		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1438ae7a6b38SJeff Roberson 		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1439ae7a6b38SJeff Roberson 		    SCHED_PRI_TICKS(td->td_sched)));
1440e7d50326SJeff Roberson 	}
14418460a577SJohn Birrell 	sched_user_prio(td, pri);
144235e6168fSJeff Roberson 
144315dc847eSJeff Roberson 	return;
144435e6168fSJeff Roberson }
144535e6168fSJeff Roberson 
144635e6168fSJeff Roberson /*
1447d322132cSJeff Roberson  * This routine enforces a maximum limit on the amount of scheduling history
1448ae7a6b38SJeff Roberson  * kept.  It is called after either the slptime or runtime is adjusted.  This
1449ae7a6b38SJeff Roberson  * function is ugly due to integer math.
1450d322132cSJeff Roberson  */
14514b60e324SJeff Roberson static void
14528460a577SJohn Birrell sched_interact_update(struct thread *td)
14534b60e324SJeff Roberson {
1454155b6ca1SJeff Roberson 	struct td_sched *ts;
14559a93305aSJeff Roberson 	u_int sum;
14563f741ca1SJeff Roberson 
1457155b6ca1SJeff Roberson 	ts = td->td_sched;
1458ae7a6b38SJeff Roberson 	sum = ts->ts_runtime + ts->ts_slptime;
1459d322132cSJeff Roberson 	if (sum < SCHED_SLP_RUN_MAX)
1460d322132cSJeff Roberson 		return;
1461d322132cSJeff Roberson 	/*
1462155b6ca1SJeff Roberson 	 * This only happens from two places:
1463155b6ca1SJeff Roberson 	 * 1) We have added an unusual amount of run time from fork_exit.
1464155b6ca1SJeff Roberson 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1465155b6ca1SJeff Roberson 	 */
1466155b6ca1SJeff Roberson 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1467ae7a6b38SJeff Roberson 		if (ts->ts_runtime > ts->ts_slptime) {
1468ae7a6b38SJeff Roberson 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1469ae7a6b38SJeff Roberson 			ts->ts_slptime = 1;
1470155b6ca1SJeff Roberson 		} else {
1471ae7a6b38SJeff Roberson 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1472ae7a6b38SJeff Roberson 			ts->ts_runtime = 1;
1473155b6ca1SJeff Roberson 		}
1474155b6ca1SJeff Roberson 		return;
1475155b6ca1SJeff Roberson 	}
1476155b6ca1SJeff Roberson 	/*
1477d322132cSJeff Roberson 	 * If we have exceeded the limit by more than 1/5th then the 4/5
1478d322132cSJeff Roberson 	 * scaling below will not bring us back into range.  Dividing by two
14792454aaf5SJeff Roberson 	 * here forces us back below SCHED_SLP_RUN_MAX.
1480d322132cSJeff Roberson 	 */
148137a35e4aSJeff Roberson 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1482ae7a6b38SJeff Roberson 		ts->ts_runtime /= 2;
1483ae7a6b38SJeff Roberson 		ts->ts_slptime /= 2;
1484d322132cSJeff Roberson 		return;
1485d322132cSJeff Roberson 	}
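	/*
	 * Trim both components by 1/5th.  At this point the sum is at most
	 * 6/5 of SCHED_SLP_RUN_MAX, so one pass brings it back to at most
	 * 24/25 of the limit.
	 */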
1486ae7a6b38SJeff Roberson 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1487ae7a6b38SJeff Roberson 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1488d322132cSJeff Roberson }
1489d322132cSJeff Roberson 
1490ae7a6b38SJeff Roberson /*
1491ae7a6b38SJeff Roberson  * Scale back the interactivity history when a child thread is created.  The
1492ae7a6b38SJeff Roberson  * history is inherited from the parent but the thread may behave totally
1493ae7a6b38SJeff Roberson  * differently.  For example, a shell spawning a compiler process.  We want
1494ae7a6b38SJeff Roberson  * to learn that the compiler is behaving badly very quickly.
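 * As a hedged example: a parent that has accumulated four times
 * SCHED_SLP_RUN_FORK of combined history has both ts_runtime and
 * ts_slptime divided by 4, roughly preserving their ratio while shrinking
 * their weight.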
1495ae7a6b38SJeff Roberson  */
1496d322132cSJeff Roberson static void
14978460a577SJohn Birrell sched_interact_fork(struct thread *td)
1498d322132cSJeff Roberson {
1499d322132cSJeff Roberson 	int ratio;
1500d322132cSJeff Roberson 	int sum;
1501d322132cSJeff Roberson 
1502ae7a6b38SJeff Roberson 	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1503d322132cSJeff Roberson 	if (sum > SCHED_SLP_RUN_FORK) {
1504d322132cSJeff Roberson 		ratio = sum / SCHED_SLP_RUN_FORK;
1505ae7a6b38SJeff Roberson 		td->td_sched->ts_runtime /= ratio;
1506ae7a6b38SJeff Roberson 		td->td_sched->ts_slptime /= ratio;
15074b60e324SJeff Roberson 	}
15084b60e324SJeff Roberson }
15094b60e324SJeff Roberson 
151015dc847eSJeff Roberson /*
1511ae7a6b38SJeff Roberson  * Called from proc0_init() to setup the scheduler fields.
1512ed062c8dSJulian Elischer  */
1513ed062c8dSJulian Elischer void
1514ed062c8dSJulian Elischer schedinit(void)
1515ed062c8dSJulian Elischer {
1516e7d50326SJeff Roberson 
1517ed062c8dSJulian Elischer 	/*
1518ed062c8dSJulian Elischer 	 * Set up the scheduler specific parts of proc0.
1519ed062c8dSJulian Elischer 	 */
1520ed062c8dSJulian Elischer 	proc0.p_sched = NULL; /* XXX */
1521ad1e7d28SJulian Elischer 	thread0.td_sched = &td_sched0;
1522e7d50326SJeff Roberson 	td_sched0.ts_ltick = ticks;
15238ab80cf0SJeff Roberson 	td_sched0.ts_ftick = ticks;
1524ad1e7d28SJulian Elischer 	td_sched0.ts_thread = &thread0;
1525ed062c8dSJulian Elischer }
1526ed062c8dSJulian Elischer 
1527ed062c8dSJulian Elischer /*
152815dc847eSJeff Roberson  * This is only somewhat accurate since, given many processes of the same
152915dc847eSJeff Roberson  * priority, they will switch when their slices run out, which will be
1530e7d50326SJeff Roberson  * at most sched_slice stathz ticks.
153115dc847eSJeff Roberson  */
153235e6168fSJeff Roberson int
153335e6168fSJeff Roberson sched_rr_interval(void)
153435e6168fSJeff Roberson {
1535e7d50326SJeff Roberson 
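	/*
	 * Hedged example, assuming stathz = 128 and the ~100ms slice set in
	 * sched_initticks() (sched_slice = 128 / 10 = 12): this returns
	 * hz / (128 / 12) = hz / 10, i.e. roughly 100ms worth of hz ticks.
	 */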
1536e7d50326SJeff Roberson 	/* Convert sched_slice to hz */
1537e7d50326SJeff Roberson 	return (hz/(realstathz/sched_slice));
153835e6168fSJeff Roberson }
153935e6168fSJeff Roberson 
1540ae7a6b38SJeff Roberson /*
1541ae7a6b38SJeff Roberson  * Update the percent cpu tracking information when it is requested or
1542ae7a6b38SJeff Roberson  * the total history exceeds the maximum.  We keep a sliding history of
1543ae7a6b38SJeff Roberson  * tick counts that slowly decays.  This is less precise than the 4BSD
1544ae7a6b38SJeff Roberson  * mechanism since it happens with less regular and frequent events.
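 * The history window is SCHED_TICK_TARG ticks wide: if the most recent
 * sample still falls inside the window the accumulated ticks are rescaled
 * to it, otherwise they are discarded and the window restarts at the
 * current tick.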
1545ae7a6b38SJeff Roberson  */
154622bf7d9aSJeff Roberson static void
1547ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts)
154835e6168fSJeff Roberson {
1549e7d50326SJeff Roberson 
1550e7d50326SJeff Roberson 	if (ts->ts_ticks == 0)
1551e7d50326SJeff Roberson 		return;
15528ab80cf0SJeff Roberson 	if (ticks - (hz / 10) < ts->ts_ltick &&
15538ab80cf0SJeff Roberson 	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
15548ab80cf0SJeff Roberson 		return;
155535e6168fSJeff Roberson 	/*
155635e6168fSJeff Roberson 	 * Adjust counters and watermark for pctcpu calc.
1557210491d3SJeff Roberson 	 */
1558e7d50326SJeff Roberson 	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1559ad1e7d28SJulian Elischer 		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1560e7d50326SJeff Roberson 			    SCHED_TICK_TARG;
1561e7d50326SJeff Roberson 	else
1562ad1e7d28SJulian Elischer 		ts->ts_ticks = 0;
1563ad1e7d28SJulian Elischer 	ts->ts_ltick = ticks;
1564e7d50326SJeff Roberson 	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
156535e6168fSJeff Roberson }
156635e6168fSJeff Roberson 
1567ae7a6b38SJeff Roberson /*
1568ae7a6b38SJeff Roberson  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1569ae7a6b38SJeff Roberson  * if necessary.  This is the back-end for several priority related
1570ae7a6b38SJeff Roberson  * functions.
1571ae7a6b38SJeff Roberson  */
1572e7d50326SJeff Roberson static void
1573f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio)
157435e6168fSJeff Roberson {
1575ad1e7d28SJulian Elischer 	struct td_sched *ts;
157635e6168fSJeff Roberson 
157781d47d3fSJeff Roberson 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
157881d47d3fSJeff Roberson 	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
157981d47d3fSJeff Roberson 	    curthread->td_proc->p_comm);
1580ad1e7d28SJulian Elischer 	ts = td->td_sched;
15817b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1582f5c157d9SJohn Baldwin 	if (td->td_priority == prio)
1583f5c157d9SJohn Baldwin 		return;
1584e7d50326SJeff Roberson 
15853f872f85SJeff Roberson 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
15863f741ca1SJeff Roberson 		/*
15873f741ca1SJeff Roberson 		 * If the priority has been elevated due to priority
15883f741ca1SJeff Roberson 		 * propagation, we may have to move ourselves to a new
1589e7d50326SJeff Roberson 		 * queue.  This could be optimized to not re-add in some
1590e7d50326SJeff Roberson 		 * cases.
1591f2b74cbfSJeff Roberson 		 */
1592e7d50326SJeff Roberson 		sched_rem(td);
1593e7d50326SJeff Roberson 		td->td_priority = prio;
1594ae7a6b38SJeff Roberson 		sched_add(td, SRQ_BORROWING);
1595ae7a6b38SJeff Roberson 	} else {
1596ae7a6b38SJeff Roberson #ifdef SMP
1597ae7a6b38SJeff Roberson 		struct tdq *tdq;
1598ae7a6b38SJeff Roberson 
1599ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(ts->ts_cpu);
1600ae7a6b38SJeff Roberson 		if (prio < tdq->tdq_lowpri)
1601ae7a6b38SJeff Roberson 			tdq->tdq_lowpri = prio;
1602ae7a6b38SJeff Roberson #endif
16033f741ca1SJeff Roberson 		td->td_priority = prio;
160435e6168fSJeff Roberson 	}
1605ae7a6b38SJeff Roberson }
160635e6168fSJeff Roberson 
1607f5c157d9SJohn Baldwin /*
1608f5c157d9SJohn Baldwin  * Update a thread's priority when it is lent another thread's
1609f5c157d9SJohn Baldwin  * priority.
1610f5c157d9SJohn Baldwin  */
1611f5c157d9SJohn Baldwin void
1612f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1613f5c157d9SJohn Baldwin {
1614f5c157d9SJohn Baldwin 
1615f5c157d9SJohn Baldwin 	td->td_flags |= TDF_BORROWING;
1616f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1617f5c157d9SJohn Baldwin }
1618f5c157d9SJohn Baldwin 
1619f5c157d9SJohn Baldwin /*
1620f5c157d9SJohn Baldwin  * Restore a thread's priority when priority propagation is
1621f5c157d9SJohn Baldwin  * over.  The prio argument is the minimum priority the thread
1622f5c157d9SJohn Baldwin  * needs to have to satisfy other possible priority lending
1623f5c157d9SJohn Baldwin  * requests.  If the thread's regular priority is less
1624f5c157d9SJohn Baldwin  * important than prio, the thread will keep a priority boost
1625f5c157d9SJohn Baldwin  * of prio.
1626f5c157d9SJohn Baldwin  */
1627f5c157d9SJohn Baldwin void
1628f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1629f5c157d9SJohn Baldwin {
1630f5c157d9SJohn Baldwin 	u_char base_pri;
1631f5c157d9SJohn Baldwin 
1632f5c157d9SJohn Baldwin 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1633f5c157d9SJohn Baldwin 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
16348460a577SJohn Birrell 		base_pri = td->td_user_pri;
1635f5c157d9SJohn Baldwin 	else
1636f5c157d9SJohn Baldwin 		base_pri = td->td_base_pri;
1637f5c157d9SJohn Baldwin 	if (prio >= base_pri) {
1638f5c157d9SJohn Baldwin 		td->td_flags &= ~TDF_BORROWING;
1639f5c157d9SJohn Baldwin 		sched_thread_priority(td, base_pri);
1640f5c157d9SJohn Baldwin 	} else
1641f5c157d9SJohn Baldwin 		sched_lend_prio(td, prio);
1642f5c157d9SJohn Baldwin }
1643f5c157d9SJohn Baldwin 
1644ae7a6b38SJeff Roberson /*
1645ae7a6b38SJeff Roberson  * Standard entry for setting the priority to an absolute value.
1646ae7a6b38SJeff Roberson  */
1647f5c157d9SJohn Baldwin void
1648f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1649f5c157d9SJohn Baldwin {
1650f5c157d9SJohn Baldwin 	u_char oldprio;
1651f5c157d9SJohn Baldwin 
1652f5c157d9SJohn Baldwin 	/* First, update the base priority. */
1653f5c157d9SJohn Baldwin 	td->td_base_pri = prio;
1654f5c157d9SJohn Baldwin 
1655f5c157d9SJohn Baldwin 	/*
165650aaa791SJohn Baldwin 	 * If the thread is borrowing another thread's priority, don't
1657f5c157d9SJohn Baldwin 	 * ever lower the priority.
1658f5c157d9SJohn Baldwin 	 */
1659f5c157d9SJohn Baldwin 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1660f5c157d9SJohn Baldwin 		return;
1661f5c157d9SJohn Baldwin 
1662f5c157d9SJohn Baldwin 	/* Change the real priority. */
1663f5c157d9SJohn Baldwin 	oldprio = td->td_priority;
1664f5c157d9SJohn Baldwin 	sched_thread_priority(td, prio);
1665f5c157d9SJohn Baldwin 
1666f5c157d9SJohn Baldwin 	/*
1667f5c157d9SJohn Baldwin 	 * If the thread is on a turnstile, then let the turnstile update
1668f5c157d9SJohn Baldwin 	 * its state.
1669f5c157d9SJohn Baldwin 	 */
1670f5c157d9SJohn Baldwin 	if (TD_ON_LOCK(td) && oldprio != prio)
1671f5c157d9SJohn Baldwin 		turnstile_adjust(td, oldprio);
1672f5c157d9SJohn Baldwin }
1673f5c157d9SJohn Baldwin 
1674ae7a6b38SJeff Roberson /*
1675ae7a6b38SJeff Roberson  * Set the base user priority; does not affect the current running priority.
1676ae7a6b38SJeff Roberson  */
167735e6168fSJeff Roberson void
16788460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
16793db720fdSDavid Xu {
16803db720fdSDavid Xu 	u_char oldprio;
16813db720fdSDavid Xu 
16828460a577SJohn Birrell 	td->td_base_user_pri = prio;
1683fc6c30f6SJulian Elischer 	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1684fc6c30f6SJulian Elischer                 return;
16858460a577SJohn Birrell 	oldprio = td->td_user_pri;
16868460a577SJohn Birrell 	td->td_user_pri = prio;
16873db720fdSDavid Xu 
16883db720fdSDavid Xu 	if (TD_ON_UPILOCK(td) && oldprio != prio)
16893db720fdSDavid Xu 		umtx_pi_adjust(td, oldprio);
16903db720fdSDavid Xu }
16913db720fdSDavid Xu 
16923db720fdSDavid Xu void
16933db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
16943db720fdSDavid Xu {
16953db720fdSDavid Xu 	u_char oldprio;
16963db720fdSDavid Xu 
16973db720fdSDavid Xu 	td->td_flags |= TDF_UBORROWING;
16983db720fdSDavid Xu 
1699f645b5daSMaxim Konovalov 	oldprio = td->td_user_pri;
17008460a577SJohn Birrell 	td->td_user_pri = prio;
17013db720fdSDavid Xu 
17023db720fdSDavid Xu 	if (TD_ON_UPILOCK(td) && oldprio != prio)
17033db720fdSDavid Xu 		umtx_pi_adjust(td, oldprio);
17043db720fdSDavid Xu }
17053db720fdSDavid Xu 
17063db720fdSDavid Xu void
17073db720fdSDavid Xu sched_unlend_user_prio(struct thread *td, u_char prio)
17083db720fdSDavid Xu {
17093db720fdSDavid Xu 	u_char base_pri;
17103db720fdSDavid Xu 
17118460a577SJohn Birrell 	base_pri = td->td_base_user_pri;
17123db720fdSDavid Xu 	if (prio >= base_pri) {
17133db720fdSDavid Xu 		td->td_flags &= ~TDF_UBORROWING;
17148460a577SJohn Birrell 		sched_user_prio(td, base_pri);
17153db720fdSDavid Xu 	} else
17163db720fdSDavid Xu 		sched_lend_user_prio(td, prio);
17173db720fdSDavid Xu }
17183db720fdSDavid Xu 
1719ae7a6b38SJeff Roberson /*
172008c9a16cSJeff Roberson  * Add the thread passed as 'newtd' to the run queue before selecting
172108c9a16cSJeff Roberson  * the next thread to run.  This is only used for KSE.
172208c9a16cSJeff Roberson  */
172308c9a16cSJeff Roberson static void
172408c9a16cSJeff Roberson sched_switchin(struct tdq *tdq, struct thread *td)
172508c9a16cSJeff Roberson {
172608c9a16cSJeff Roberson #ifdef SMP
172708c9a16cSJeff Roberson 	spinlock_enter();
172808c9a16cSJeff Roberson 	TDQ_UNLOCK(tdq);
172908c9a16cSJeff Roberson 	thread_lock(td);
173008c9a16cSJeff Roberson 	spinlock_exit();
173108c9a16cSJeff Roberson 	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
173208c9a16cSJeff Roberson #else
173308c9a16cSJeff Roberson 	td->td_lock = TDQ_LOCKPTR(tdq);
173408c9a16cSJeff Roberson #endif
173508c9a16cSJeff Roberson 	tdq_add(tdq, td, SRQ_YIELDING);
173608c9a16cSJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
173708c9a16cSJeff Roberson }
173808c9a16cSJeff Roberson 
173908c9a16cSJeff Roberson /*
1740c47f202bSJeff Roberson  * Handle migration from sched_switch().  This happens only for
1741c47f202bSJeff Roberson  * cpu binding.
1742c47f202bSJeff Roberson  */
1743c47f202bSJeff Roberson static struct mtx *
1744c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1745c47f202bSJeff Roberson {
1746c47f202bSJeff Roberson 	struct tdq *tdn;
1747c47f202bSJeff Roberson 
1748c47f202bSJeff Roberson 	tdn = TDQ_CPU(td->td_sched->ts_cpu);
1749c47f202bSJeff Roberson #ifdef SMP
1750c47f202bSJeff Roberson 	/*
1751c47f202bSJeff Roberson 	 * Do the lock dance required to avoid LOR.  We grab an extra
1752c47f202bSJeff Roberson 	 * spinlock nesting to prevent preemption while we're
1753c47f202bSJeff Roberson 	 * not holding either run-queue lock.
1754c47f202bSJeff Roberson 	 */
1755c47f202bSJeff Roberson 	spinlock_enter();
1756c47f202bSJeff Roberson 	thread_block_switch(td);	/* This releases the lock on tdq. */
1757c47f202bSJeff Roberson 	TDQ_LOCK(tdn);
1758c47f202bSJeff Roberson 	tdq_add(tdn, td, flags);
1759c47f202bSJeff Roberson 	tdq_notify(td->td_sched);
1760c47f202bSJeff Roberson 	/*
1761c47f202bSJeff Roberson 	 * After we unlock tdn the new cpu still can't switch into this
1762c47f202bSJeff Roberson 	 * thread until we've unblocked it in cpu_switch().  The lock
1763c47f202bSJeff Roberson 	 * pointers may match in the case of HTT cores.  Don't unlock here
1764c47f202bSJeff Roberson 	 * or we can deadlock when the other CPU runs the IPI handler.
1765c47f202bSJeff Roberson 	 */
1766c47f202bSJeff Roberson 	if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1767c47f202bSJeff Roberson 		TDQ_UNLOCK(tdn);
1768c47f202bSJeff Roberson 		TDQ_LOCK(tdq);
1769c47f202bSJeff Roberson 	}
1770c47f202bSJeff Roberson 	spinlock_exit();
1771c47f202bSJeff Roberson #endif
1772c47f202bSJeff Roberson 	return (TDQ_LOCKPTR(tdn));
1773c47f202bSJeff Roberson }
1774c47f202bSJeff Roberson 
1775c47f202bSJeff Roberson /*
1776ae7a6b38SJeff Roberson  * Block a thread for switching.  Similar to thread_block() but does not
1777ae7a6b38SJeff Roberson  * bump the spin count.
1778ae7a6b38SJeff Roberson  */
1779ae7a6b38SJeff Roberson static inline struct mtx *
1780ae7a6b38SJeff Roberson thread_block_switch(struct thread *td)
1781ae7a6b38SJeff Roberson {
1782ae7a6b38SJeff Roberson 	struct mtx *lock;
1783ae7a6b38SJeff Roberson 
1784ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1785ae7a6b38SJeff Roberson 	lock = td->td_lock;
1786ae7a6b38SJeff Roberson 	td->td_lock = &blocked_lock;
1787ae7a6b38SJeff Roberson 	mtx_unlock_spin(lock);
1788ae7a6b38SJeff Roberson 
1789ae7a6b38SJeff Roberson 	return (lock);
1790ae7a6b38SJeff Roberson }
1791ae7a6b38SJeff Roberson 
1792ae7a6b38SJeff Roberson /*
1793ae7a6b38SJeff Roberson  * Release a thread that was blocked with thread_block_switch().
1794ae7a6b38SJeff Roberson  */
1795ae7a6b38SJeff Roberson static inline void
1796ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx)
1797ae7a6b38SJeff Roberson {
1798ae7a6b38SJeff Roberson 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1799ae7a6b38SJeff Roberson 	    (uintptr_t)mtx);
1800ae7a6b38SJeff Roberson }
1801ae7a6b38SJeff Roberson 
1802ae7a6b38SJeff Roberson /*
1803ae7a6b38SJeff Roberson  * Switch threads.  This function has to handle threads coming in while
1804ae7a6b38SJeff Roberson  * blocked for some reason, running, or idle.  It also must deal with
1805ae7a6b38SJeff Roberson  * migrating a thread from one queue to another as running threads may
1806ae7a6b38SJeff Roberson  * be assigned elsewhere via binding.
1807ae7a6b38SJeff Roberson  */
18083db720fdSDavid Xu void
18093389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags)
181035e6168fSJeff Roberson {
1811c02bbb43SJeff Roberson 	struct tdq *tdq;
1812ad1e7d28SJulian Elischer 	struct td_sched *ts;
1813ae7a6b38SJeff Roberson 	struct mtx *mtx;
1814c47f202bSJeff Roberson 	int srqflag;
1815ae7a6b38SJeff Roberson 	int cpuid;
181635e6168fSJeff Roberson 
18177b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
181835e6168fSJeff Roberson 
1819ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
1820ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpuid);
1821e7d50326SJeff Roberson 	ts = td->td_sched;
1822c47f202bSJeff Roberson 	mtx = td->td_lock;
1823ae7a6b38SJeff Roberson #ifdef SMP
1824ae7a6b38SJeff Roberson 	ts->ts_rltick = ticks;
1825ae7a6b38SJeff Roberson 	if (newtd && newtd->td_priority < tdq->tdq_lowpri)
1826ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = newtd->td_priority;
1827ae7a6b38SJeff Roberson #endif
1828060563ecSJulian Elischer 	td->td_lastcpu = td->td_oncpu;
1829060563ecSJulian Elischer 	td->td_oncpu = NOCPU;
183052eb8464SJohn Baldwin 	td->td_flags &= ~TDF_NEEDRESCHED;
183177918643SStephan Uphoff 	td->td_owepreempt = 0;
1832b11fdad0SJeff Roberson 	/*
1833ae7a6b38SJeff Roberson 	 * The lock pointer in an idle thread should never change.  Reset the
1834ae7a6b38SJeff Roberson 	 * thread state to CAN_RUN as well.
1835b11fdad0SJeff Roberson 	 */
1836486a9414SJulian Elischer 	if (TD_IS_IDLETHREAD(td)) {
1837ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1838bf0acc27SJohn Baldwin 		TD_SET_CAN_RUN(td);
18397b20fb19SJeff Roberson 	} else if (TD_IS_RUNNING(td)) {
1840ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
18417b20fb19SJeff Roberson 		tdq_load_rem(tdq, ts);
1842c47f202bSJeff Roberson 		srqflag = (flags & SW_PREEMPT) ?
1843598b368dSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1844c47f202bSJeff Roberson 		    SRQ_OURSELF|SRQ_YIELDING;
1845c47f202bSJeff Roberson 		if (ts->ts_cpu == cpuid)
1846c47f202bSJeff Roberson 			tdq_add(tdq, td, srqflag);
1847c47f202bSJeff Roberson 		else
1848c47f202bSJeff Roberson 			mtx = sched_switch_migrate(tdq, td, srqflag);
1849ae7a6b38SJeff Roberson 	} else {
1850ae7a6b38SJeff Roberson 		/* This thread must be going to sleep. */
1851ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
1852ae7a6b38SJeff Roberson 		mtx = thread_block_switch(td);
1853ae7a6b38SJeff Roberson 		tdq_load_rem(tdq, ts);
1854ae7a6b38SJeff Roberson 	}
1855ae7a6b38SJeff Roberson 	/*
1856ae7a6b38SJeff Roberson 	 * We enter here with the thread blocked and assigned to the
1857ae7a6b38SJeff Roberson 	 * appropriate cpu run-queue or sleep-queue and with the current
1858ae7a6b38SJeff Roberson 	 * thread-queue locked.
1859ae7a6b38SJeff Roberson 	 */
1860ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1861ae7a6b38SJeff Roberson 	/*
186208c9a16cSJeff Roberson 	 * If KSE assigned a new thread just add it here and let choosethread
186308c9a16cSJeff Roberson 	 * select the best one.
1864ae7a6b38SJeff Roberson 	 */
186508c9a16cSJeff Roberson 	if (newtd != NULL)
186608c9a16cSJeff Roberson 		sched_switchin(tdq, newtd);
18672454aaf5SJeff Roberson 	newtd = choosethread();
1868ae7a6b38SJeff Roberson 	/*
1869ae7a6b38SJeff Roberson 	 * Call the MD code to switch contexts if necessary.
1870ae7a6b38SJeff Roberson 	 */
1871ebccf1e3SJoseph Koshy 	if (td != newtd) {
1872ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
1873ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1874ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1875ebccf1e3SJoseph Koshy #endif
1876ae7a6b38SJeff Roberson 		cpu_switch(td, newtd, mtx);
1877ae7a6b38SJeff Roberson 		/*
1878ae7a6b38SJeff Roberson 		 * We may return from cpu_switch on a different cpu.  However,
1879ae7a6b38SJeff Roberson 		 * we always return with td_lock pointing to the current cpu's
1880ae7a6b38SJeff Roberson 		 * run queue lock.
1881ae7a6b38SJeff Roberson 		 */
1882ae7a6b38SJeff Roberson 		cpuid = PCPU_GET(cpuid);
1883ae7a6b38SJeff Roberson 		tdq = TDQ_CPU(cpuid);
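		/*
		 * Claim ownership of this cpu's run-queue lock; it was
		 * acquired on our behalf before cpu_switch() resumed us, so
		 * the owner field still names the previous thread.
		 */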
1884ae7a6b38SJeff Roberson 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
1885ebccf1e3SJoseph Koshy #ifdef	HWPMC_HOOKS
1886ebccf1e3SJoseph Koshy 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1887ebccf1e3SJoseph Koshy 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1888ebccf1e3SJoseph Koshy #endif
1889ae7a6b38SJeff Roberson 	} else
1890ae7a6b38SJeff Roberson 		thread_unblock_switch(td, mtx);
1891ae7a6b38SJeff Roberson 	/*
1892ae7a6b38SJeff Roberson 	 * Assert that all went well and return.
1893ae7a6b38SJeff Roberson 	 */
1894ae7a6b38SJeff Roberson #ifdef SMP
1895ae7a6b38SJeff Roberson 	/* We should always get here with the lowest priority td possible */
1896ae7a6b38SJeff Roberson 	tdq->tdq_lowpri = td->td_priority;
1897ae7a6b38SJeff Roberson #endif
1898ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1899ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1900ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
190135e6168fSJeff Roberson }
190235e6168fSJeff Roberson 
1903ae7a6b38SJeff Roberson /*
1904ae7a6b38SJeff Roberson  * Adjust thread priorities as a result of a nice request.
1905ae7a6b38SJeff Roberson  */
190635e6168fSJeff Roberson void
1907fa885116SJulian Elischer sched_nice(struct proc *p, int nice)
190835e6168fSJeff Roberson {
190935e6168fSJeff Roberson 	struct thread *td;
191035e6168fSJeff Roberson 
1911fa885116SJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
19127b20fb19SJeff Roberson 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1913e7d50326SJeff Roberson 
1914fa885116SJulian Elischer 	p->p_nice = nice;
19158460a577SJohn Birrell 	FOREACH_THREAD_IN_PROC(p, td) {
19167b20fb19SJeff Roberson 		thread_lock(td);
19178460a577SJohn Birrell 		sched_priority(td);
1918e7d50326SJeff Roberson 		sched_prio(td, td->td_base_user_pri);
19197b20fb19SJeff Roberson 		thread_unlock(td);
192035e6168fSJeff Roberson 	}
1921fa885116SJulian Elischer }
192235e6168fSJeff Roberson 
1923ae7a6b38SJeff Roberson /*
1924ae7a6b38SJeff Roberson  * Record the sleep time for the interactivity scorer.
1925ae7a6b38SJeff Roberson  */
192635e6168fSJeff Roberson void
192744f3b092SJohn Baldwin sched_sleep(struct thread *td)
192835e6168fSJeff Roberson {
1929e7d50326SJeff Roberson 
19307b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
193135e6168fSJeff Roberson 
193254b0e65fSJeff Roberson 	td->td_slptick = ticks;
193335e6168fSJeff Roberson }
193435e6168fSJeff Roberson 
1935ae7a6b38SJeff Roberson /*
1936ae7a6b38SJeff Roberson  * Schedule a thread to resume execution and record how long it voluntarily
1937ae7a6b38SJeff Roberson  * slept.  We also update the pctcpu, interactivity, and priority.
1938ae7a6b38SJeff Roberson  */
193935e6168fSJeff Roberson void
194035e6168fSJeff Roberson sched_wakeup(struct thread *td)
194135e6168fSJeff Roberson {
194214618990SJeff Roberson 	struct td_sched *ts;
1943ae7a6b38SJeff Roberson 	int slptick;
1944e7d50326SJeff Roberson 
19457b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
194614618990SJeff Roberson 	ts = td->td_sched;
194735e6168fSJeff Roberson 	/*
1948e7d50326SJeff Roberson 	 * If we slept for more than a tick update our interactivity and
1949e7d50326SJeff Roberson 	 * priority.
195035e6168fSJeff Roberson 	 */
195154b0e65fSJeff Roberson 	slptick = td->td_slptick;
195254b0e65fSJeff Roberson 	td->td_slptick = 0;
1953ae7a6b38SJeff Roberson 	if (slptick && slptick != ticks) {
19549a93305aSJeff Roberson 		u_int hzticks;
1955f1e8dc4aSJeff Roberson 
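		/*
		 * Scale the elapsed hz ticks by SCHED_TICK_SHIFT so sleep
		 * time is accounted in the same units as ts_runtime.
		 */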
1956ae7a6b38SJeff Roberson 		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1957ae7a6b38SJeff Roberson 		ts->ts_slptime += hzticks;
19588460a577SJohn Birrell 		sched_interact_update(td);
195914618990SJeff Roberson 		sched_pctcpu_update(ts);
19608460a577SJohn Birrell 		sched_priority(td);
1961f1e8dc4aSJeff Roberson 	}
196214618990SJeff Roberson 	/* Reset the slice value after we sleep. */
196314618990SJeff Roberson 	ts->ts_slice = sched_slice;
19647a5e5e2aSJeff Roberson 	sched_add(td, SRQ_BORING);
196535e6168fSJeff Roberson }
196635e6168fSJeff Roberson 
196735e6168fSJeff Roberson /*
196835e6168fSJeff Roberson  * Penalize the parent for creating a new child and initialize the child's
196935e6168fSJeff Roberson  * priority.
197035e6168fSJeff Roberson  */
197135e6168fSJeff Roberson void
19728460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
197315dc847eSJeff Roberson {
19747b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1975ad1e7d28SJulian Elischer 	sched_fork_thread(td, child);
1976e7d50326SJeff Roberson 	/*
1977e7d50326SJeff Roberson 	 * Penalize the parent and child for forking.
1978e7d50326SJeff Roberson 	 */
1979e7d50326SJeff Roberson 	sched_interact_fork(child);
1980e7d50326SJeff Roberson 	sched_priority(child);
1981ae7a6b38SJeff Roberson 	td->td_sched->ts_runtime += tickincr;
1982e7d50326SJeff Roberson 	sched_interact_update(td);
1983e7d50326SJeff Roberson 	sched_priority(td);
1984ad1e7d28SJulian Elischer }
1985ad1e7d28SJulian Elischer 
1986ae7a6b38SJeff Roberson /*
1987ae7a6b38SJeff Roberson  * Fork a new thread, may be within the same process.
1988ae7a6b38SJeff Roberson  */
1989ad1e7d28SJulian Elischer void
1990ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
1991ad1e7d28SJulian Elischer {
1992ad1e7d28SJulian Elischer 	struct td_sched *ts;
1993ad1e7d28SJulian Elischer 	struct td_sched *ts2;
19948460a577SJohn Birrell 
1995e7d50326SJeff Roberson 	/*
1996e7d50326SJeff Roberson 	 * Initialize child.
1997e7d50326SJeff Roberson 	 */
19987b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1999ed062c8dSJulian Elischer 	sched_newthread(child);
2000ae7a6b38SJeff Roberson 	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
2001ad1e7d28SJulian Elischer 	ts = td->td_sched;
2002ad1e7d28SJulian Elischer 	ts2 = child->td_sched;
2003ad1e7d28SJulian Elischer 	ts2->ts_cpu = ts->ts_cpu;
2004ad1e7d28SJulian Elischer 	ts2->ts_runq = NULL;
2005e7d50326SJeff Roberson 	/*
2006e7d50326SJeff Roberson 	 * Grab our parents cpu estimation information and priority.
2007e7d50326SJeff Roberson 	 */
2008ad1e7d28SJulian Elischer 	ts2->ts_ticks = ts->ts_ticks;
2009ad1e7d28SJulian Elischer 	ts2->ts_ltick = ts->ts_ltick;
2010ad1e7d28SJulian Elischer 	ts2->ts_ftick = ts->ts_ftick;
2011e7d50326SJeff Roberson 	child->td_user_pri = td->td_user_pri;
2012e7d50326SJeff Roberson 	child->td_base_user_pri = td->td_base_user_pri;
2013e7d50326SJeff Roberson 	/*
2014e7d50326SJeff Roberson 	 * And update interactivity score.
2015e7d50326SJeff Roberson 	 */
2016ae7a6b38SJeff Roberson 	ts2->ts_slptime = ts->ts_slptime;
2017ae7a6b38SJeff Roberson 	ts2->ts_runtime = ts->ts_runtime;
2018e7d50326SJeff Roberson 	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
201915dc847eSJeff Roberson }
202015dc847eSJeff Roberson 
2021ae7a6b38SJeff Roberson /*
2022ae7a6b38SJeff Roberson  * Adjust the priority class of a thread.
2023ae7a6b38SJeff Roberson  */
202415dc847eSJeff Roberson void
20258460a577SJohn Birrell sched_class(struct thread *td, int class)
202615dc847eSJeff Roberson {
202715dc847eSJeff Roberson 
20287b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
20298460a577SJohn Birrell 	if (td->td_pri_class == class)
203015dc847eSJeff Roberson 		return;
203115dc847eSJeff Roberson 
2032ef1134c9SJeff Roberson #ifdef SMP
2033155b9987SJeff Roberson 	/*
2034155b9987SJeff Roberson 	 * On SMP if we're on the RUNQ we must adjust the transferable
2035155b9987SJeff Roberson 	 * count because it could be changing to or from an interrupt
2036155b9987SJeff Roberson 	 * class.
2037155b9987SJeff Roberson 	 */
20387a5e5e2aSJeff Roberson 	if (TD_ON_RUNQ(td)) {
20391e516cf5SJeff Roberson 		struct tdq *tdq;
20401e516cf5SJeff Roberson 
20411e516cf5SJeff Roberson 		tdq = TDQ_CPU(td->td_sched->ts_cpu);
20421e516cf5SJeff Roberson 		if (THREAD_CAN_MIGRATE(td)) {
2043d2ad694cSJeff Roberson 			tdq->tdq_transferable--;
2044d2ad694cSJeff Roberson 			tdq->tdq_group->tdg_transferable--;
204580f86c9fSJeff Roberson 		}
20461e516cf5SJeff Roberson 		td->td_pri_class = class;
20471e516cf5SJeff Roberson 		if (THREAD_CAN_MIGRATE(td)) {
2048d2ad694cSJeff Roberson 			tdq->tdq_transferable++;
2049d2ad694cSJeff Roberson 			tdq->tdq_group->tdg_transferable++;
205080f86c9fSJeff Roberson 		}
2051155b9987SJeff Roberson 	}
2052ef1134c9SJeff Roberson #endif
20538460a577SJohn Birrell 	td->td_pri_class = class;
205435e6168fSJeff Roberson }
205535e6168fSJeff Roberson 
205635e6168fSJeff Roberson /*
205735e6168fSJeff Roberson  * Return some of the child's priority and interactivity to the parent.
205835e6168fSJeff Roberson  */
205935e6168fSJeff Roberson void
2060fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
206135e6168fSJeff Roberson {
2062e7d50326SJeff Roberson 	struct thread *td;
2063141ad61cSJeff Roberson 
20648460a577SJohn Birrell 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2065fc6c30f6SJulian Elischer 	    child, child->td_proc->p_comm, child->td_priority);
20668460a577SJohn Birrell 
20677b20fb19SJeff Roberson 	PROC_SLOCK_ASSERT(p, MA_OWNED);
2068e7d50326SJeff Roberson 	td = FIRST_THREAD_IN_PROC(p);
2069e7d50326SJeff Roberson 	sched_exit_thread(td, child);
2070ad1e7d28SJulian Elischer }
2071ad1e7d28SJulian Elischer 
2072ae7a6b38SJeff Roberson /*
2073ae7a6b38SJeff Roberson  * Penalize another thread for the time spent on this one.  This helps to
2074ae7a6b38SJeff Roberson  * worsen the priority and interactivity of processes which schedule batch
2075ae7a6b38SJeff Roberson  * jobs such as make.  This has little effect on the make process itself but
2076ae7a6b38SJeff Roberson  * causes new processes spawned by it to receive worse scores immediately.
2077ae7a6b38SJeff Roberson  */
2078ad1e7d28SJulian Elischer void
2079fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child)
2080ad1e7d28SJulian Elischer {
2081fc6c30f6SJulian Elischer 
2082e7d50326SJeff Roberson 	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2083e7d50326SJeff Roberson 	    child, child->td_proc->p_comm, child->td_priority);
2084e7d50326SJeff Roberson 
2085e7d50326SJeff Roberson #ifdef KSE
2086e7d50326SJeff Roberson 	/*
2087e7d50326SJeff Roberson 	 * KSE forks and exits so often that this penalty causes short-lived
2088e7d50326SJeff Roberson 	 * threads to always be non-interactive.  This causes mozilla to
2089e7d50326SJeff Roberson 	 * crawl under load.
2090e7d50326SJeff Roberson 	 */
2091e7d50326SJeff Roberson 	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2092e7d50326SJeff Roberson 		return;
2093e7d50326SJeff Roberson #endif
2094e7d50326SJeff Roberson 	/*
2095e7d50326SJeff Roberson 	 * Give the child's runtime to the parent without returning the
2096e7d50326SJeff Roberson 	 * sleep time as a penalty to the parent.  This causes shells that
2097e7d50326SJeff Roberson 	 * launch expensive things to mark their children as expensive.
2098e7d50326SJeff Roberson 	 */
20997b20fb19SJeff Roberson 	thread_lock(td);
2100ae7a6b38SJeff Roberson 	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2101fc6c30f6SJulian Elischer 	sched_interact_update(td);
2102e7d50326SJeff Roberson 	sched_priority(td);
21037b20fb19SJeff Roberson 	thread_unlock(td);
2104ad1e7d28SJulian Elischer }
2105ad1e7d28SJulian Elischer 
2106ae7a6b38SJeff Roberson /*
2107ae7a6b38SJeff Roberson  * Fix priorities on return to user-space.  Priorities may be elevated due
2108ae7a6b38SJeff Roberson  * to static priorities in msleep() or similar.
2109ae7a6b38SJeff Roberson  */
2110ad1e7d28SJulian Elischer void
2111ad1e7d28SJulian Elischer sched_userret(struct thread *td)
2112ad1e7d28SJulian Elischer {
2113ad1e7d28SJulian Elischer 	/*
2114ad1e7d28SJulian Elischer 	 * XXX we cheat slightly on the locking here to avoid locking in
2115ad1e7d28SJulian Elischer 	 * the usual case.  Setting td_priority here is essentially an
2116ad1e7d28SJulian Elischer 	 * incomplete workaround for not setting it properly elsewhere.
2117ad1e7d28SJulian Elischer 	 * Now that some interrupt handlers are threads, not setting it
2118ad1e7d28SJulian Elischer 	 * properly elsewhere can clobber it in the window between setting
2119ad1e7d28SJulian Elischer 	 * it here and returning to user mode, so don't waste time setting
2120ad1e7d28SJulian Elischer 	 * it perfectly here.
2121ad1e7d28SJulian Elischer 	 */
2122ad1e7d28SJulian Elischer 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
2123ad1e7d28SJulian Elischer 	    ("thread with borrowed priority returning to userland"));
2124ad1e7d28SJulian Elischer 	if (td->td_priority != td->td_user_pri) {
21257b20fb19SJeff Roberson 		thread_lock(td);
2126ad1e7d28SJulian Elischer 		td->td_priority = td->td_user_pri;
2127ad1e7d28SJulian Elischer 		td->td_base_pri = td->td_user_pri;
21287b20fb19SJeff Roberson 		thread_unlock(td);
2129ad1e7d28SJulian Elischer         }
213035e6168fSJeff Roberson }
213135e6168fSJeff Roberson 
2132ae7a6b38SJeff Roberson /*
2133ae7a6b38SJeff Roberson  * Handle a stathz tick.  This is really only relevant for timeshare
2134ae7a6b38SJeff Roberson  * threads.
2135ae7a6b38SJeff Roberson  */
213635e6168fSJeff Roberson void
21377cf90fb3SJeff Roberson sched_clock(struct thread *td)
213835e6168fSJeff Roberson {
2139ad1e7d28SJulian Elischer 	struct tdq *tdq;
2140ad1e7d28SJulian Elischer 	struct td_sched *ts;
214135e6168fSJeff Roberson 
2142ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
21433f872f85SJeff Roberson 	tdq = TDQ_SELF();
21443f872f85SJeff Roberson 	/*
21453f872f85SJeff Roberson 	 * Advance the insert index once for each tick to ensure that all
21463f872f85SJeff Roberson 	 * threads get a chance to run.
21473f872f85SJeff Roberson 	 */
21483f872f85SJeff Roberson 	if (tdq->tdq_idx == tdq->tdq_ridx) {
21493f872f85SJeff Roberson 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
21503f872f85SJeff Roberson 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
21513f872f85SJeff Roberson 			tdq->tdq_ridx = tdq->tdq_idx;
21523f872f85SJeff Roberson 	}
21533f872f85SJeff Roberson 	ts = td->td_sched;
21543f741ca1SJeff Roberson 	/*
21558460a577SJohn Birrell 	 * We only run the slicing code for TIMESHARE threads.
2156a8949de2SJeff Roberson 	 */
21578460a577SJohn Birrell 	if (td->td_pri_class != PRI_TIMESHARE)
2158a8949de2SJeff Roberson 		return;
2159a8949de2SJeff Roberson 	/*
21603f872f85SJeff Roberson 	 * We used a tick; charge it to the thread so that we can compute our
216115dc847eSJeff Roberson 	 * interactivity.
216215dc847eSJeff Roberson 	 */
2163ae7a6b38SJeff Roberson 	td->td_sched->ts_runtime += tickincr;
21648460a577SJohn Birrell 	sched_interact_update(td);
216535e6168fSJeff Roberson 	/*
216635e6168fSJeff Roberson 	 * We used up one time slice.
216735e6168fSJeff Roberson 	 */
2168ad1e7d28SJulian Elischer 	if (--ts->ts_slice > 0)
216915dc847eSJeff Roberson 		return;
217035e6168fSJeff Roberson 	/*
217115dc847eSJeff Roberson 	 * We're out of time; recompute priorities and requeue.
217235e6168fSJeff Roberson 	 */
21738460a577SJohn Birrell 	sched_priority(td);
21744a338afdSJulian Elischer 	td->td_flags |= TDF_NEEDRESCHED;
217535e6168fSJeff Roberson }
217635e6168fSJeff Roberson 
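/*
 * Illustrative sketch only (not part of the scheduler): the timeshare
 * run-queue above behaves like a small calendar queue.  Threads are
 * inserted relative to tdq_idx while tdq_choose() drains buckets
 * starting at tdq_ridx, and sched_clock() advances the insert index
 * one bucket per stathz tick:
 *
 *	tdq_idx = (tdq_idx + 1) % RQ_NQS;
 *
 * A thread queued some buckets ahead of tdq_ridx is reached once the
 * intervening buckets drain, so even the lowest priority timeshare
 * thread makes forward progress and cannot be starved by higher
 * priority timeshare load.
 */
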
2177ae7a6b38SJeff Roberson /*
2178ae7a6b38SJeff Roberson  * Called once per hz tick.  Used for cpu utilization information.  This
2179ae7a6b38SJeff Roberson  * is easier than trying to scale based on stathz.
2180ae7a6b38SJeff Roberson  */
2181ae7a6b38SJeff Roberson void
2182ae7a6b38SJeff Roberson sched_tick(void)
2183ae7a6b38SJeff Roberson {
2184ae7a6b38SJeff Roberson 	struct td_sched *ts;
2185ae7a6b38SJeff Roberson 
2186ae7a6b38SJeff Roberson 	ts = curthread->td_sched;
2187ae7a6b38SJeff Roberson 	/* Adjust ticks for pctcpu */
2188ae7a6b38SJeff Roberson 	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2189ae7a6b38SJeff Roberson 	ts->ts_ltick = ticks;
2190ae7a6b38SJeff Roberson 	/*
2191ae7a6b38SJeff Roberson 	 * Update if we've exceeded our desired tick threshold by over one
2192ae7a6b38SJeff Roberson 	 * second.
2193ae7a6b38SJeff Roberson 	 */
2194ae7a6b38SJeff Roberson 	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2195ae7a6b38SJeff Roberson 		sched_pctcpu_update(ts);
2196ae7a6b38SJeff Roberson }
2197ae7a6b38SJeff Roberson 
2198ae7a6b38SJeff Roberson /*
2199ae7a6b38SJeff Roberson  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2200ae7a6b38SJeff Roberson  * cooperative idle threads.
2201ae7a6b38SJeff Roberson  */
220235e6168fSJeff Roberson int
220335e6168fSJeff Roberson sched_runnable(void)
220435e6168fSJeff Roberson {
2205ad1e7d28SJulian Elischer 	struct tdq *tdq;
2206b90816f1SJeff Roberson 	int load;
220735e6168fSJeff Roberson 
2208b90816f1SJeff Roberson 	load = 1;
2209b90816f1SJeff Roberson 
2210ad1e7d28SJulian Elischer 	tdq = TDQ_SELF();
22113f741ca1SJeff Roberson 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2212d2ad694cSJeff Roberson 		if (tdq->tdq_load > 0)
22133f741ca1SJeff Roberson 			goto out;
22143f741ca1SJeff Roberson 	} else
2215d2ad694cSJeff Roberson 		if (tdq->tdq_load - 1 > 0)
2216b90816f1SJeff Roberson 			goto out;
2217b90816f1SJeff Roberson 	load = 0;
2218b90816f1SJeff Roberson out:
2219b90816f1SJeff Roberson 	return (load);
222035e6168fSJeff Roberson }
222135e6168fSJeff Roberson 
2222ae7a6b38SJeff Roberson /*
2223ae7a6b38SJeff Roberson  * Choose the highest priority thread to run.  The thread is removed from
2224ae7a6b38SJeff Roberson  * the run-queue while running, but the load remains.  For SMP we set the
2225ae7a6b38SJeff Roberson  * tdq's bit in the global idle bitmask if it goes idle here.
2226ae7a6b38SJeff Roberson  */
22277a5e5e2aSJeff Roberson struct thread *
2228c9f25d8fSJeff Roberson sched_choose(void)
2229c9f25d8fSJeff Roberson {
223015dc847eSJeff Roberson #ifdef SMP
2231ae7a6b38SJeff Roberson 	struct tdq_group *tdg;
223215dc847eSJeff Roberson #endif
2233ae7a6b38SJeff Roberson 	struct td_sched *ts;
2234ae7a6b38SJeff Roberson 	struct tdq *tdq;
2235ae7a6b38SJeff Roberson 
2236ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2237ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2238ad1e7d28SJulian Elischer 	ts = tdq_choose(tdq);
2239ad1e7d28SJulian Elischer 	if (ts) {
2240ad1e7d28SJulian Elischer 		tdq_runq_rem(tdq, ts);
22417a5e5e2aSJeff Roberson 		return (ts->ts_thread);
224235e6168fSJeff Roberson 	}
2243c9f25d8fSJeff Roberson #ifdef SMP
2244ae7a6b38SJeff Roberson 	/*
2245ae7a6b38SJeff Roberson 	 * We only set the idled bit when all of the cpus in the group are
2246ae7a6b38SJeff Roberson 	 * idle.  Otherwise we could get into a situation where a thread bounces
2247ae7a6b38SJeff Roberson 	 * back and forth between two idle cores on separate physical CPUs.
2248ae7a6b38SJeff Roberson 	 */
2249ae7a6b38SJeff Roberson 	tdg = tdq->tdq_group;
2250ae7a6b38SJeff Roberson 	tdg->tdg_idlemask |= PCPU_GET(cpumask);
2251ae7a6b38SJeff Roberson 	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2252ae7a6b38SJeff Roberson 		atomic_set_int(&tdq_idle, tdg->tdg_mask);
2253ae7a6b38SJeff Roberson 	tdq->tdq_lowpri = PRI_MAX_IDLE;
2254c9f25d8fSJeff Roberson #endif
22557a5e5e2aSJeff Roberson 	return (PCPU_GET(idlethread));
22567a5e5e2aSJeff Roberson }
22577a5e5e2aSJeff Roberson 
2258ae7a6b38SJeff Roberson /*
2259ae7a6b38SJeff Roberson  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2260ae7a6b38SJeff Roberson  * we always request it here and it is taken when we exit a critical section.
2261ae7a6b38SJeff Roberson  */
2262ae7a6b38SJeff Roberson static inline void
2263ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
22647a5e5e2aSJeff Roberson {
22657a5e5e2aSJeff Roberson 	struct thread *ctd;
22667a5e5e2aSJeff Roberson 	int cpri;
22677a5e5e2aSJeff Roberson 	int pri;
22687a5e5e2aSJeff Roberson 
22697a5e5e2aSJeff Roberson 	ctd = curthread;
22707a5e5e2aSJeff Roberson 	pri = td->td_priority;
22717a5e5e2aSJeff Roberson 	cpri = ctd->td_priority;
2272ae7a6b38SJeff Roberson 	if (pri < cpri)
2273ae7a6b38SJeff Roberson 		ctd->td_flags |= TDF_NEEDRESCHED;
22747a5e5e2aSJeff Roberson 	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2275ae7a6b38SJeff Roberson 		return;
22767a5e5e2aSJeff Roberson 	/*
22777a5e5e2aSJeff Roberson 	 * Always preempt an IDLE class thread.  Otherwise preempt only if the
22787a5e5e2aSJeff Roberson 	 * new thread's priority beats preempt_thresh (normally an ithread).
22797a5e5e2aSJeff Roberson 	 */
2280ae7a6b38SJeff Roberson 	if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2281ae7a6b38SJeff Roberson 		return;
22827a5e5e2aSJeff Roberson 	ctd->td_owepreempt = 1;
2283ae7a6b38SJeff Roberson 	return;
228435e6168fSJeff Roberson }
228535e6168fSJeff Roberson 
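/*
 * A rough decision table for the checks above; rows are evaluated top to
 * bottom, lower numeric priority is better, and TDF_NEEDRESCHED is always
 * set when pri < cpri:
 *
 *	pri >= cpri, panicstr set, cold,
 *	or curthread inhibited			-> no preemption request
 *	pri <= preempt_thresh			-> td_owepreempt = 1
 *	cpri >= PRI_MIN_IDLE			-> td_owepreempt = 1
 *	otherwise				-> wait for the next
 *						   reschedule point
 */
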
2286ae7a6b38SJeff Roberson /*
2287ae7a6b38SJeff Roberson  * Add a thread to a thread queue.  Initialize the priority, slice, runq,
2288ae7a6b38SJeff Roberson  * and add the thread to the appropriate queue.  This is the internal function
2289ae7a6b38SJeff Roberson  * called when the tdq is predetermined.
2290ae7a6b38SJeff Roberson  */
229135e6168fSJeff Roberson void
2292ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
229335e6168fSJeff Roberson {
2294ad1e7d28SJulian Elischer 	struct td_sched *ts;
229522bf7d9aSJeff Roberson 	int class;
22967b8bfa0dSJeff Roberson #ifdef SMP
22977b8bfa0dSJeff Roberson 	int cpumask;
22987b8bfa0dSJeff Roberson #endif
2299c9f25d8fSJeff Roberson 
2300ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
23017a5e5e2aSJeff Roberson 	KASSERT((td->td_inhibitors == 0),
23027a5e5e2aSJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
23037a5e5e2aSJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
23047a5e5e2aSJeff Roberson 	    ("sched_add: bad thread state"));
2305b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
2306b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
2307ae7a6b38SJeff Roberson 
2308ae7a6b38SJeff Roberson 	ts = td->td_sched;
23097a5e5e2aSJeff Roberson 	class = PRI_BASE(td->td_pri_class);
2310ae7a6b38SJeff Roberson 	TD_SET_RUNQ(td);
23117a5e5e2aSJeff Roberson 	if (ts->ts_slice == 0)
23127a5e5e2aSJeff Roberson 		ts->ts_slice = sched_slice;
23132454aaf5SJeff Roberson 	/*
2314ae7a6b38SJeff Roberson 	 * Pick the run queue based on priority.
23152454aaf5SJeff Roberson 	 */
2316ae7a6b38SJeff Roberson 	if (td->td_priority <= PRI_MAX_REALTIME)
2317ae7a6b38SJeff Roberson 		ts->ts_runq = &tdq->tdq_realtime;
2318ae7a6b38SJeff Roberson 	else if (td->td_priority <= PRI_MAX_TIMESHARE)
2319ae7a6b38SJeff Roberson 		ts->ts_runq = &tdq->tdq_timeshare;
23207b8bfa0dSJeff Roberson 	else
2321ae7a6b38SJeff Roberson 		ts->ts_runq = &tdq->tdq_idle;
2322ae7a6b38SJeff Roberson #ifdef SMP
23237b8bfa0dSJeff Roberson 	cpumask = 1 << ts->ts_cpu;
232422bf7d9aSJeff Roberson 	/*
2325670c524fSJeff Roberson 	 * If we had been idle, clear our bit in the group and potentially
23267b8bfa0dSJeff Roberson 	 * the global bitmap.
232722bf7d9aSJeff Roberson 	 */
2328e7d50326SJeff Roberson 	if ((class != PRI_IDLE && class != PRI_ITHD) &&
23297b8bfa0dSJeff Roberson 	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
233080f86c9fSJeff Roberson 		/*
233180f86c9fSJeff Roberson 		 * Check to see if our group is unidling, and if so, remove it
233280f86c9fSJeff Roberson 		 * from the global idle mask.
233380f86c9fSJeff Roberson 		 */
2334d2ad694cSJeff Roberson 		if (tdq->tdq_group->tdg_idlemask ==
2335d2ad694cSJeff Roberson 		    tdq->tdq_group->tdg_cpumask)
2336d2ad694cSJeff Roberson 			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
233780f86c9fSJeff Roberson 		/*
233880f86c9fSJeff Roberson 		 * Now remove ourselves from the group specific idle mask.
233980f86c9fSJeff Roberson 		 */
23407b8bfa0dSJeff Roberson 		tdq->tdq_group->tdg_idlemask &= ~cpumask;
23417b8bfa0dSJeff Roberson 	}
2342ae7a6b38SJeff Roberson 	if (td->td_priority < tdq->tdq_lowpri)
2343ae7a6b38SJeff Roberson 		tdq->tdq_lowpri = td->td_priority;
234422bf7d9aSJeff Roberson #endif
2345ad1e7d28SJulian Elischer 	tdq_runq_add(tdq, ts, flags);
2346ad1e7d28SJulian Elischer 	tdq_load_add(tdq, ts);
2347ae7a6b38SJeff Roberson }
2348ae7a6b38SJeff Roberson 
2349ae7a6b38SJeff Roberson /*
2350ae7a6b38SJeff Roberson  * Select the target thread queue and add a thread to it.  Request
2351ae7a6b38SJeff Roberson  * preemption or IPI a remote processor if required.
2352ae7a6b38SJeff Roberson  */
2353ae7a6b38SJeff Roberson void
2354ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags)
2355ae7a6b38SJeff Roberson {
2356ae7a6b38SJeff Roberson 	struct td_sched *ts;
2357ae7a6b38SJeff Roberson 	struct tdq *tdq;
23587b8bfa0dSJeff Roberson #ifdef SMP
2359ae7a6b38SJeff Roberson 	int cpuid;
2360ae7a6b38SJeff Roberson 	int cpu;
2361ae7a6b38SJeff Roberson #endif
2362ae7a6b38SJeff Roberson 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2363ae7a6b38SJeff Roberson 	    td, td->td_proc->p_comm, td->td_priority, curthread,
2364ae7a6b38SJeff Roberson 	    curthread->td_proc->p_comm);
2365ae7a6b38SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2366ae7a6b38SJeff Roberson 	ts = td->td_sched;
2367ae7a6b38SJeff Roberson 	/*
2368ae7a6b38SJeff Roberson 	 * Recalculate the priority before we select the target cpu or
2369ae7a6b38SJeff Roberson 	 * run-queue.
2370ae7a6b38SJeff Roberson 	 */
2371ae7a6b38SJeff Roberson 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2372ae7a6b38SJeff Roberson 		sched_priority(td);
2373ae7a6b38SJeff Roberson #ifdef SMP
2374ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2375ae7a6b38SJeff Roberson 	/*
2376ae7a6b38SJeff Roberson 	 * Pick the destination cpu and, if it isn't ours, queue the thread
2377ae7a6b38SJeff Roberson 	 * there and notify the target cpu.
2378ae7a6b38SJeff Roberson 	 */
2379ae7a6b38SJeff Roberson 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td))
2380ae7a6b38SJeff Roberson 		cpu = cpuid;
2381ae7a6b38SJeff Roberson 	else if (!THREAD_CAN_MIGRATE(td))
2382ae7a6b38SJeff Roberson 		cpu = ts->ts_cpu;
2383ae7a6b38SJeff Roberson 	else
2384ae7a6b38SJeff Roberson 		cpu = sched_pickcpu(ts, flags);
2385ae7a6b38SJeff Roberson 	tdq = sched_setcpu(ts, cpu, flags);
2386ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
2387ae7a6b38SJeff Roberson 	if (cpu != cpuid) {
23887b8bfa0dSJeff Roberson 		tdq_notify(ts);
23897b8bfa0dSJeff Roberson 		return;
23907b8bfa0dSJeff Roberson 	}
2391ae7a6b38SJeff Roberson #else
2392ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
2393ae7a6b38SJeff Roberson 	TDQ_LOCK(tdq);
2394ae7a6b38SJeff Roberson 	/*
2395ae7a6b38SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
2396ae7a6b38SJeff Roberson 	 * to the scheduler's lock.
2397ae7a6b38SJeff Roberson 	 */
2398ae7a6b38SJeff Roberson 	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2399ae7a6b38SJeff Roberson 	tdq_add(tdq, td, flags);
24007b8bfa0dSJeff Roberson #endif
2401ae7a6b38SJeff Roberson 	if (!(flags & SRQ_YIELDING))
2402ae7a6b38SJeff Roberson 		sched_setpreempt(td);
240335e6168fSJeff Roberson }
240435e6168fSJeff Roberson 
2405ae7a6b38SJeff Roberson /*
2406ae7a6b38SJeff Roberson  * Remove a thread from a run-queue without running it.  This is used
2407ae7a6b38SJeff Roberson  * when we're stealing a thread from a remote queue.  Otherwise all threads
2408ae7a6b38SJeff Roberson  * exit by calling sched_exit_thread() and sched_throw() themselves.
2409ae7a6b38SJeff Roberson  */
241035e6168fSJeff Roberson void
24117cf90fb3SJeff Roberson sched_rem(struct thread *td)
241235e6168fSJeff Roberson {
2413ad1e7d28SJulian Elischer 	struct tdq *tdq;
2414ad1e7d28SJulian Elischer 	struct td_sched *ts;
24157cf90fb3SJeff Roberson 
241681d47d3fSJeff Roberson 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
241781d47d3fSJeff Roberson 	    td, td->td_proc->p_comm, td->td_priority, curthread,
241881d47d3fSJeff Roberson 	    curthread->td_proc->p_comm);
2419ad1e7d28SJulian Elischer 	ts = td->td_sched;
2420ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(ts->ts_cpu);
2421ae7a6b38SJeff Roberson 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2422ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
24237a5e5e2aSJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
2424ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
2425ad1e7d28SJulian Elischer 	tdq_runq_rem(tdq, ts);
2426ad1e7d28SJulian Elischer 	tdq_load_rem(tdq, ts);
24277a5e5e2aSJeff Roberson 	TD_SET_CAN_RUN(td);
242835e6168fSJeff Roberson }
242935e6168fSJeff Roberson 
2430ae7a6b38SJeff Roberson /*
2431ae7a6b38SJeff Roberson  * Fetch cpu utilization information.  Updates on demand.
2432ae7a6b38SJeff Roberson  */
243335e6168fSJeff Roberson fixpt_t
24347cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
243535e6168fSJeff Roberson {
243635e6168fSJeff Roberson 	fixpt_t pctcpu;
2437ad1e7d28SJulian Elischer 	struct td_sched *ts;
243835e6168fSJeff Roberson 
243935e6168fSJeff Roberson 	pctcpu = 0;
2440ad1e7d28SJulian Elischer 	ts = td->td_sched;
2441ad1e7d28SJulian Elischer 	if (ts == NULL)
2442484288deSJeff Roberson 		return (0);
244335e6168fSJeff Roberson 
24447b20fb19SJeff Roberson 	thread_lock(td);
2445ad1e7d28SJulian Elischer 	if (ts->ts_ticks) {
244635e6168fSJeff Roberson 		int rtick;
244735e6168fSJeff Roberson 
2448ad1e7d28SJulian Elischer 		sched_pctcpu_update(ts);
244935e6168fSJeff Roberson 		/* How many rtick per second ? */
2450e7d50326SJeff Roberson 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2451e7d50326SJeff Roberson 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
245235e6168fSJeff Roberson 	}
24537b20fb19SJeff Roberson 	thread_unlock(td);
245435e6168fSJeff Roberson 
245535e6168fSJeff Roberson 	return (pctcpu);
245635e6168fSJeff Roberson }
245735e6168fSJeff Roberson 
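/*
 * Worked example with assumed values (hz = 1000, FSHIFT = 11, so
 * FSCALE = 2048); these are illustrative and not taken from this file.
 * A thread that ran for roughly half of the pctcpu averaging window
 * yields rtick ~= 500 above, and:
 *
 *	pctcpu	= (FSCALE * ((FSCALE * 500) / 1000)) >> FSHIFT
 *		= (2048 * 1024) >> 11
 *		= 1024
 *		= FSCALE / 2
 *
 * which tools such as ps(1) report as 50% CPU.
 */
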
2458ae7a6b38SJeff Roberson /*
2459ae7a6b38SJeff Roberson  * Bind a thread to a target cpu.
2460ae7a6b38SJeff Roberson  */
24619bacd788SJeff Roberson void
24629bacd788SJeff Roberson sched_bind(struct thread *td, int cpu)
24639bacd788SJeff Roberson {
2464ad1e7d28SJulian Elischer 	struct td_sched *ts;
24659bacd788SJeff Roberson 
2466c47f202bSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2467ad1e7d28SJulian Elischer 	ts = td->td_sched;
24686b2f763fSJeff Roberson 	if (ts->ts_flags & TSF_BOUND)
2469c95d2db2SJeff Roberson 		sched_unbind(td);
2470ad1e7d28SJulian Elischer 	ts->ts_flags |= TSF_BOUND;
247180f86c9fSJeff Roberson #ifdef SMP
24726b2f763fSJeff Roberson 	sched_pin();
247380f86c9fSJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
24749bacd788SJeff Roberson 		return;
24756b2f763fSJeff Roberson 	ts->ts_cpu = cpu;
24769bacd788SJeff Roberson 	/* When we return from mi_switch we'll be on the correct cpu. */
2477279f949eSPoul-Henning Kamp 	mi_switch(SW_VOL, NULL);
24789bacd788SJeff Roberson #endif
24799bacd788SJeff Roberson }
24809bacd788SJeff Roberson 
2481ae7a6b38SJeff Roberson /*
2482ae7a6b38SJeff Roberson  * Release a bound thread.
2483ae7a6b38SJeff Roberson  */
24849bacd788SJeff Roberson void
24859bacd788SJeff Roberson sched_unbind(struct thread *td)
24869bacd788SJeff Roberson {
2487e7d50326SJeff Roberson 	struct td_sched *ts;
2488e7d50326SJeff Roberson 
24897b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2490e7d50326SJeff Roberson 	ts = td->td_sched;
24916b2f763fSJeff Roberson 	if ((ts->ts_flags & TSF_BOUND) == 0)
24926b2f763fSJeff Roberson 		return;
2493e7d50326SJeff Roberson 	ts->ts_flags &= ~TSF_BOUND;
2494e7d50326SJeff Roberson #ifdef SMP
2495e7d50326SJeff Roberson 	sched_unpin();
2496e7d50326SJeff Roberson #endif
24979bacd788SJeff Roberson }
24989bacd788SJeff Roberson 
249935e6168fSJeff Roberson int
2500ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
2501ebccf1e3SJoseph Koshy {
25027b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2503ad1e7d28SJulian Elischer 	return (td->td_sched->ts_flags & TSF_BOUND);
2504ebccf1e3SJoseph Koshy }
2505ebccf1e3SJoseph Koshy 
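/*
 * A minimal, hypothetical usage sketch for the binding interface above
 * (the caller and 'cpu' are not from this file).  The thread lock must be
 * held, unrecursed, around each call, as the assertions require, and
 * sched_bind() may context switch to move onto the target cpu:
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);
 *	thread_unlock(curthread);
 *	(... work that must run on 'cpu' ...)
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */
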
2506ae7a6b38SJeff Roberson /*
2507ae7a6b38SJeff Roberson  * Basic yield call.
2508ae7a6b38SJeff Roberson  */
250936ec198bSDavid Xu void
251036ec198bSDavid Xu sched_relinquish(struct thread *td)
251136ec198bSDavid Xu {
25127b20fb19SJeff Roberson 	thread_lock(td);
25138460a577SJohn Birrell 	if (td->td_pri_class == PRI_TIMESHARE)
251436ec198bSDavid Xu 		sched_prio(td, PRI_MAX_TIMESHARE);
25157b20fb19SJeff Roberson 	SCHED_STAT_INC(switch_relinquish);
251636ec198bSDavid Xu 	mi_switch(SW_VOL, NULL);
25177b20fb19SJeff Roberson 	thread_unlock(td);
251836ec198bSDavid Xu }
251936ec198bSDavid Xu 
2520ae7a6b38SJeff Roberson /*
2521ae7a6b38SJeff Roberson  * Return the total system load.
2522ae7a6b38SJeff Roberson  */
2523ebccf1e3SJoseph Koshy int
252433916c36SJeff Roberson sched_load(void)
252533916c36SJeff Roberson {
252633916c36SJeff Roberson #ifdef SMP
252733916c36SJeff Roberson 	int total;
252833916c36SJeff Roberson 	int i;
252933916c36SJeff Roberson 
253033916c36SJeff Roberson 	total = 0;
2531d2ad694cSJeff Roberson 	for (i = 0; i <= tdg_maxid; i++)
2532d2ad694cSJeff Roberson 		total += TDQ_GROUP(i)->tdg_load;
253333916c36SJeff Roberson 	return (total);
253433916c36SJeff Roberson #else
2535d2ad694cSJeff Roberson 	return (TDQ_SELF()->tdq_sysload);
253633916c36SJeff Roberson #endif
253733916c36SJeff Roberson }
253833916c36SJeff Roberson 
253933916c36SJeff Roberson int
254035e6168fSJeff Roberson sched_sizeof_proc(void)
254135e6168fSJeff Roberson {
254235e6168fSJeff Roberson 	return (sizeof(struct proc));
254335e6168fSJeff Roberson }
254435e6168fSJeff Roberson 
254535e6168fSJeff Roberson int
254635e6168fSJeff Roberson sched_sizeof_thread(void)
254735e6168fSJeff Roberson {
254835e6168fSJeff Roberson 	return (sizeof(struct thread) + sizeof(struct td_sched));
254935e6168fSJeff Roberson }
2550b41f1452SDavid Xu 
25517a5e5e2aSJeff Roberson /*
25527a5e5e2aSJeff Roberson  * The actual idle process.
25537a5e5e2aSJeff Roberson  */
25547a5e5e2aSJeff Roberson void
25557a5e5e2aSJeff Roberson sched_idletd(void *dummy)
25567a5e5e2aSJeff Roberson {
25577a5e5e2aSJeff Roberson 	struct thread *td;
2558ae7a6b38SJeff Roberson 	struct tdq *tdq;
25597a5e5e2aSJeff Roberson 
25607a5e5e2aSJeff Roberson 	td = curthread;
2561ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
25627a5e5e2aSJeff Roberson 	mtx_assert(&Giant, MA_NOTOWNED);
2563ae7a6b38SJeff Roberson 	/* ULE relies on preemption for idle interruption. */
2564ae7a6b38SJeff Roberson 	for (;;) {
2565ae7a6b38SJeff Roberson #ifdef SMP
2566ae7a6b38SJeff Roberson 		if (tdq_idled(tdq))
25677a5e5e2aSJeff Roberson 			cpu_idle();
2568ae7a6b38SJeff Roberson #else
2569ae7a6b38SJeff Roberson 		cpu_idle();
2570ae7a6b38SJeff Roberson #endif
2571ae7a6b38SJeff Roberson 	}
2572b41f1452SDavid Xu }
2573e7d50326SJeff Roberson 
25747b20fb19SJeff Roberson /*
25757b20fb19SJeff Roberson  * A CPU is entering the scheduler for the first time or a thread is exiting.
25767b20fb19SJeff Roberson  */
25777b20fb19SJeff Roberson void
25787b20fb19SJeff Roberson sched_throw(struct thread *td)
25797b20fb19SJeff Roberson {
2580ae7a6b38SJeff Roberson 	struct tdq *tdq;
2581ae7a6b38SJeff Roberson 
2582ae7a6b38SJeff Roberson 	tdq = TDQ_SELF();
25837b20fb19SJeff Roberson 	if (td == NULL) {
2584ae7a6b38SJeff Roberson 		/* Correct spinlock nesting and acquire the correct lock. */
2585ae7a6b38SJeff Roberson 		TDQ_LOCK(tdq);
25867b20fb19SJeff Roberson 		spinlock_exit();
25877b20fb19SJeff Roberson 	} else {
2588ae7a6b38SJeff Roberson 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2589ae7a6b38SJeff Roberson 		tdq_load_rem(tdq, td->td_sched);
25907b20fb19SJeff Roberson 	}
25917b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
25927b20fb19SJeff Roberson 	PCPU_SET(switchtime, cpu_ticks());
25937b20fb19SJeff Roberson 	PCPU_SET(switchticks, ticks);
25947b20fb19SJeff Roberson 	cpu_throw(td, choosethread());	/* doesn't return */
25957b20fb19SJeff Roberson }
25967b20fb19SJeff Roberson 
2597ae7a6b38SJeff Roberson /*
2598ae7a6b38SJeff Roberson  * This is called from fork_exit().  Just acquire the correct locks and
2599ae7a6b38SJeff Roberson  * let fork do the rest of the work.
2600ae7a6b38SJeff Roberson  */
26017b20fb19SJeff Roberson void
2602fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
26037b20fb19SJeff Roberson {
2604ae7a6b38SJeff Roberson 	struct td_sched *ts;
2605ae7a6b38SJeff Roberson 	struct tdq *tdq;
2606ae7a6b38SJeff Roberson 	int cpuid;
26077b20fb19SJeff Roberson 
26087b20fb19SJeff Roberson 	/*
26097b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
2610ae7a6b38SJeff Roberson 	 * non-nested critical section with the scheduler lock held.
26117b20fb19SJeff Roberson 	 */
2612ae7a6b38SJeff Roberson 	cpuid = PCPU_GET(cpuid);
2613ae7a6b38SJeff Roberson 	tdq = TDQ_CPU(cpuid);
2614ae7a6b38SJeff Roberson 	ts = td->td_sched;
2615ae7a6b38SJeff Roberson 	if (TD_IS_IDLETHREAD(td))
2616ae7a6b38SJeff Roberson 		td->td_lock = TDQ_LOCKPTR(tdq);
2617ae7a6b38SJeff Roberson 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2618ae7a6b38SJeff Roberson 	td->td_oncpu = cpuid;
2619ae7a6b38SJeff Roberson 	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
2620fe54587fSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
26217b20fb19SJeff Roberson }
26227b20fb19SJeff Roberson 
2623ae7a6b38SJeff Roberson static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2624ae7a6b38SJeff Roberson     "Scheduler");
2625ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2626e7d50326SJeff Roberson     "Scheduler name");
2627ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2628ae7a6b38SJeff Roberson     "Slice size for timeshare threads");
2629ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2630ae7a6b38SJeff Roberson      "Interactivity score threshold");
2631ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2632ae7a6b38SJeff Roberson     0, "Min priority for preemption; lower priorities have greater precedence");
26337b8bfa0dSJeff Roberson #ifdef SMP
2634ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
2635ae7a6b38SJeff Roberson     "Pick the target cpu based on priority rather than load.");
2636ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2637ae7a6b38SJeff Roberson     "Number of hz ticks to keep thread affinity for");
2638ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
2639ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2640ae7a6b38SJeff Roberson     "Enables the long-term load balancer");
264128994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_secs, CTLFLAG_RW, &balance_secs, 0,
264228994a58SJeff Roberson     "Average interval in seconds between runs of the long-term balancer");
2643ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2644ae7a6b38SJeff Roberson     "Steals work from another hyper-threaded core on idle");
2645ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2646ae7a6b38SJeff Roberson     "Attempts to steal work from other cores before idling");
264728994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
264828994a58SJeff Roberson     "Minimum load on remote cpu before we'll steal");
2649ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
2650ae7a6b38SJeff Roberson     "True when a topology has been specified by the MD code.");
26517b8bfa0dSJeff Roberson #endif
2652e7d50326SJeff Roberson 
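/*
 * The knobs above are visible from userland as kern.sched.*.  A small
 * hypothetical userland program (not kernel code) that reads the scheduler
 * name and the timeshare slice declared above might look like this:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		char name[32];
 *		int slice;
 *		size_t len;
 *
 *		len = sizeof(name);
 *		if (sysctlbyname("kern.sched.name", name, &len, NULL, 0) == 0)
 *			printf("scheduler: %s\n", name);
 *		len = sizeof(slice);
 *		if (sysctlbyname("kern.sched.slice", &slice, &len, NULL, 0) == 0)
 *			printf("kern.sched.slice: %d\n", slice);
 *		return (0);
 *	}
 */
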
265354b0e65fSJeff Roberson /* ps compat.  All cpu percentages from ULE are weighted. */
2654a5423ea3SJeff Roberson static int ccpu = 0;
2655e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2656e7d50326SJeff Roberson 
2657e7d50326SJeff Roberson 
2658ed062c8dSJulian Elischer #define KERN_SWITCH_INCLUDE 1
2659ed062c8dSJulian Elischer #include "kern/kern_switch.c"
2660