135e6168fSJeff Roberson /*- 2e7d50326SJeff Roberson * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org> 335e6168fSJeff Roberson * All rights reserved. 435e6168fSJeff Roberson * 535e6168fSJeff Roberson * Redistribution and use in source and binary forms, with or without 635e6168fSJeff Roberson * modification, are permitted provided that the following conditions 735e6168fSJeff Roberson * are met: 835e6168fSJeff Roberson * 1. Redistributions of source code must retain the above copyright 935e6168fSJeff Roberson * notice unmodified, this list of conditions, and the following 1035e6168fSJeff Roberson * disclaimer. 1135e6168fSJeff Roberson * 2. Redistributions in binary form must reproduce the above copyright 1235e6168fSJeff Roberson * notice, this list of conditions and the following disclaimer in the 1335e6168fSJeff Roberson * documentation and/or other materials provided with the distribution. 1435e6168fSJeff Roberson * 1535e6168fSJeff Roberson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 1635e6168fSJeff Roberson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1735e6168fSJeff Roberson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1835e6168fSJeff Roberson * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 1935e6168fSJeff Roberson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 2035e6168fSJeff Roberson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2135e6168fSJeff Roberson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2235e6168fSJeff Roberson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2335e6168fSJeff Roberson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2435e6168fSJeff Roberson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2535e6168fSJeff Roberson */ 2635e6168fSJeff Roberson 27ae7a6b38SJeff Roberson /* 28ae7a6b38SJeff Roberson * This file implements the ULE scheduler. ULE supports independent CPU 29ae7a6b38SJeff Roberson * run queues and fine grain locking. It has superior interactive 30ae7a6b38SJeff Roberson * performance under load even on uni-processor systems. 31ae7a6b38SJeff Roberson * 32ae7a6b38SJeff Roberson * etymology: 33a5423ea3SJeff Roberson * ULE is the last three letters in schedule. It owes its name to a 34ae7a6b38SJeff Roberson * generic user created for a scheduling system by Paul Mikesell at 35ae7a6b38SJeff Roberson * Isilon Systems and a general lack of creativity on the part of the author. 36ae7a6b38SJeff Roberson */ 37ae7a6b38SJeff Roberson 38677b542eSDavid E. O'Brien #include <sys/cdefs.h> 39677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 40677b542eSDavid E. 
O'Brien 414da0d332SPeter Wemm #include "opt_hwpmc_hooks.h" 424da0d332SPeter Wemm #include "opt_sched.h" 439923b511SScott Long 4435e6168fSJeff Roberson #include <sys/param.h> 4535e6168fSJeff Roberson #include <sys/systm.h> 462c3490b1SMarcel Moolenaar #include <sys/kdb.h> 4735e6168fSJeff Roberson #include <sys/kernel.h> 4835e6168fSJeff Roberson #include <sys/ktr.h> 4935e6168fSJeff Roberson #include <sys/lock.h> 5035e6168fSJeff Roberson #include <sys/mutex.h> 5135e6168fSJeff Roberson #include <sys/proc.h> 52245f3abfSJeff Roberson #include <sys/resource.h> 539bacd788SJeff Roberson #include <sys/resourcevar.h> 5435e6168fSJeff Roberson #include <sys/sched.h> 5535e6168fSJeff Roberson #include <sys/smp.h> 5635e6168fSJeff Roberson #include <sys/sx.h> 5735e6168fSJeff Roberson #include <sys/sysctl.h> 5835e6168fSJeff Roberson #include <sys/sysproto.h> 59f5c157d9SJohn Baldwin #include <sys/turnstile.h> 603db720fdSDavid Xu #include <sys/umtx.h> 6135e6168fSJeff Roberson #include <sys/vmmeter.h> 6235e6168fSJeff Roberson #ifdef KTRACE 6335e6168fSJeff Roberson #include <sys/uio.h> 6435e6168fSJeff Roberson #include <sys/ktrace.h> 6535e6168fSJeff Roberson #endif 6635e6168fSJeff Roberson 67ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 68ebccf1e3SJoseph Koshy #include <sys/pmckern.h> 69ebccf1e3SJoseph Koshy #endif 70ebccf1e3SJoseph Koshy 7135e6168fSJeff Roberson #include <machine/cpu.h> 7222bf7d9aSJeff Roberson #include <machine/smp.h> 7335e6168fSJeff Roberson 7402e2d6b4SJeff Roberson #if !defined(__i386__) && !defined(__amd64__) 7502e2d6b4SJeff Roberson #error "This architecture is not currently compatible with ULE" 767a5e5e2aSJeff Roberson #endif 777a5e5e2aSJeff Roberson 78ae7a6b38SJeff Roberson #define KTR_ULE 0 7914618990SJeff Roberson 806b2f763fSJeff Roberson /* 81ae7a6b38SJeff Roberson * Thread scheduler specific section. All fields are protected 82ae7a6b38SJeff Roberson * by the thread lock. 83ed062c8dSJulian Elischer */ 84ad1e7d28SJulian Elischer struct td_sched { 85ae7a6b38SJeff Roberson TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */ 86ae7a6b38SJeff Roberson struct thread *ts_thread; /* Active associated thread. */ 87ae7a6b38SJeff Roberson struct runq *ts_runq; /* Run-queue we're queued on. */ 88ae7a6b38SJeff Roberson short ts_flags; /* TSF_* flags. */ 89ae7a6b38SJeff Roberson u_char ts_rqindex; /* Run queue index. */ 90ad1e7d28SJulian Elischer u_char ts_cpu; /* CPU that we have affinity for. */ 91ae7a6b38SJeff Roberson int ts_slice; /* Ticks of slice remaining. */ 92ae7a6b38SJeff Roberson u_int ts_slptime; /* Number of ticks we vol. slept */ 93ae7a6b38SJeff Roberson u_int ts_runtime; /* Number of ticks we were running */ 94ed062c8dSJulian Elischer /* The following variables are only used for pctcpu calculation */ 95ad1e7d28SJulian Elischer int ts_ltick; /* Last tick that we were running on */ 96ad1e7d28SJulian Elischer int ts_ftick; /* First tick that we were running on */ 97ad1e7d28SJulian Elischer int ts_ticks; /* Tick count */ 987b8bfa0dSJeff Roberson #ifdef SMP 997b8bfa0dSJeff Roberson int ts_rltick; /* Real last tick, for affinity. */ 1007b8bfa0dSJeff Roberson #endif 101ed062c8dSJulian Elischer }; 102ad1e7d28SJulian Elischer /* flags kept in ts_flags */ 1037b8bfa0dSJeff Roberson #define TSF_BOUND 0x0001 /* Thread can not migrate. */ 1047b8bfa0dSJeff Roberson #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. 
*/ 10535e6168fSJeff Roberson 106ad1e7d28SJulian Elischer static struct td_sched td_sched0; 10735e6168fSJeff Roberson 10835e6168fSJeff Roberson /* 109e7d50326SJeff Roberson * Cpu percentage computation macros and defines. 110e1f89c22SJeff Roberson * 111e7d50326SJeff Roberson * SCHED_TICK_SECS: Number of seconds to average the cpu usage across. 112e7d50326SJeff Roberson * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across. 1138ab80cf0SJeff Roberson * SCHED_TICK_MAX: Maximum number of ticks before scaling back. 114e7d50326SJeff Roberson * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results. 115e7d50326SJeff Roberson * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count. 116e7d50326SJeff Roberson * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks. 11735e6168fSJeff Roberson */ 118e7d50326SJeff Roberson #define SCHED_TICK_SECS 10 119e7d50326SJeff Roberson #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS) 1208ab80cf0SJeff Roberson #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz) 121e7d50326SJeff Roberson #define SCHED_TICK_SHIFT 10 122e7d50326SJeff Roberson #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT) 123eddb4efaSJeff Roberson #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz)) 12435e6168fSJeff Roberson 12535e6168fSJeff Roberson /* 126e7d50326SJeff Roberson * These macros determine priorities for non-interactive threads. They are 127e7d50326SJeff Roberson * assigned a priority based on their recent cpu utilization as expressed 128e7d50326SJeff Roberson * by the ratio of ticks to the tick total. NHALF priorities at the start 129e7d50326SJeff Roberson * and end of the MIN to MAX timeshare range are only reachable with negative 130e7d50326SJeff Roberson * or positive nice respectively. 131e7d50326SJeff Roberson * 132e7d50326SJeff Roberson * PRI_RANGE: Priority range for utilization dependent priorities. 133e7d50326SJeff Roberson * PRI_NRESV: Number of nice values. 134e7d50326SJeff Roberson * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total. 135e7d50326SJeff Roberson * PRI_NICE: Determines the part of the priority inherited from nice. 136e7d50326SJeff Roberson */ 137e7d50326SJeff Roberson #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN) 138e7d50326SJeff Roberson #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) 139e7d50326SJeff Roberson #define SCHED_PRI_MIN (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF) 140e7d50326SJeff Roberson #define SCHED_PRI_MAX (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF) 141dda713dfSJeff Roberson #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN) 142e7d50326SJeff Roberson #define SCHED_PRI_TICKS(ts) \ 143e7d50326SJeff Roberson (SCHED_TICK_HZ((ts)) / \ 1441e516cf5SJeff Roberson (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE)) 145e7d50326SJeff Roberson #define SCHED_PRI_NICE(nice) (nice) 146e7d50326SJeff Roberson 147e7d50326SJeff Roberson /* 148e7d50326SJeff Roberson * These determine the interactivity of a process. Interactivity differs from 149e7d50326SJeff Roberson * cpu utilization in that it expresses the voluntary time slept vs time ran 150e7d50326SJeff Roberson * while cpu utilization includes all time not running. This more accurately 151e7d50326SJeff Roberson * models the intent of the thread. 15235e6168fSJeff Roberson * 153407b0157SJeff Roberson * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate 154407b0157SJeff Roberson * before throttling back. 
155d322132cSJeff Roberson  * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time.
156210491d3SJeff Roberson  * INTERACT_MAX: Maximum interactivity value. Smaller is better.
157e1f89c22SJeff Roberson  * INTERACT_THRESH: Threshold for placement on the current runq.
15835e6168fSJeff Roberson  */
159e7d50326SJeff Roberson #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT)
160e7d50326SJeff Roberson #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT)
161210491d3SJeff Roberson #define SCHED_INTERACT_MAX (100)
162210491d3SJeff Roberson #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)
1634c9612c6SJeff Roberson #define SCHED_INTERACT_THRESH (30)
164e1f89c22SJeff Roberson
16535e6168fSJeff Roberson /*
166e7d50326SJeff Roberson  * tickincr: Converts a stathz tick into a hz domain scaled by
167e7d50326SJeff Roberson  * the shift factor. Without the shift the error rate
168e7d50326SJeff Roberson  * due to rounding would be unacceptably high.
169e7d50326SJeff Roberson  * realstathz: stathz is sometimes 0 and run off of hz.
170e7d50326SJeff Roberson  * sched_slice: Runtime of each thread before rescheduling.
171ae7a6b38SJeff Roberson  * preempt_thresh: Priority threshold for preemption and remote IPIs.
17235e6168fSJeff Roberson  */
173e7d50326SJeff Roberson static int sched_interact = SCHED_INTERACT_THRESH;
174e7d50326SJeff Roberson static int realstathz;
175e7d50326SJeff Roberson static int tickincr;
176e7d50326SJeff Roberson static int sched_slice;
17702e2d6b4SJeff Roberson #ifdef PREEMPTION
17802e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION
17902e2d6b4SJeff Roberson static int preempt_thresh = PRI_MAX_IDLE;
18002e2d6b4SJeff Roberson #else
181ae7a6b38SJeff Roberson static int preempt_thresh = PRI_MIN_KERN;
18202e2d6b4SJeff Roberson #endif
18302e2d6b4SJeff Roberson #else
18402e2d6b4SJeff Roberson static int preempt_thresh = 0;
18502e2d6b4SJeff Roberson #endif
186ae7a6b38SJeff Roberson
18735e6168fSJeff Roberson /*
188ae7a6b38SJeff Roberson  * tdq - per processor runqs and statistics. All fields are protected by the
189ae7a6b38SJeff Roberson  * tdq_lock. The load and lowpri may be accessed without the lock to avoid
190ae7a6b38SJeff Roberson  * excess locking in sched_pickcpu();
19135e6168fSJeff Roberson  */
192ad1e7d28SJulian Elischer struct tdq {
193c47f202bSJeff Roberson struct mtx *tdq_lock; /* Pointer to group lock. */
194e7d50326SJeff Roberson struct runq tdq_realtime; /* real-time run queue. */
195ae7a6b38SJeff Roberson struct runq tdq_timeshare; /* timeshare run queue. */
196ae7a6b38SJeff Roberson struct runq tdq_idle; /* Queue of IDLE threads. */
197ae7a6b38SJeff Roberson int tdq_load; /* Aggregate load. */
198ed0e8f2fSJeff Roberson u_char tdq_idx; /* Current insert index. */
199ed0e8f2fSJeff Roberson u_char tdq_ridx; /* Current removal index. */
2005d7ef00cSJeff Roberson #ifdef SMP
201ae7a6b38SJeff Roberson u_char tdq_lowpri; /* Lowest priority thread. */
202ae7a6b38SJeff Roberson int tdq_transferable; /* Transferable thread count. */
203d2ad694cSJeff Roberson LIST_ENTRY(tdq) tdq_siblings; /* Next in tdq group. */
204d2ad694cSJeff Roberson struct tdq_group *tdq_group; /* Our processor group. */
20533916c36SJeff Roberson #else
206d2ad694cSJeff Roberson int tdq_sysload; /* For loadavg, !ITHD load. */
2075d7ef00cSJeff Roberson #endif
208ae7a6b38SJeff Roberson } __aligned(64);
20935e6168fSJeff Roberson
2107b8bfa0dSJeff Roberson
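/*
 * Editorial worked example, not part of the original file: a sketch of
 * how the SCHED_TICK_* and SCHED_PRI_* macros above combine into a
 * timeshare priority for a non-interactive thread.  The helper name is
 * made up, the block is kept out of the build, and the numbers in the
 * comment assume hz = 1000; the real calculation lives in
 * sched_priority(), which also handles interactive threads and bounds
 * the result to the timeshare range.
 */
#if 0
static int
sched_pri_ticks_example(struct td_sched *ts, int nice)
{
	int pri;

	/*
	 * A thread that ran for about 5 of the last 10 seconds has
	 * SCHED_TICK_HZ(ts) near 5000 and SCHED_TICK_TOTAL(ts) near
	 * 10000, so SCHED_PRI_TICKS(ts) comes out to roughly half of
	 * SCHED_PRI_RANGE.
	 */
	pri = SCHED_PRI_MIN;
	if (ts->ts_ticks)
		pri += SCHED_PRI_TICKS(ts);
	pri += SCHED_PRI_NICE(nice);
	return (pri);
}
#endif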
21180f86c9fSJeff Roberson #ifdef SMP
21280f86c9fSJeff Roberson /*
213ad1e7d28SJulian Elischer  * tdq groups are groups of processors which can cheaply share threads. When
21480f86c9fSJeff Roberson  * one processor in the group goes idle it will check the runqs of the other
21580f86c9fSJeff Roberson  * processors in its group prior to halting and waiting for an interrupt.
21680f86c9fSJeff Roberson  * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
21780f86c9fSJeff Roberson  * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
21880f86c9fSJeff Roberson  * load balancer.
21980f86c9fSJeff Roberson  */
220ad1e7d28SJulian Elischer struct tdq_group {
221c47f202bSJeff Roberson struct mtx tdg_lock; /* Protects all fields below. */
222d2ad694cSJeff Roberson int tdg_cpus; /* Count of CPUs in this tdq group. */
223d2ad694cSJeff Roberson cpumask_t tdg_cpumask; /* Mask of cpus in this group. */
224d2ad694cSJeff Roberson cpumask_t tdg_idlemask; /* Idle cpus in this group. */
225d2ad694cSJeff Roberson cpumask_t tdg_mask; /* Bit mask for first cpu. */
226d2ad694cSJeff Roberson int tdg_load; /* Total load of this group. */
227d2ad694cSJeff Roberson int tdg_transferable; /* Transferable load of this group. */
228d2ad694cSJeff Roberson LIST_HEAD(, tdq) tdg_members; /* Linked list of all members. */
229c47f202bSJeff Roberson char tdg_name[16]; /* lock name. */
230ae7a6b38SJeff Roberson } __aligned(64);
2317b8bfa0dSJeff Roberson
232ae7a6b38SJeff Roberson #define SCHED_AFFINITY_DEFAULT (max(1, hz / 300))
2337b8bfa0dSJeff Roberson #define SCHED_AFFINITY(ts) ((ts)->ts_rltick > ticks - affinity)
2347b8bfa0dSJeff Roberson
2357b8bfa0dSJeff Roberson /*
2367b8bfa0dSJeff Roberson  * Run-time tunables.
2377b8bfa0dSJeff Roberson  */
23828994a58SJeff Roberson static int rebalance = 1;
2397fcf154aSJeff Roberson static int balance_interval = 128; /* Default set in sched_initticks(). */
24028994a58SJeff Roberson static int pick_pri = 1;
2417b8bfa0dSJeff Roberson static int affinity;
2427b8bfa0dSJeff Roberson static int tryself = 1;
2437fcf154aSJeff Roberson static int steal_htt = 1;
24428994a58SJeff Roberson static int steal_idle = 1;
24528994a58SJeff Roberson static int steal_thresh = 2;
2467b20fb19SJeff Roberson static int topology = 0;
24780f86c9fSJeff Roberson
24835e6168fSJeff Roberson /*
249d2ad694cSJeff Roberson  * One thread queue per processor.
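 *
 * Editorial example for the affinity window defined above, using an
 * assumed hz = 1000: the default affinity is SCHED_AFFINITY_DEFAULT =
 * max(1, 1000 / 300) = 3 ticks, so SCHED_AFFINITY(ts) only reports a
 * thread as cache-warm if ts_rltick is within 3 ticks of the current
 * tick count.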
25035e6168fSJeff Roberson */ 2517b8bfa0dSJeff Roberson static volatile cpumask_t tdq_idle; 252d2ad694cSJeff Roberson static int tdg_maxid; 253ad1e7d28SJulian Elischer static struct tdq tdq_cpu[MAXCPU]; 254ad1e7d28SJulian Elischer static struct tdq_group tdq_groups[MAXCPU]; 2557fcf154aSJeff Roberson static struct tdq *balance_tdq; 2567fcf154aSJeff Roberson static int balance_group_ticks; 2577fcf154aSJeff Roberson static int balance_ticks; 258dc03363dSJeff Roberson 259ad1e7d28SJulian Elischer #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) 260ad1e7d28SJulian Elischer #define TDQ_CPU(x) (&tdq_cpu[(x)]) 261c47f202bSJeff Roberson #define TDQ_ID(x) ((int)((x) - tdq_cpu)) 262ad1e7d28SJulian Elischer #define TDQ_GROUP(x) (&tdq_groups[(x)]) 263c47f202bSJeff Roberson #define TDG_ID(x) ((int)((x) - tdq_groups)) 26480f86c9fSJeff Roberson #else /* !SMP */ 265ad1e7d28SJulian Elischer static struct tdq tdq_cpu; 266c47f202bSJeff Roberson static struct mtx tdq_lock; 267dc03363dSJeff Roberson 26836b36916SJeff Roberson #define TDQ_ID(x) (0) 269ad1e7d28SJulian Elischer #define TDQ_SELF() (&tdq_cpu) 270ad1e7d28SJulian Elischer #define TDQ_CPU(x) (&tdq_cpu) 2710a016a05SJeff Roberson #endif 27235e6168fSJeff Roberson 273ae7a6b38SJeff Roberson #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type)) 274ae7a6b38SJeff Roberson #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t))) 275ae7a6b38SJeff Roberson #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f)) 276ae7a6b38SJeff Roberson #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t))) 277c47f202bSJeff Roberson #define TDQ_LOCKPTR(t) ((t)->tdq_lock) 278ae7a6b38SJeff Roberson 2798460a577SJohn Birrell static void sched_priority(struct thread *); 28021381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char); 2818460a577SJohn Birrell static int sched_interact_score(struct thread *); 2828460a577SJohn Birrell static void sched_interact_update(struct thread *); 2838460a577SJohn Birrell static void sched_interact_fork(struct thread *); 284ad1e7d28SJulian Elischer static void sched_pctcpu_update(struct td_sched *); 28535e6168fSJeff Roberson 2865d7ef00cSJeff Roberson /* Operations on per processor queues */ 287ad1e7d28SJulian Elischer static struct td_sched * tdq_choose(struct tdq *); 288ad1e7d28SJulian Elischer static void tdq_setup(struct tdq *); 289ad1e7d28SJulian Elischer static void tdq_load_add(struct tdq *, struct td_sched *); 290ad1e7d28SJulian Elischer static void tdq_load_rem(struct tdq *, struct td_sched *); 291ad1e7d28SJulian Elischer static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int); 292ad1e7d28SJulian Elischer static __inline void tdq_runq_rem(struct tdq *, struct td_sched *); 293ad1e7d28SJulian Elischer void tdq_print(int cpu); 294e7d50326SJeff Roberson static void runq_print(struct runq *rq); 295ae7a6b38SJeff Roberson static void tdq_add(struct tdq *, struct thread *, int); 2965d7ef00cSJeff Roberson #ifdef SMP 297ae7a6b38SJeff Roberson static void tdq_move(struct tdq *, struct tdq *); 298ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *); 2997b8bfa0dSJeff Roberson static void tdq_notify(struct td_sched *); 3007fcf154aSJeff Roberson static struct td_sched *tdq_steal(struct tdq *); 301ae7a6b38SJeff Roberson static struct td_sched *runq_steal(struct runq *); 302ae7a6b38SJeff Roberson static int sched_pickcpu(struct td_sched *, int); 3037fcf154aSJeff Roberson static void sched_balance(void); 3047fcf154aSJeff Roberson static void sched_balance_groups(void); 305ae7a6b38SJeff 
Roberson static void sched_balance_group(struct tdq_group *); 306ae7a6b38SJeff Roberson static void sched_balance_pair(struct tdq *, struct tdq *); 307ae7a6b38SJeff Roberson static inline struct tdq *sched_setcpu(struct td_sched *, int, int); 308ae7a6b38SJeff Roberson static inline struct mtx *thread_block_switch(struct thread *); 309ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *); 310c47f202bSJeff Roberson static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int); 3111e516cf5SJeff Roberson 3127b8bfa0dSJeff Roberson #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0) 3135d7ef00cSJeff Roberson #endif 3145d7ef00cSJeff Roberson 315e7d50326SJeff Roberson static void sched_setup(void *dummy); 316e7d50326SJeff Roberson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 317e7d50326SJeff Roberson 318e7d50326SJeff Roberson static void sched_initticks(void *dummy); 319e7d50326SJeff Roberson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL) 320e7d50326SJeff Roberson 321ae7a6b38SJeff Roberson /* 322ae7a6b38SJeff Roberson * Print the threads waiting on a run-queue. 323ae7a6b38SJeff Roberson */ 324e7d50326SJeff Roberson static void 325e7d50326SJeff Roberson runq_print(struct runq *rq) 326e7d50326SJeff Roberson { 327e7d50326SJeff Roberson struct rqhead *rqh; 328e7d50326SJeff Roberson struct td_sched *ts; 329e7d50326SJeff Roberson int pri; 330e7d50326SJeff Roberson int j; 331e7d50326SJeff Roberson int i; 332e7d50326SJeff Roberson 333e7d50326SJeff Roberson for (i = 0; i < RQB_LEN; i++) { 334e7d50326SJeff Roberson printf("\t\trunq bits %d 0x%zx\n", 335e7d50326SJeff Roberson i, rq->rq_status.rqb_bits[i]); 336e7d50326SJeff Roberson for (j = 0; j < RQB_BPW; j++) 337e7d50326SJeff Roberson if (rq->rq_status.rqb_bits[i] & (1ul << j)) { 338e7d50326SJeff Roberson pri = j + (i << RQB_L2BPW); 339e7d50326SJeff Roberson rqh = &rq->rq_queues[pri]; 340e7d50326SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) { 341e7d50326SJeff Roberson printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", 342e7d50326SJeff Roberson ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri); 343e7d50326SJeff Roberson } 344e7d50326SJeff Roberson } 345e7d50326SJeff Roberson } 346e7d50326SJeff Roberson } 347e7d50326SJeff Roberson 348ae7a6b38SJeff Roberson /* 349ae7a6b38SJeff Roberson * Print the status of a per-cpu thread queue. Should be a ddb show cmd. 
350ae7a6b38SJeff Roberson */ 35115dc847eSJeff Roberson void 352ad1e7d28SJulian Elischer tdq_print(int cpu) 35315dc847eSJeff Roberson { 354ad1e7d28SJulian Elischer struct tdq *tdq; 35515dc847eSJeff Roberson 356ad1e7d28SJulian Elischer tdq = TDQ_CPU(cpu); 35715dc847eSJeff Roberson 358c47f202bSJeff Roberson printf("tdq %d:\n", TDQ_ID(tdq)); 359ae7a6b38SJeff Roberson printf("\tlockptr %p\n", TDQ_LOCKPTR(tdq)); 360d2ad694cSJeff Roberson printf("\tload: %d\n", tdq->tdq_load); 361e7d50326SJeff Roberson printf("\ttimeshare idx: %d\n", tdq->tdq_idx); 3623f872f85SJeff Roberson printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); 363e7d50326SJeff Roberson printf("\trealtime runq:\n"); 364e7d50326SJeff Roberson runq_print(&tdq->tdq_realtime); 365e7d50326SJeff Roberson printf("\ttimeshare runq:\n"); 366e7d50326SJeff Roberson runq_print(&tdq->tdq_timeshare); 367e7d50326SJeff Roberson printf("\tidle runq:\n"); 368e7d50326SJeff Roberson runq_print(&tdq->tdq_idle); 369ef1134c9SJeff Roberson #ifdef SMP 370d2ad694cSJeff Roberson printf("\tload transferable: %d\n", tdq->tdq_transferable); 371ae7a6b38SJeff Roberson printf("\tlowest priority: %d\n", tdq->tdq_lowpri); 372c47f202bSJeff Roberson printf("\tgroup: %d\n", TDG_ID(tdq->tdq_group)); 373c47f202bSJeff Roberson printf("\tLock name: %s\n", tdq->tdq_group->tdg_name); 374ef1134c9SJeff Roberson #endif 37515dc847eSJeff Roberson } 37615dc847eSJeff Roberson 377ae7a6b38SJeff Roberson #define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS) 378ae7a6b38SJeff Roberson /* 379ae7a6b38SJeff Roberson * Add a thread to the actual run-queue. Keeps transferable counts up to 380ae7a6b38SJeff Roberson * date with what is actually on the run-queue. Selects the correct 381ae7a6b38SJeff Roberson * queue position for timeshare threads. 382ae7a6b38SJeff Roberson */ 383155b9987SJeff Roberson static __inline void 384ad1e7d28SJulian Elischer tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags) 385155b9987SJeff Roberson { 386ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 387ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 388155b9987SJeff Roberson #ifdef SMP 389e7d50326SJeff Roberson if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 390d2ad694cSJeff Roberson tdq->tdq_transferable++; 391d2ad694cSJeff Roberson tdq->tdq_group->tdg_transferable++; 392ad1e7d28SJulian Elischer ts->ts_flags |= TSF_XFERABLE; 39380f86c9fSJeff Roberson } 394155b9987SJeff Roberson #endif 395e7d50326SJeff Roberson if (ts->ts_runq == &tdq->tdq_timeshare) { 396ed0e8f2fSJeff Roberson u_char pri; 397e7d50326SJeff Roberson 398e7d50326SJeff Roberson pri = ts->ts_thread->td_priority; 399e7d50326SJeff Roberson KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE, 400e7d50326SJeff Roberson ("Invalid priority %d on timeshare runq", pri)); 401e7d50326SJeff Roberson /* 402e7d50326SJeff Roberson * This queue contains only priorities between MIN and MAX 403e7d50326SJeff Roberson * realtime. Use the whole queue to represent these values. 404e7d50326SJeff Roberson */ 405c47f202bSJeff Roberson if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) { 406e7d50326SJeff Roberson pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ; 407e7d50326SJeff Roberson pri = (pri + tdq->tdq_idx) % RQ_NQS; 4083f872f85SJeff Roberson /* 4093f872f85SJeff Roberson * This effectively shortens the queue by one so we 4103f872f85SJeff Roberson * can have a one slot difference between idx and 4113f872f85SJeff Roberson * ridx while we wait for threads to drain. 
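 *
 * Editorial example with assumed values: if RQ_NQS is 64, tdq_idx is
 * 20 and tdq_ridx is 16, a thread whose scaled priority works out to
 * (60 + 20) % 64 = 16 would land on the slot currently being drained,
 * so it is placed in slot 15 instead.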
4123f872f85SJeff Roberson */ 4133f872f85SJeff Roberson if (tdq->tdq_ridx != tdq->tdq_idx && 4143f872f85SJeff Roberson pri == tdq->tdq_ridx) 4154499aff6SJeff Roberson pri = (unsigned char)(pri - 1) % RQ_NQS; 416e7d50326SJeff Roberson } else 4173f872f85SJeff Roberson pri = tdq->tdq_ridx; 418e7d50326SJeff Roberson runq_add_pri(ts->ts_runq, ts, pri, flags); 419e7d50326SJeff Roberson } else 420ad1e7d28SJulian Elischer runq_add(ts->ts_runq, ts, flags); 421155b9987SJeff Roberson } 422155b9987SJeff Roberson 423ae7a6b38SJeff Roberson /* 424ae7a6b38SJeff Roberson * Remove a thread from a run-queue. This typically happens when a thread 425ae7a6b38SJeff Roberson * is selected to run. Running threads are not on the queue and the 426ae7a6b38SJeff Roberson * transferable count does not reflect them. 427ae7a6b38SJeff Roberson */ 428155b9987SJeff Roberson static __inline void 429ad1e7d28SJulian Elischer tdq_runq_rem(struct tdq *tdq, struct td_sched *ts) 430155b9987SJeff Roberson { 431ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 432ae7a6b38SJeff Roberson KASSERT(ts->ts_runq != NULL, 433ae7a6b38SJeff Roberson ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread)); 434155b9987SJeff Roberson #ifdef SMP 435ad1e7d28SJulian Elischer if (ts->ts_flags & TSF_XFERABLE) { 436d2ad694cSJeff Roberson tdq->tdq_transferable--; 437d2ad694cSJeff Roberson tdq->tdq_group->tdg_transferable--; 438ad1e7d28SJulian Elischer ts->ts_flags &= ~TSF_XFERABLE; 43980f86c9fSJeff Roberson } 440155b9987SJeff Roberson #endif 4413f872f85SJeff Roberson if (ts->ts_runq == &tdq->tdq_timeshare) { 4423f872f85SJeff Roberson if (tdq->tdq_idx != tdq->tdq_ridx) 4433f872f85SJeff Roberson runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx); 444e7d50326SJeff Roberson else 4453f872f85SJeff Roberson runq_remove_idx(ts->ts_runq, ts, NULL); 4468ab80cf0SJeff Roberson /* 4478ab80cf0SJeff Roberson * For timeshare threads we update the priority here so 4488ab80cf0SJeff Roberson * the priority reflects the time we've been sleeping. 4498ab80cf0SJeff Roberson */ 4508ab80cf0SJeff Roberson ts->ts_ltick = ticks; 4518ab80cf0SJeff Roberson sched_pctcpu_update(ts); 4528ab80cf0SJeff Roberson sched_priority(ts->ts_thread); 4533f872f85SJeff Roberson } else 454ad1e7d28SJulian Elischer runq_remove(ts->ts_runq, ts); 455155b9987SJeff Roberson } 456155b9987SJeff Roberson 457ae7a6b38SJeff Roberson /* 458ae7a6b38SJeff Roberson * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load 459ae7a6b38SJeff Roberson * for this thread to the referenced thread queue. 
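 *
 * Editorial note: every thread added here bumps tdq_load, which feeds
 * the balancing and cpu selection code, but interrupt threads and
 * P_NOLOAD threads are excluded from the group/system load used for
 * the load average; an ithread on an otherwise idle cpu therefore
 * shows up in tdq_load but not in loadavg.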
460ae7a6b38SJeff Roberson */ 461a8949de2SJeff Roberson static void 462ad1e7d28SJulian Elischer tdq_load_add(struct tdq *tdq, struct td_sched *ts) 4635d7ef00cSJeff Roberson { 464ef1134c9SJeff Roberson int class; 465ae7a6b38SJeff Roberson 466ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 467ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 468ad1e7d28SJulian Elischer class = PRI_BASE(ts->ts_thread->td_pri_class); 469d2ad694cSJeff Roberson tdq->tdq_load++; 470c47f202bSJeff Roberson CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load); 4717b8bfa0dSJeff Roberson if (class != PRI_ITHD && 4727b8bfa0dSJeff Roberson (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 47333916c36SJeff Roberson #ifdef SMP 474d2ad694cSJeff Roberson tdq->tdq_group->tdg_load++; 47533916c36SJeff Roberson #else 476d2ad694cSJeff Roberson tdq->tdq_sysload++; 477cac77d04SJeff Roberson #endif 4785d7ef00cSJeff Roberson } 47915dc847eSJeff Roberson 480ae7a6b38SJeff Roberson /* 481ae7a6b38SJeff Roberson * Remove the load from a thread that is transitioning to a sleep state or 482ae7a6b38SJeff Roberson * exiting. 483ae7a6b38SJeff Roberson */ 484a8949de2SJeff Roberson static void 485ad1e7d28SJulian Elischer tdq_load_rem(struct tdq *tdq, struct td_sched *ts) 4865d7ef00cSJeff Roberson { 487ef1134c9SJeff Roberson int class; 488ae7a6b38SJeff Roberson 489ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 490ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 491ad1e7d28SJulian Elischer class = PRI_BASE(ts->ts_thread->td_pri_class); 4927b8bfa0dSJeff Roberson if (class != PRI_ITHD && 4937b8bfa0dSJeff Roberson (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 49433916c36SJeff Roberson #ifdef SMP 495d2ad694cSJeff Roberson tdq->tdq_group->tdg_load--; 49633916c36SJeff Roberson #else 497d2ad694cSJeff Roberson tdq->tdq_sysload--; 498cac77d04SJeff Roberson #endif 499ae7a6b38SJeff Roberson KASSERT(tdq->tdq_load != 0, 500c47f202bSJeff Roberson ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq))); 501d2ad694cSJeff Roberson tdq->tdq_load--; 502d2ad694cSJeff Roberson CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 503ad1e7d28SJulian Elischer ts->ts_runq = NULL; 50415dc847eSJeff Roberson } 50515dc847eSJeff Roberson 5065d7ef00cSJeff Roberson #ifdef SMP 507356500a3SJeff Roberson /* 508155b9987SJeff Roberson * sched_balance is a simple CPU load balancing algorithm. It operates by 509356500a3SJeff Roberson * finding the least loaded and most loaded cpu and equalizing their load 510356500a3SJeff Roberson * by migrating some processes. 511356500a3SJeff Roberson * 512356500a3SJeff Roberson * Dealing only with two CPUs at a time has two advantages. Firstly, most 513356500a3SJeff Roberson * installations will only have 2 cpus. Secondly, load balancing too much at 514356500a3SJeff Roberson * once can have an unpleasant effect on the system. The scheduler rarely has 515356500a3SJeff Roberson * enough information to make perfect decisions. So this algorithm chooses 516ae7a6b38SJeff Roberson * simplicity and more gradual effects on load in larger systems. 
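 *
 * Editorial example with assumed loads: if the busiest queue has a
 * load of 7 and the least loaded has 2, the imbalance is 5, half of
 * that rounded up is 3, and sched_balance_pair() below will call
 * tdq_move() three times, or fewer if fewer threads are transferable.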
517356500a3SJeff Roberson * 518356500a3SJeff Roberson */ 51922bf7d9aSJeff Roberson static void 5207fcf154aSJeff Roberson sched_balance() 521356500a3SJeff Roberson { 522ad1e7d28SJulian Elischer struct tdq_group *high; 523ad1e7d28SJulian Elischer struct tdq_group *low; 524d2ad694cSJeff Roberson struct tdq_group *tdg; 5257fcf154aSJeff Roberson struct tdq *tdq; 526cac77d04SJeff Roberson int cnt; 527356500a3SJeff Roberson int i; 528356500a3SJeff Roberson 5297fcf154aSJeff Roberson /* 5307fcf154aSJeff Roberson * Select a random time between .5 * balance_interval and 5317fcf154aSJeff Roberson * 1.5 * balance_interval. 5327fcf154aSJeff Roberson */ 5337fcf154aSJeff Roberson balance_ticks = max(balance_interval / 2, 1); 5347fcf154aSJeff Roberson balance_ticks += random() % balance_interval; 535ae7a6b38SJeff Roberson if (smp_started == 0 || rebalance == 0) 536598b368dSJeff Roberson return; 5377fcf154aSJeff Roberson tdq = TDQ_SELF(); 5387fcf154aSJeff Roberson TDQ_UNLOCK(tdq); 539cac77d04SJeff Roberson low = high = NULL; 540d2ad694cSJeff Roberson i = random() % (tdg_maxid + 1); 541d2ad694cSJeff Roberson for (cnt = 0; cnt <= tdg_maxid; cnt++) { 542d2ad694cSJeff Roberson tdg = TDQ_GROUP(i); 543cac77d04SJeff Roberson /* 544cac77d04SJeff Roberson * Find the CPU with the highest load that has some 545cac77d04SJeff Roberson * threads to transfer. 546cac77d04SJeff Roberson */ 547d2ad694cSJeff Roberson if ((high == NULL || tdg->tdg_load > high->tdg_load) 548d2ad694cSJeff Roberson && tdg->tdg_transferable) 549d2ad694cSJeff Roberson high = tdg; 550d2ad694cSJeff Roberson if (low == NULL || tdg->tdg_load < low->tdg_load) 551d2ad694cSJeff Roberson low = tdg; 552d2ad694cSJeff Roberson if (++i > tdg_maxid) 553cac77d04SJeff Roberson i = 0; 554cac77d04SJeff Roberson } 555cac77d04SJeff Roberson if (low != NULL && high != NULL && high != low) 556d2ad694cSJeff Roberson sched_balance_pair(LIST_FIRST(&high->tdg_members), 557d2ad694cSJeff Roberson LIST_FIRST(&low->tdg_members)); 5587fcf154aSJeff Roberson TDQ_LOCK(tdq); 559cac77d04SJeff Roberson } 56086f8ae96SJeff Roberson 561ae7a6b38SJeff Roberson /* 562ae7a6b38SJeff Roberson * Balance load between CPUs in a group. Will only migrate within the group. 563ae7a6b38SJeff Roberson */ 564cac77d04SJeff Roberson static void 5657fcf154aSJeff Roberson sched_balance_groups() 566cac77d04SJeff Roberson { 5677fcf154aSJeff Roberson struct tdq *tdq; 568cac77d04SJeff Roberson int i; 569cac77d04SJeff Roberson 5707fcf154aSJeff Roberson /* 5717fcf154aSJeff Roberson * Select a random time between .5 * balance_interval and 5727fcf154aSJeff Roberson * 1.5 * balance_interval. 5737fcf154aSJeff Roberson */ 5747fcf154aSJeff Roberson balance_group_ticks = max(balance_interval / 2, 1); 5757fcf154aSJeff Roberson balance_group_ticks += random() % balance_interval; 576ae7a6b38SJeff Roberson if (smp_started == 0 || rebalance == 0) 577ae7a6b38SJeff Roberson return; 5787fcf154aSJeff Roberson tdq = TDQ_SELF(); 5797fcf154aSJeff Roberson TDQ_UNLOCK(tdq); 580d2ad694cSJeff Roberson for (i = 0; i <= tdg_maxid; i++) 581ad1e7d28SJulian Elischer sched_balance_group(TDQ_GROUP(i)); 5827fcf154aSJeff Roberson TDQ_LOCK(tdq); 583356500a3SJeff Roberson } 584cac77d04SJeff Roberson 585ae7a6b38SJeff Roberson /* 586ae7a6b38SJeff Roberson * Finds the greatest imbalance between two tdqs in a group. 
587ae7a6b38SJeff Roberson */ 588cac77d04SJeff Roberson static void 589d2ad694cSJeff Roberson sched_balance_group(struct tdq_group *tdg) 590cac77d04SJeff Roberson { 591ad1e7d28SJulian Elischer struct tdq *tdq; 592ad1e7d28SJulian Elischer struct tdq *high; 593ad1e7d28SJulian Elischer struct tdq *low; 594cac77d04SJeff Roberson int load; 595cac77d04SJeff Roberson 596d2ad694cSJeff Roberson if (tdg->tdg_transferable == 0) 597cac77d04SJeff Roberson return; 598cac77d04SJeff Roberson low = NULL; 599cac77d04SJeff Roberson high = NULL; 600d2ad694cSJeff Roberson LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) { 601d2ad694cSJeff Roberson load = tdq->tdq_load; 602d2ad694cSJeff Roberson if (high == NULL || load > high->tdq_load) 603ad1e7d28SJulian Elischer high = tdq; 604d2ad694cSJeff Roberson if (low == NULL || load < low->tdq_load) 605ad1e7d28SJulian Elischer low = tdq; 606356500a3SJeff Roberson } 607cac77d04SJeff Roberson if (high != NULL && low != NULL && high != low) 608cac77d04SJeff Roberson sched_balance_pair(high, low); 609356500a3SJeff Roberson } 610cac77d04SJeff Roberson 611ae7a6b38SJeff Roberson /* 612ae7a6b38SJeff Roberson * Lock two thread queues using their address to maintain lock order. 613ae7a6b38SJeff Roberson */ 614ae7a6b38SJeff Roberson static void 615ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two) 616ae7a6b38SJeff Roberson { 617ae7a6b38SJeff Roberson if (one < two) { 618ae7a6b38SJeff Roberson TDQ_LOCK(one); 619ae7a6b38SJeff Roberson TDQ_LOCK_FLAGS(two, MTX_DUPOK); 620ae7a6b38SJeff Roberson } else { 621ae7a6b38SJeff Roberson TDQ_LOCK(two); 622ae7a6b38SJeff Roberson TDQ_LOCK_FLAGS(one, MTX_DUPOK); 623ae7a6b38SJeff Roberson } 624ae7a6b38SJeff Roberson } 625ae7a6b38SJeff Roberson 626ae7a6b38SJeff Roberson /* 6277fcf154aSJeff Roberson * Unlock two thread queues. Order is not important here. 6287fcf154aSJeff Roberson */ 6297fcf154aSJeff Roberson static void 6307fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two) 6317fcf154aSJeff Roberson { 6327fcf154aSJeff Roberson TDQ_UNLOCK(one); 6337fcf154aSJeff Roberson TDQ_UNLOCK(two); 6347fcf154aSJeff Roberson } 6357fcf154aSJeff Roberson 6367fcf154aSJeff Roberson /* 637ae7a6b38SJeff Roberson * Transfer load between two imbalanced thread queues. 638ae7a6b38SJeff Roberson */ 639cac77d04SJeff Roberson static void 640ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low) 641cac77d04SJeff Roberson { 642cac77d04SJeff Roberson int transferable; 643cac77d04SJeff Roberson int high_load; 644cac77d04SJeff Roberson int low_load; 645cac77d04SJeff Roberson int move; 646cac77d04SJeff Roberson int diff; 647cac77d04SJeff Roberson int i; 648cac77d04SJeff Roberson 649ae7a6b38SJeff Roberson tdq_lock_pair(high, low); 65080f86c9fSJeff Roberson /* 65180f86c9fSJeff Roberson * If we're transfering within a group we have to use this specific 652ad1e7d28SJulian Elischer * tdq's transferable count, otherwise we can steal from other members 65380f86c9fSJeff Roberson * of the group. 
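 *
 * Editorial example: when the two queues are the sibling contexts of
 * one HTT core, only this queue's own transferable count and load are
 * used; when they belong to different cores or packages the group-wide
 * totals are used instead, since any member of the busy group may give
 * up threads.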
65480f86c9fSJeff Roberson */ 655d2ad694cSJeff Roberson if (high->tdq_group == low->tdq_group) { 656d2ad694cSJeff Roberson transferable = high->tdq_transferable; 657d2ad694cSJeff Roberson high_load = high->tdq_load; 658d2ad694cSJeff Roberson low_load = low->tdq_load; 659cac77d04SJeff Roberson } else { 660d2ad694cSJeff Roberson transferable = high->tdq_group->tdg_transferable; 661d2ad694cSJeff Roberson high_load = high->tdq_group->tdg_load; 662d2ad694cSJeff Roberson low_load = low->tdq_group->tdg_load; 663cac77d04SJeff Roberson } 664155b9987SJeff Roberson /* 665155b9987SJeff Roberson * Determine what the imbalance is and then adjust that to how many 666d2ad694cSJeff Roberson * threads we actually have to give up (transferable). 667155b9987SJeff Roberson */ 668ae7a6b38SJeff Roberson if (transferable != 0) { 669cac77d04SJeff Roberson diff = high_load - low_load; 670356500a3SJeff Roberson move = diff / 2; 671356500a3SJeff Roberson if (diff & 0x1) 672356500a3SJeff Roberson move++; 67380f86c9fSJeff Roberson move = min(move, transferable); 674356500a3SJeff Roberson for (i = 0; i < move; i++) 675ae7a6b38SJeff Roberson tdq_move(high, low); 676a5423ea3SJeff Roberson /* 677a5423ea3SJeff Roberson * IPI the target cpu to force it to reschedule with the new 678a5423ea3SJeff Roberson * workload. 679a5423ea3SJeff Roberson */ 680a5423ea3SJeff Roberson ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT); 681ae7a6b38SJeff Roberson } 6827fcf154aSJeff Roberson tdq_unlock_pair(high, low); 683356500a3SJeff Roberson return; 684356500a3SJeff Roberson } 685356500a3SJeff Roberson 686ae7a6b38SJeff Roberson /* 687ae7a6b38SJeff Roberson * Move a thread from one thread queue to another. 688ae7a6b38SJeff Roberson */ 68922bf7d9aSJeff Roberson static void 690ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to) 691356500a3SJeff Roberson { 692ad1e7d28SJulian Elischer struct td_sched *ts; 693ae7a6b38SJeff Roberson struct thread *td; 694ae7a6b38SJeff Roberson struct tdq *tdq; 695ae7a6b38SJeff Roberson int cpu; 696356500a3SJeff Roberson 6977fcf154aSJeff Roberson TDQ_LOCK_ASSERT(from, MA_OWNED); 6987fcf154aSJeff Roberson TDQ_LOCK_ASSERT(to, MA_OWNED); 6997fcf154aSJeff Roberson 700ad1e7d28SJulian Elischer tdq = from; 701ae7a6b38SJeff Roberson cpu = TDQ_ID(to); 7027fcf154aSJeff Roberson ts = tdq_steal(tdq); 703ad1e7d28SJulian Elischer if (ts == NULL) { 704d2ad694cSJeff Roberson struct tdq_group *tdg; 70580f86c9fSJeff Roberson 706d2ad694cSJeff Roberson tdg = tdq->tdq_group; 707d2ad694cSJeff Roberson LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) { 708d2ad694cSJeff Roberson if (tdq == from || tdq->tdq_transferable == 0) 70980f86c9fSJeff Roberson continue; 7107fcf154aSJeff Roberson ts = tdq_steal(tdq); 71180f86c9fSJeff Roberson break; 71280f86c9fSJeff Roberson } 713ad1e7d28SJulian Elischer if (ts == NULL) 714ae7a6b38SJeff Roberson return; 71580f86c9fSJeff Roberson } 716ad1e7d28SJulian Elischer if (tdq == to) 71780f86c9fSJeff Roberson return; 718ae7a6b38SJeff Roberson td = ts->ts_thread; 719ae7a6b38SJeff Roberson /* 720ae7a6b38SJeff Roberson * Although the run queue is locked the thread may be blocked. Lock 7217fcf154aSJeff Roberson * it to clear this and acquire the run-queue lock. 722ae7a6b38SJeff Roberson */ 723ae7a6b38SJeff Roberson thread_lock(td); 7247fcf154aSJeff Roberson /* Drop recursive lock on from acquired via thread_lock(). 
*/ 725ae7a6b38SJeff Roberson TDQ_UNLOCK(from); 726ae7a6b38SJeff Roberson sched_rem(td); 7277b8bfa0dSJeff Roberson ts->ts_cpu = cpu; 728ae7a6b38SJeff Roberson td->td_lock = TDQ_LOCKPTR(to); 729ae7a6b38SJeff Roberson tdq_add(to, td, SRQ_YIELDING); 730356500a3SJeff Roberson } 73122bf7d9aSJeff Roberson 732ae7a6b38SJeff Roberson /* 733ae7a6b38SJeff Roberson * This tdq has idled. Try to steal a thread from another cpu and switch 734ae7a6b38SJeff Roberson * to it. 735ae7a6b38SJeff Roberson */ 73680f86c9fSJeff Roberson static int 737ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq) 73822bf7d9aSJeff Roberson { 739d2ad694cSJeff Roberson struct tdq_group *tdg; 740ad1e7d28SJulian Elischer struct tdq *steal; 741ae7a6b38SJeff Roberson int highload; 742ae7a6b38SJeff Roberson int highcpu; 743ae7a6b38SJeff Roberson int load; 744ae7a6b38SJeff Roberson int cpu; 74580f86c9fSJeff Roberson 746ae7a6b38SJeff Roberson /* We don't want to be preempted while we're iterating over tdqs */ 747ae7a6b38SJeff Roberson spinlock_enter(); 748d2ad694cSJeff Roberson tdg = tdq->tdq_group; 74980f86c9fSJeff Roberson /* 750d2ad694cSJeff Roberson * If we're in a cpu group, try and steal threads from another cpu in 7517fcf154aSJeff Roberson * the group before idling. In a HTT group all cpus share the same 7527fcf154aSJeff Roberson * run-queue lock, however, we still need a recursive lock to 7537fcf154aSJeff Roberson * call tdq_move(). 75480f86c9fSJeff Roberson */ 7557b8bfa0dSJeff Roberson if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) { 7567fcf154aSJeff Roberson TDQ_LOCK(tdq); 757d2ad694cSJeff Roberson LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) { 758d2ad694cSJeff Roberson if (steal == tdq || steal->tdq_transferable == 0) 75980f86c9fSJeff Roberson continue; 760ae7a6b38SJeff Roberson TDQ_LOCK(steal); 7617b8bfa0dSJeff Roberson goto steal; 7627b8bfa0dSJeff Roberson } 7637fcf154aSJeff Roberson TDQ_UNLOCK(tdq); 7647b8bfa0dSJeff Roberson } 765ae7a6b38SJeff Roberson for (;;) { 766ae7a6b38SJeff Roberson if (steal_idle == 0) 7677b8bfa0dSJeff Roberson break; 768ae7a6b38SJeff Roberson highcpu = 0; 769ae7a6b38SJeff Roberson highload = 0; 770ae7a6b38SJeff Roberson for (cpu = 0; cpu <= mp_maxid; cpu++) { 771ae7a6b38SJeff Roberson if (CPU_ABSENT(cpu)) 772ae7a6b38SJeff Roberson continue; 7737b8bfa0dSJeff Roberson steal = TDQ_CPU(cpu); 774ae7a6b38SJeff Roberson load = TDQ_CPU(cpu)->tdq_transferable; 775ae7a6b38SJeff Roberson if (load < highload) 7767b8bfa0dSJeff Roberson continue; 777ae7a6b38SJeff Roberson highload = load; 778ae7a6b38SJeff Roberson highcpu = cpu; 779ae7a6b38SJeff Roberson } 78028994a58SJeff Roberson if (highload < steal_thresh) 781ae7a6b38SJeff Roberson break; 782ae7a6b38SJeff Roberson steal = TDQ_CPU(highcpu); 7837fcf154aSJeff Roberson tdq_lock_pair(tdq, steal); 7847fcf154aSJeff Roberson if (steal->tdq_transferable >= steal_thresh) 7857b8bfa0dSJeff Roberson goto steal; 7867fcf154aSJeff Roberson tdq_unlock_pair(tdq, steal); 787ae7a6b38SJeff Roberson break; 78880f86c9fSJeff Roberson } 789ae7a6b38SJeff Roberson spinlock_exit(); 79080f86c9fSJeff Roberson return (1); 7917b8bfa0dSJeff Roberson steal: 792ae7a6b38SJeff Roberson spinlock_exit(); 7937fcf154aSJeff Roberson tdq_move(steal, tdq); 794ae7a6b38SJeff Roberson TDQ_UNLOCK(steal); 795ae7a6b38SJeff Roberson mi_switch(SW_VOL, NULL); 796ae7a6b38SJeff Roberson thread_unlock(curthread); 7977b8bfa0dSJeff Roberson 7987b8bfa0dSJeff Roberson return (0); 79922bf7d9aSJeff Roberson } 80022bf7d9aSJeff Roberson 801ae7a6b38SJeff Roberson /* 802ae7a6b38SJeff Roberson * Notify 
a remote cpu of new work. Sends an IPI if criteria are met. 803ae7a6b38SJeff Roberson */ 80422bf7d9aSJeff Roberson static void 8057b8bfa0dSJeff Roberson tdq_notify(struct td_sched *ts) 80622bf7d9aSJeff Roberson { 807fc3a97dcSJeff Roberson struct thread *ctd; 80822bf7d9aSJeff Roberson struct pcpu *pcpu; 809fc3a97dcSJeff Roberson int cpri; 810fc3a97dcSJeff Roberson int pri; 8117b8bfa0dSJeff Roberson int cpu; 81222bf7d9aSJeff Roberson 8137b8bfa0dSJeff Roberson cpu = ts->ts_cpu; 814fc3a97dcSJeff Roberson pri = ts->ts_thread->td_priority; 81522bf7d9aSJeff Roberson pcpu = pcpu_find(cpu); 816fc3a97dcSJeff Roberson ctd = pcpu->pc_curthread; 817fc3a97dcSJeff Roberson cpri = ctd->td_priority; 8186b2f763fSJeff Roberson 8196b2f763fSJeff Roberson /* 8206b2f763fSJeff Roberson * If our priority is not better than the current priority there is 8216b2f763fSJeff Roberson * nothing to do. 8226b2f763fSJeff Roberson */ 823fc3a97dcSJeff Roberson if (pri > cpri) 8246b2f763fSJeff Roberson return; 8257b8bfa0dSJeff Roberson /* 826fc3a97dcSJeff Roberson * Always IPI idle. 8277b8bfa0dSJeff Roberson */ 828fc3a97dcSJeff Roberson if (cpri > PRI_MIN_IDLE) 829fc3a97dcSJeff Roberson goto sendipi; 830fc3a97dcSJeff Roberson /* 831fc3a97dcSJeff Roberson * If we're realtime or better and there is timeshare or worse running 832fc3a97dcSJeff Roberson * send an IPI. 833fc3a97dcSJeff Roberson */ 834fc3a97dcSJeff Roberson if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME) 835fc3a97dcSJeff Roberson goto sendipi; 836fc3a97dcSJeff Roberson /* 837fc3a97dcSJeff Roberson * Otherwise only IPI if we exceed the threshold. 838fc3a97dcSJeff Roberson */ 839ae7a6b38SJeff Roberson if (pri > preempt_thresh) 8407b8bfa0dSJeff Roberson return; 841fc3a97dcSJeff Roberson sendipi: 842fc3a97dcSJeff Roberson ctd->td_flags |= TDF_NEEDRESCHED; 84314618990SJeff Roberson ipi_selected(1 << cpu, IPI_PREEMPT); 84422bf7d9aSJeff Roberson } 84522bf7d9aSJeff Roberson 846ae7a6b38SJeff Roberson /* 847ae7a6b38SJeff Roberson * Steals load from a timeshare queue. Honors the rotating queue head 848ae7a6b38SJeff Roberson * index. 
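 *
 * Editorial example: if "start" is 12 and the only occupied buckets
 * are 40 and 3, the scan below visits bucket 40 first, wraps around to
 * bucket 3, and deliberately skips the first thread it encounters
 * (typically the one the owning cpu would pick next) before returning
 * the next thread that is allowed to migrate.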
849ae7a6b38SJeff Roberson */ 850ae7a6b38SJeff Roberson static struct td_sched * 851ae7a6b38SJeff Roberson runq_steal_from(struct runq *rq, u_char start) 852ae7a6b38SJeff Roberson { 853ae7a6b38SJeff Roberson struct td_sched *ts; 854ae7a6b38SJeff Roberson struct rqbits *rqb; 855ae7a6b38SJeff Roberson struct rqhead *rqh; 856ae7a6b38SJeff Roberson int first; 857ae7a6b38SJeff Roberson int bit; 858ae7a6b38SJeff Roberson int pri; 859ae7a6b38SJeff Roberson int i; 860ae7a6b38SJeff Roberson 861ae7a6b38SJeff Roberson rqb = &rq->rq_status; 862ae7a6b38SJeff Roberson bit = start & (RQB_BPW -1); 863ae7a6b38SJeff Roberson pri = 0; 864ae7a6b38SJeff Roberson first = 0; 865ae7a6b38SJeff Roberson again: 866ae7a6b38SJeff Roberson for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) { 867ae7a6b38SJeff Roberson if (rqb->rqb_bits[i] == 0) 868ae7a6b38SJeff Roberson continue; 869ae7a6b38SJeff Roberson if (bit != 0) { 870ae7a6b38SJeff Roberson for (pri = bit; pri < RQB_BPW; pri++) 871ae7a6b38SJeff Roberson if (rqb->rqb_bits[i] & (1ul << pri)) 872ae7a6b38SJeff Roberson break; 873ae7a6b38SJeff Roberson if (pri >= RQB_BPW) 874ae7a6b38SJeff Roberson continue; 875ae7a6b38SJeff Roberson } else 876ae7a6b38SJeff Roberson pri = RQB_FFS(rqb->rqb_bits[i]); 877ae7a6b38SJeff Roberson pri += (i << RQB_L2BPW); 878ae7a6b38SJeff Roberson rqh = &rq->rq_queues[pri]; 879ae7a6b38SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) { 880ae7a6b38SJeff Roberson if (first && THREAD_CAN_MIGRATE(ts->ts_thread)) 881ae7a6b38SJeff Roberson return (ts); 882ae7a6b38SJeff Roberson first = 1; 883ae7a6b38SJeff Roberson } 884ae7a6b38SJeff Roberson } 885ae7a6b38SJeff Roberson if (start != 0) { 886ae7a6b38SJeff Roberson start = 0; 887ae7a6b38SJeff Roberson goto again; 888ae7a6b38SJeff Roberson } 889ae7a6b38SJeff Roberson 890ae7a6b38SJeff Roberson return (NULL); 891ae7a6b38SJeff Roberson } 892ae7a6b38SJeff Roberson 893ae7a6b38SJeff Roberson /* 894ae7a6b38SJeff Roberson * Steals load from a standard linear queue. 895ae7a6b38SJeff Roberson */ 896ad1e7d28SJulian Elischer static struct td_sched * 89722bf7d9aSJeff Roberson runq_steal(struct runq *rq) 89822bf7d9aSJeff Roberson { 89922bf7d9aSJeff Roberson struct rqhead *rqh; 90022bf7d9aSJeff Roberson struct rqbits *rqb; 901ad1e7d28SJulian Elischer struct td_sched *ts; 90222bf7d9aSJeff Roberson int word; 90322bf7d9aSJeff Roberson int bit; 90422bf7d9aSJeff Roberson 90522bf7d9aSJeff Roberson rqb = &rq->rq_status; 90622bf7d9aSJeff Roberson for (word = 0; word < RQB_LEN; word++) { 90722bf7d9aSJeff Roberson if (rqb->rqb_bits[word] == 0) 90822bf7d9aSJeff Roberson continue; 90922bf7d9aSJeff Roberson for (bit = 0; bit < RQB_BPW; bit++) { 910a2640c9bSPeter Wemm if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 91122bf7d9aSJeff Roberson continue; 91222bf7d9aSJeff Roberson rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 91328994a58SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) 91428994a58SJeff Roberson if (THREAD_CAN_MIGRATE(ts->ts_thread)) 915ad1e7d28SJulian Elischer return (ts); 91622bf7d9aSJeff Roberson } 91722bf7d9aSJeff Roberson } 91822bf7d9aSJeff Roberson return (NULL); 91922bf7d9aSJeff Roberson } 92022bf7d9aSJeff Roberson 921ae7a6b38SJeff Roberson /* 922ae7a6b38SJeff Roberson * Attempt to steal a thread in priority order from a thread queue. 
923ae7a6b38SJeff Roberson */ 924ad1e7d28SJulian Elischer static struct td_sched * 9257fcf154aSJeff Roberson tdq_steal(struct tdq *tdq) 92622bf7d9aSJeff Roberson { 927ad1e7d28SJulian Elischer struct td_sched *ts; 92822bf7d9aSJeff Roberson 929ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 930e7d50326SJeff Roberson if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL) 931ad1e7d28SJulian Elischer return (ts); 932ae7a6b38SJeff Roberson if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL) 933ad1e7d28SJulian Elischer return (ts); 934d2ad694cSJeff Roberson return (runq_steal(&tdq->tdq_idle)); 93522bf7d9aSJeff Roberson } 93680f86c9fSJeff Roberson 937ae7a6b38SJeff Roberson /* 938ae7a6b38SJeff Roberson * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the 9397fcf154aSJeff Roberson * current lock and returns with the assigned queue locked. 940ae7a6b38SJeff Roberson */ 941ae7a6b38SJeff Roberson static inline struct tdq * 942ae7a6b38SJeff Roberson sched_setcpu(struct td_sched *ts, int cpu, int flags) 94380f86c9fSJeff Roberson { 944ae7a6b38SJeff Roberson struct thread *td; 945ae7a6b38SJeff Roberson struct tdq *tdq; 94680f86c9fSJeff Roberson 947ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 948ae7a6b38SJeff Roberson 949ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpu); 950ae7a6b38SJeff Roberson td = ts->ts_thread; 951ae7a6b38SJeff Roberson ts->ts_cpu = cpu; 952c47f202bSJeff Roberson 953c47f202bSJeff Roberson /* If the lock matches just return the queue. */ 954ae7a6b38SJeff Roberson if (td->td_lock == TDQ_LOCKPTR(tdq)) 955ae7a6b38SJeff Roberson return (tdq); 956ae7a6b38SJeff Roberson #ifdef notyet 95780f86c9fSJeff Roberson /* 958a5423ea3SJeff Roberson * If the thread isn't running its lockptr is a 959ae7a6b38SJeff Roberson * turnstile or a sleepqueue. We can just lock_set without 960ae7a6b38SJeff Roberson * blocking. 961670c524fSJeff Roberson */ 962ae7a6b38SJeff Roberson if (TD_CAN_RUN(td)) { 963ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 964ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 965ae7a6b38SJeff Roberson return (tdq); 966ae7a6b38SJeff Roberson } 967ae7a6b38SJeff Roberson #endif 96880f86c9fSJeff Roberson /* 969ae7a6b38SJeff Roberson * The hard case, migration, we need to block the thread first to 970ae7a6b38SJeff Roberson * prevent order reversals with other cpus locks. 9717b8bfa0dSJeff Roberson */ 972ae7a6b38SJeff Roberson thread_lock_block(td); 973ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 974ae7a6b38SJeff Roberson thread_lock_unblock(td, TDQ_LOCKPTR(tdq)); 975ae7a6b38SJeff Roberson return (tdq); 97680f86c9fSJeff Roberson } 9772454aaf5SJeff Roberson 978ae7a6b38SJeff Roberson /* 979ae7a6b38SJeff Roberson * Find the thread queue running the lowest priority thread. 
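 *
 * Editorial example: a cpu whose current thread runs at numeric
 * priority 220 is preferred over one running at 120, since the larger
 * number is the less important thread; load is only used to break ties
 * between equal priorities.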
980ae7a6b38SJeff Roberson */ 9817b8bfa0dSJeff Roberson static int 982ae7a6b38SJeff Roberson tdq_lowestpri(void) 9837b8bfa0dSJeff Roberson { 984ae7a6b38SJeff Roberson struct tdq *tdq; 9857b8bfa0dSJeff Roberson int lowpri; 9867b8bfa0dSJeff Roberson int lowcpu; 9877b8bfa0dSJeff Roberson int lowload; 9887b8bfa0dSJeff Roberson int load; 989ae7a6b38SJeff Roberson int cpu; 990ae7a6b38SJeff Roberson int pri; 991ae7a6b38SJeff Roberson 992ae7a6b38SJeff Roberson lowload = 0; 993ae7a6b38SJeff Roberson lowpri = lowcpu = 0; 994ae7a6b38SJeff Roberson for (cpu = 0; cpu <= mp_maxid; cpu++) { 995ae7a6b38SJeff Roberson if (CPU_ABSENT(cpu)) 996ae7a6b38SJeff Roberson continue; 997ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpu); 998ae7a6b38SJeff Roberson pri = tdq->tdq_lowpri; 999ae7a6b38SJeff Roberson load = TDQ_CPU(cpu)->tdq_load; 1000ae7a6b38SJeff Roberson CTR4(KTR_ULE, 1001ae7a6b38SJeff Roberson "cpu %d pri %d lowcpu %d lowpri %d", 1002ae7a6b38SJeff Roberson cpu, pri, lowcpu, lowpri); 1003ae7a6b38SJeff Roberson if (pri < lowpri) 1004ae7a6b38SJeff Roberson continue; 1005ae7a6b38SJeff Roberson if (lowpri && lowpri == pri && load > lowload) 1006ae7a6b38SJeff Roberson continue; 1007ae7a6b38SJeff Roberson lowpri = pri; 1008ae7a6b38SJeff Roberson lowcpu = cpu; 1009ae7a6b38SJeff Roberson lowload = load; 1010ae7a6b38SJeff Roberson } 1011ae7a6b38SJeff Roberson 1012ae7a6b38SJeff Roberson return (lowcpu); 1013ae7a6b38SJeff Roberson } 1014ae7a6b38SJeff Roberson 1015ae7a6b38SJeff Roberson /* 1016ae7a6b38SJeff Roberson * Find the thread queue with the least load. 1017ae7a6b38SJeff Roberson */ 1018ae7a6b38SJeff Roberson static int 1019ae7a6b38SJeff Roberson tdq_lowestload(void) 1020ae7a6b38SJeff Roberson { 1021ae7a6b38SJeff Roberson struct tdq *tdq; 1022ae7a6b38SJeff Roberson int lowload; 1023ae7a6b38SJeff Roberson int lowpri; 1024ae7a6b38SJeff Roberson int lowcpu; 1025ae7a6b38SJeff Roberson int load; 1026ae7a6b38SJeff Roberson int cpu; 1027ae7a6b38SJeff Roberson int pri; 1028ae7a6b38SJeff Roberson 1029ae7a6b38SJeff Roberson lowcpu = 0; 1030ae7a6b38SJeff Roberson lowload = TDQ_CPU(0)->tdq_load; 1031ae7a6b38SJeff Roberson lowpri = TDQ_CPU(0)->tdq_lowpri; 1032ae7a6b38SJeff Roberson for (cpu = 1; cpu <= mp_maxid; cpu++) { 1033ae7a6b38SJeff Roberson if (CPU_ABSENT(cpu)) 1034ae7a6b38SJeff Roberson continue; 1035ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpu); 1036ae7a6b38SJeff Roberson load = tdq->tdq_load; 1037ae7a6b38SJeff Roberson pri = tdq->tdq_lowpri; 1038ae7a6b38SJeff Roberson CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d", 1039ae7a6b38SJeff Roberson cpu, load, lowcpu, lowload); 1040ae7a6b38SJeff Roberson if (load > lowload) 1041ae7a6b38SJeff Roberson continue; 1042ae7a6b38SJeff Roberson if (load == lowload && pri < lowpri) 1043ae7a6b38SJeff Roberson continue; 1044ae7a6b38SJeff Roberson lowcpu = cpu; 1045ae7a6b38SJeff Roberson lowload = load; 1046ae7a6b38SJeff Roberson lowpri = pri; 1047ae7a6b38SJeff Roberson } 1048ae7a6b38SJeff Roberson 1049ae7a6b38SJeff Roberson return (lowcpu); 1050ae7a6b38SJeff Roberson } 1051ae7a6b38SJeff Roberson 1052ae7a6b38SJeff Roberson /* 1053ae7a6b38SJeff Roberson * Pick the destination cpu for sched_add(). Respects affinity and makes 1054ae7a6b38SJeff Roberson * a determination based on load or priority of available processors. 
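 *
 * Editorial sketch of the decision order, not part of the original
 * file and not compiled; locking, statistics and the CTR tracepoints
 * are omitted and the helper name is made up.  See the real code
 * below for the details.
 */
#if 0
static int
sched_pickcpu_sketch(struct td_sched *ts, int flags)
{
	int pri;
	int self;

	self = PCPU_GET(cpuid);
	pri = ts->ts_thread->td_priority;
	if (smp_started == 0 || (flags & SRQ_OURSELF))
		return (self);		/* No SMP, or called from sched_switch(). */
	if (TDQ_CPU(ts->ts_cpu)->tdq_lowpri > PRI_MIN_IDLE)
		return (ts->ts_cpu);	/* The last cpu is idle; reuse it. */
	if (SCHED_AFFINITY(ts) && TDQ_CPU(ts->ts_cpu)->tdq_lowpri > pri)
		return (ts->ts_cpu);	/* Affinity is still warm. */
	if (ffs(tdq_idle) != 0)
		return (ffs(tdq_idle) - 1);	/* Any idle group member. */
	if (tryself && pri < curthread->td_priority)
		return (self);		/* Run it locally. */
	return (pick_pri ? tdq_lowestpri() : tdq_lowestload());
}
#endif
/*
 * The in-kernel implementation: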
1055ae7a6b38SJeff Roberson */ 1056ae7a6b38SJeff Roberson static int 1057ae7a6b38SJeff Roberson sched_pickcpu(struct td_sched *ts, int flags) 1058ae7a6b38SJeff Roberson { 1059ae7a6b38SJeff Roberson struct tdq *tdq; 10607b8bfa0dSJeff Roberson int self; 10617b8bfa0dSJeff Roberson int pri; 10627b8bfa0dSJeff Roberson int cpu; 10637b8bfa0dSJeff Roberson 1064ae7a6b38SJeff Roberson cpu = self = PCPU_GET(cpuid); 10657b8bfa0dSJeff Roberson if (smp_started == 0) 10667b8bfa0dSJeff Roberson return (self); 106728994a58SJeff Roberson /* 106828994a58SJeff Roberson * Don't migrate a running thread from sched_switch(). 106928994a58SJeff Roberson */ 107028994a58SJeff Roberson if (flags & SRQ_OURSELF) { 107128994a58SJeff Roberson CTR1(KTR_ULE, "YIELDING %d", 107228994a58SJeff Roberson curthread->td_priority); 107328994a58SJeff Roberson return (self); 107428994a58SJeff Roberson } 10757b8bfa0dSJeff Roberson pri = ts->ts_thread->td_priority; 1076ae7a6b38SJeff Roberson cpu = ts->ts_cpu; 10777b8bfa0dSJeff Roberson /* 10787b8bfa0dSJeff Roberson * Regardless of affinity, if the last cpu is idle send it there. 10797b8bfa0dSJeff Roberson */ 1080ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpu); 1081ae7a6b38SJeff Roberson if (tdq->tdq_lowpri > PRI_MIN_IDLE) { 108214618990SJeff Roberson CTR5(KTR_ULE, 10837b8bfa0dSJeff Roberson "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d", 10847b8bfa0dSJeff Roberson ts->ts_cpu, ts->ts_rltick, ticks, pri, 1085ae7a6b38SJeff Roberson tdq->tdq_lowpri); 10867b8bfa0dSJeff Roberson return (ts->ts_cpu); 10877b8bfa0dSJeff Roberson } 10887b8bfa0dSJeff Roberson /* 10897b8bfa0dSJeff Roberson * If we have affinity, try to place it on the cpu we last ran on. 10907b8bfa0dSJeff Roberson */ 1091ae7a6b38SJeff Roberson if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) { 109214618990SJeff Roberson CTR5(KTR_ULE, 10937b8bfa0dSJeff Roberson "affinity for %d, ltick %d ticks %d pri %d curthread %d", 10947b8bfa0dSJeff Roberson ts->ts_cpu, ts->ts_rltick, ticks, pri, 1095ae7a6b38SJeff Roberson tdq->tdq_lowpri); 10967b8bfa0dSJeff Roberson return (ts->ts_cpu); 10977b8bfa0dSJeff Roberson } 10987b8bfa0dSJeff Roberson /* 10997b8bfa0dSJeff Roberson * Look for an idle group. 11007b8bfa0dSJeff Roberson */ 110114618990SJeff Roberson CTR1(KTR_ULE, "tdq_idle %X", tdq_idle); 11027b8bfa0dSJeff Roberson cpu = ffs(tdq_idle); 11037b8bfa0dSJeff Roberson if (cpu) 1104ae7a6b38SJeff Roberson return (--cpu); 110528994a58SJeff Roberson /* 11067fcf154aSJeff Roberson * If there are no idle cores see if we can run the thread locally. 11077fcf154aSJeff Roberson * This may improve locality among sleepers and wakers when there 11087fcf154aSJeff Roberson * is shared data. 110928994a58SJeff Roberson */ 111028994a58SJeff Roberson if (tryself && pri < curthread->td_priority) { 111128994a58SJeff Roberson CTR1(KTR_ULE, "tryself %d", 11127b8bfa0dSJeff Roberson curthread->td_priority); 11137b8bfa0dSJeff Roberson return (self); 11147b8bfa0dSJeff Roberson } 11157b8bfa0dSJeff Roberson /* 11167b8bfa0dSJeff Roberson * Now search for the cpu running the lowest priority thread with 11177b8bfa0dSJeff Roberson * the least load. 
11187b8bfa0dSJeff Roberson */ 1119ae7a6b38SJeff Roberson if (pick_pri) 1120ae7a6b38SJeff Roberson cpu = tdq_lowestpri(); 1121ae7a6b38SJeff Roberson else 1122ae7a6b38SJeff Roberson cpu = tdq_lowestload(); 1123ae7a6b38SJeff Roberson return (cpu); 112480f86c9fSJeff Roberson } 112580f86c9fSJeff Roberson 112622bf7d9aSJeff Roberson #endif /* SMP */ 112722bf7d9aSJeff Roberson 112822bf7d9aSJeff Roberson /* 112922bf7d9aSJeff Roberson * Pick the highest priority task we have and return it. 11300c0a98b2SJeff Roberson */ 1131ad1e7d28SJulian Elischer static struct td_sched * 1132ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq) 11335d7ef00cSJeff Roberson { 1134ad1e7d28SJulian Elischer struct td_sched *ts; 11355d7ef00cSJeff Roberson 1136ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1137e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_realtime); 1138dda713dfSJeff Roberson if (ts != NULL) 1139e7d50326SJeff Roberson return (ts); 11403f872f85SJeff Roberson ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 1141e7d50326SJeff Roberson if (ts != NULL) { 1142dda713dfSJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 1143e7d50326SJeff Roberson ("tdq_choose: Invalid priority on timeshare queue %d", 1144e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1145ad1e7d28SJulian Elischer return (ts); 114615dc847eSJeff Roberson } 114715dc847eSJeff Roberson 1148e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_idle); 1149e7d50326SJeff Roberson if (ts != NULL) { 1150e7d50326SJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 1151e7d50326SJeff Roberson ("tdq_choose: Invalid priority on idle queue %d", 1152e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1153e7d50326SJeff Roberson return (ts); 1154e7d50326SJeff Roberson } 1155e7d50326SJeff Roberson 1156e7d50326SJeff Roberson return (NULL); 1157245f3abfSJeff Roberson } 11580a016a05SJeff Roberson 1159ae7a6b38SJeff Roberson /* 1160ae7a6b38SJeff Roberson * Initialize a thread queue. 
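 *
 * The selection in tdq_choose() above is a strict three-tier probe:
 * realtime first, then the timeshare calendar queue starting at tdq_ridx,
 * then idle.  Abstracting the queues down to simple occupancy counts gives
 * a small sketch of that ordering; the enum and counts are illustrative
 * only and are not part of the scheduler:
 *
 *	enum tier { TIER_REALTIME, TIER_TIMESHARE, TIER_IDLE, TIER_NONE };
 *
 *	static enum tier
 *	choose_tier(int nrealtime, int ntimeshare, int nidle)
 *	{
 *		if (nrealtime > 0)
 *			return (TIER_REALTIME);
 *		if (ntimeshare > 0)
 *			return (TIER_TIMESHARE);
 *		if (nidle > 0)
 *			return (TIER_IDLE);
 *		return (TIER_NONE);
 *	}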
1161ae7a6b38SJeff Roberson */ 11620a016a05SJeff Roberson static void 1163ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq) 11640a016a05SJeff Roberson { 1165ae7a6b38SJeff Roberson 1166c47f202bSJeff Roberson if (bootverbose) 1167c47f202bSJeff Roberson printf("ULE: setup cpu %d\n", TDQ_ID(tdq)); 1168e7d50326SJeff Roberson runq_init(&tdq->tdq_realtime); 1169e7d50326SJeff Roberson runq_init(&tdq->tdq_timeshare); 1170d2ad694cSJeff Roberson runq_init(&tdq->tdq_idle); 1171d2ad694cSJeff Roberson tdq->tdq_load = 0; 11720a016a05SJeff Roberson } 11730a016a05SJeff Roberson 1174c47f202bSJeff Roberson #ifdef SMP 1175c47f202bSJeff Roberson static void 1176c47f202bSJeff Roberson tdg_setup(struct tdq_group *tdg) 1177c47f202bSJeff Roberson { 1178c47f202bSJeff Roberson if (bootverbose) 1179c47f202bSJeff Roberson printf("ULE: setup cpu group %d\n", TDG_ID(tdg)); 1180c47f202bSJeff Roberson snprintf(tdg->tdg_name, sizeof(tdg->tdg_name), 1181c47f202bSJeff Roberson "sched lock %d", (int)TDG_ID(tdg)); 1182c47f202bSJeff Roberson mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock", 1183c47f202bSJeff Roberson MTX_SPIN | MTX_RECURSE); 1184c47f202bSJeff Roberson LIST_INIT(&tdg->tdg_members); 1185c47f202bSJeff Roberson tdg->tdg_load = 0; 1186c47f202bSJeff Roberson tdg->tdg_transferable = 0; 1187c47f202bSJeff Roberson tdg->tdg_cpus = 0; 1188c47f202bSJeff Roberson tdg->tdg_mask = 0; 1189c47f202bSJeff Roberson tdg->tdg_cpumask = 0; 1190c47f202bSJeff Roberson tdg->tdg_idlemask = 0; 1191c47f202bSJeff Roberson } 1192c47f202bSJeff Roberson 1193c47f202bSJeff Roberson static void 1194c47f202bSJeff Roberson tdg_add(struct tdq_group *tdg, struct tdq *tdq) 1195c47f202bSJeff Roberson { 1196c47f202bSJeff Roberson if (tdg->tdg_mask == 0) 1197c47f202bSJeff Roberson tdg->tdg_mask |= 1 << TDQ_ID(tdq); 1198c47f202bSJeff Roberson tdg->tdg_cpumask |= 1 << TDQ_ID(tdq); 1199c47f202bSJeff Roberson tdg->tdg_cpus++; 1200c47f202bSJeff Roberson tdq->tdq_group = tdg; 1201c47f202bSJeff Roberson tdq->tdq_lock = &tdg->tdg_lock; 1202c47f202bSJeff Roberson LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings); 1203c47f202bSJeff Roberson if (bootverbose) 1204c47f202bSJeff Roberson printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n", 1205c47f202bSJeff Roberson TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask); 1206c47f202bSJeff Roberson } 1207c47f202bSJeff Roberson 1208c47f202bSJeff Roberson static void 1209c47f202bSJeff Roberson sched_setup_topology(void) 1210c47f202bSJeff Roberson { 1211c47f202bSJeff Roberson struct tdq_group *tdg; 1212c47f202bSJeff Roberson struct cpu_group *cg; 1213c47f202bSJeff Roberson int balance_groups; 1214c47f202bSJeff Roberson struct tdq *tdq; 1215c47f202bSJeff Roberson int i; 1216c47f202bSJeff Roberson int j; 1217c47f202bSJeff Roberson 1218c47f202bSJeff Roberson topology = 1; 1219c47f202bSJeff Roberson balance_groups = 0; 1220c47f202bSJeff Roberson for (i = 0; i < smp_topology->ct_count; i++) { 1221c47f202bSJeff Roberson cg = &smp_topology->ct_group[i]; 1222c47f202bSJeff Roberson tdg = &tdq_groups[i]; 1223c47f202bSJeff Roberson /* 1224c47f202bSJeff Roberson * Initialize the group. 1225c47f202bSJeff Roberson */ 1226c47f202bSJeff Roberson tdg_setup(tdg); 1227c47f202bSJeff Roberson /* 1228c47f202bSJeff Roberson * Find all of the group members and add them. 
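 *
 * The group and cpu masks built by tdg_setup()/tdg_add() above are plain
 * bit sets keyed by cpu id, and the loop below tests membership with the
 * same 1 << cpu arithmetic.  A tiny sketch of the two operations on a
 * hypothetical 32-bit mask:
 *
 *	static int
 *	cpumask_member(unsigned int mask, int cpu)
 *	{
 *		return ((mask & (1u << cpu)) != 0);
 *	}
 *
 *	static unsigned int
 *	cpumask_add(unsigned int mask, int cpu)
 *	{
 *		return (mask | (1u << cpu));
 *	}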
1229c47f202bSJeff Roberson */ 1230c47f202bSJeff Roberson for (j = 0; j < MAXCPU; j++) { 1231c47f202bSJeff Roberson if ((cg->cg_mask & (1 << j)) != 0) { 1232c47f202bSJeff Roberson tdq = TDQ_CPU(j); 1233c47f202bSJeff Roberson tdq_setup(tdq); 1234c47f202bSJeff Roberson tdg_add(tdg, tdq); 1235c47f202bSJeff Roberson } 1236c47f202bSJeff Roberson } 1237c47f202bSJeff Roberson if (tdg->tdg_cpus > 1) 1238c47f202bSJeff Roberson balance_groups = 1; 1239c47f202bSJeff Roberson } 1240c47f202bSJeff Roberson tdg_maxid = smp_topology->ct_count - 1; 1241c47f202bSJeff Roberson if (balance_groups) 12427fcf154aSJeff Roberson sched_balance_groups(); 1243c47f202bSJeff Roberson } 1244c47f202bSJeff Roberson 1245c47f202bSJeff Roberson static void 1246c47f202bSJeff Roberson sched_setup_smp(void) 1247c47f202bSJeff Roberson { 1248c47f202bSJeff Roberson struct tdq_group *tdg; 1249c47f202bSJeff Roberson struct tdq *tdq; 1250c47f202bSJeff Roberson int cpus; 1251c47f202bSJeff Roberson int i; 1252c47f202bSJeff Roberson 1253c47f202bSJeff Roberson for (cpus = 0, i = 0; i < MAXCPU; i++) { 1254c47f202bSJeff Roberson if (CPU_ABSENT(i)) 1255c47f202bSJeff Roberson continue; 1256c47f202bSJeff Roberson tdq = &tdq_cpu[i]; 1257c47f202bSJeff Roberson tdg = &tdq_groups[i]; 1258c47f202bSJeff Roberson /* 1259c47f202bSJeff Roberson * Setup a tdq group with one member. 1260c47f202bSJeff Roberson */ 1261c47f202bSJeff Roberson tdg_setup(tdg); 1262c47f202bSJeff Roberson tdq_setup(tdq); 1263c47f202bSJeff Roberson tdg_add(tdg, tdq); 1264c47f202bSJeff Roberson cpus++; 1265c47f202bSJeff Roberson } 1266c47f202bSJeff Roberson tdg_maxid = cpus - 1; 1267c47f202bSJeff Roberson } 1268c47f202bSJeff Roberson 1269c47f202bSJeff Roberson /* 1270c47f202bSJeff Roberson * Fake a topology with one group containing all CPUs. 1271c47f202bSJeff Roberson */ 1272c47f202bSJeff Roberson static void 1273c47f202bSJeff Roberson sched_fake_topo(void) 1274c47f202bSJeff Roberson { 1275c47f202bSJeff Roberson #ifdef SCHED_FAKE_TOPOLOGY 1276c47f202bSJeff Roberson static struct cpu_top top; 1277c47f202bSJeff Roberson static struct cpu_group group; 1278c47f202bSJeff Roberson 1279c47f202bSJeff Roberson top.ct_count = 1; 1280c47f202bSJeff Roberson top.ct_group = &group; 1281c47f202bSJeff Roberson group.cg_mask = all_cpus; 1282c47f202bSJeff Roberson group.cg_count = mp_ncpus; 1283c47f202bSJeff Roberson group.cg_children = 0; 1284c47f202bSJeff Roberson smp_topology = ⊤ 1285c47f202bSJeff Roberson #endif 1286c47f202bSJeff Roberson } 1287c47f202bSJeff Roberson #endif 1288c47f202bSJeff Roberson 1289ae7a6b38SJeff Roberson /* 1290ae7a6b38SJeff Roberson * Setup the thread queues and initialize the topology based on MD 1291ae7a6b38SJeff Roberson * information. 1292ae7a6b38SJeff Roberson */ 129335e6168fSJeff Roberson static void 129435e6168fSJeff Roberson sched_setup(void *dummy) 129535e6168fSJeff Roberson { 1296ae7a6b38SJeff Roberson struct tdq *tdq; 1297c47f202bSJeff Roberson 1298c47f202bSJeff Roberson tdq = TDQ_SELF(); 12990ec896fdSJeff Roberson #ifdef SMP 1300c47f202bSJeff Roberson sched_fake_topo(); 1301c47f202bSJeff Roberson /* 1302c47f202bSJeff Roberson * Setup tdqs based on a topology configuration or vanilla SMP based 1303c47f202bSJeff Roberson * on mp_maxid. 
1304c47f202bSJeff Roberson */
1305c47f202bSJeff Roberson if (smp_topology == NULL)
1306c47f202bSJeff Roberson sched_setup_smp();
1307c47f202bSJeff Roberson else
1308c47f202bSJeff Roberson sched_setup_topology();
13097fcf154aSJeff Roberson balance_tdq = tdq;
13107fcf154aSJeff Roberson sched_balance();
1311749d01b0SJeff Roberson #else
1312c47f202bSJeff Roberson tdq_setup(tdq);
1313c47f202bSJeff Roberson mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1314c47f202bSJeff Roberson tdq->tdq_lock = &tdq_lock;
1315356500a3SJeff Roberson #endif
1316ae7a6b38SJeff Roberson /*
1317ae7a6b38SJeff Roberson * To avoid divide-by-zero, set realstathz to a dummy value in
1318ae7a6b38SJeff Roberson * case sched_clock() is called before sched_initticks().
1319ae7a6b38SJeff Roberson */
1320ae7a6b38SJeff Roberson realstathz = hz;
1321ae7a6b38SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */
1322ae7a6b38SJeff Roberson tickincr = 1 << SCHED_TICK_SHIFT;
1323ae7a6b38SJeff Roberson 
1324ae7a6b38SJeff Roberson /* Add thread0's load since it's running. */
1325ae7a6b38SJeff Roberson TDQ_LOCK(tdq);
1326c47f202bSJeff Roberson thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1327ae7a6b38SJeff Roberson tdq_load_add(tdq, &td_sched0);
1328ae7a6b38SJeff Roberson TDQ_UNLOCK(tdq);
132935e6168fSJeff Roberson }
133035e6168fSJeff Roberson 
1331ae7a6b38SJeff Roberson /*
1332ae7a6b38SJeff Roberson * This routine determines the tickincr after stathz and hz are setup.
1333ae7a6b38SJeff Roberson */
1334a1d4fe69SDavid Xu /* ARGSUSED */
1335a1d4fe69SDavid Xu static void
1336a1d4fe69SDavid Xu sched_initticks(void *dummy)
1337a1d4fe69SDavid Xu {
1338ae7a6b38SJeff Roberson int incr;
1339ae7a6b38SJeff Roberson 
1340a1d4fe69SDavid Xu realstathz = stathz ? stathz : hz;
134114618990SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */
1342a1d4fe69SDavid Xu 
1343a1d4fe69SDavid Xu /*
1344e7d50326SJeff Roberson * tickincr is shifted out by 10 to avoid rounding errors due to
13453f872f85SJeff Roberson * hz not being evenly divisible by stathz on all platforms.
1346e7d50326SJeff Roberson */
1347ae7a6b38SJeff Roberson incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1348e7d50326SJeff Roberson /*
1349e7d50326SJeff Roberson * This does not work for values of stathz that are more than
1350e7d50326SJeff Roberson * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen.
1351a1d4fe69SDavid Xu */
1352ae7a6b38SJeff Roberson if (incr == 0)
1353ae7a6b38SJeff Roberson incr = 1;
1354ae7a6b38SJeff Roberson tickincr = incr;
13557b8bfa0dSJeff Roberson #ifdef SMP
13569862717aSJeff Roberson /*
13577fcf154aSJeff Roberson * Set the default balance interval now that we know
13587fcf154aSJeff Roberson * what realstathz is.
13597fcf154aSJeff Roberson */
13607fcf154aSJeff Roberson balance_interval = realstathz;
13617fcf154aSJeff Roberson /*
13629862717aSJeff Roberson * Set steal thresh to log2(mp_ncpus) but no greater than 4. This
13639862717aSJeff Roberson * prevents excess thrashing on large machines and excess idle on
13649862717aSJeff Roberson * smaller machines.
13659862717aSJeff Roberson */
13669862717aSJeff Roberson steal_thresh = min(ffs(mp_ncpus) - 1, 4);
13677b8bfa0dSJeff Roberson affinity = SCHED_AFFINITY_DEFAULT;
13687b8bfa0dSJeff Roberson #endif
1369a1d4fe69SDavid Xu }
1370a1d4fe69SDavid Xu 
1371a1d4fe69SDavid Xu 
137235e6168fSJeff Roberson /*
1373ae7a6b38SJeff Roberson * This is the core of the interactivity algorithm. Determines a score based
1374ae7a6b38SJeff Roberson * on past behavior.
It is the ratio of sleep time to run time scaled to 1375ae7a6b38SJeff Roberson * a [0, 100] integer. This is the voluntary sleep time of a process, which 1376ae7a6b38SJeff Roberson * differs from the cpu usage because it does not account for time spent 1377ae7a6b38SJeff Roberson * waiting on a run-queue. Would be prettier if we had floating point. 1378ae7a6b38SJeff Roberson */ 1379ae7a6b38SJeff Roberson static int 1380ae7a6b38SJeff Roberson sched_interact_score(struct thread *td) 1381ae7a6b38SJeff Roberson { 1382ae7a6b38SJeff Roberson struct td_sched *ts; 1383ae7a6b38SJeff Roberson int div; 1384ae7a6b38SJeff Roberson 1385ae7a6b38SJeff Roberson ts = td->td_sched; 1386ae7a6b38SJeff Roberson /* 1387ae7a6b38SJeff Roberson * The score is only needed if this is likely to be an interactive 1388ae7a6b38SJeff Roberson * task. Don't go through the expense of computing it if there's 1389ae7a6b38SJeff Roberson * no chance. 1390ae7a6b38SJeff Roberson */ 1391ae7a6b38SJeff Roberson if (sched_interact <= SCHED_INTERACT_HALF && 1392ae7a6b38SJeff Roberson ts->ts_runtime >= ts->ts_slptime) 1393ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1394ae7a6b38SJeff Roberson 1395ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1396ae7a6b38SJeff Roberson div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1397ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF + 1398ae7a6b38SJeff Roberson (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1399ae7a6b38SJeff Roberson } 1400ae7a6b38SJeff Roberson if (ts->ts_slptime > ts->ts_runtime) { 1401ae7a6b38SJeff Roberson div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1402ae7a6b38SJeff Roberson return (ts->ts_runtime / div); 1403ae7a6b38SJeff Roberson } 1404ae7a6b38SJeff Roberson /* runtime == slptime */ 1405ae7a6b38SJeff Roberson if (ts->ts_runtime) 1406ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1407ae7a6b38SJeff Roberson 1408ae7a6b38SJeff Roberson /* 1409ae7a6b38SJeff Roberson * This can happen if slptime and runtime are 0. 1410ae7a6b38SJeff Roberson */ 1411ae7a6b38SJeff Roberson return (0); 1412ae7a6b38SJeff Roberson 1413ae7a6b38SJeff Roberson } 1414ae7a6b38SJeff Roberson 1415ae7a6b38SJeff Roberson /* 141635e6168fSJeff Roberson * Scale the scheduling priority according to the "interactivity" of this 141735e6168fSJeff Roberson * process. 141835e6168fSJeff Roberson */ 141915dc847eSJeff Roberson static void 14208460a577SJohn Birrell sched_priority(struct thread *td) 142135e6168fSJeff Roberson { 1422e7d50326SJeff Roberson int score; 142335e6168fSJeff Roberson int pri; 142435e6168fSJeff Roberson 14258460a577SJohn Birrell if (td->td_pri_class != PRI_TIMESHARE) 142615dc847eSJeff Roberson return; 1427e7d50326SJeff Roberson /* 1428e7d50326SJeff Roberson * If the score is interactive we place the thread in the realtime 1429e7d50326SJeff Roberson * queue with a priority that is less than kernel and interrupt 1430e7d50326SJeff Roberson * priorities. These threads are not subject to nice restrictions. 1431e7d50326SJeff Roberson * 1432ae7a6b38SJeff Roberson * Scores greater than this are placed on the normal timeshare queue 1433e7d50326SJeff Roberson * where the priority is partially decided by the most recent cpu 1434e7d50326SJeff Roberson * utilization and the rest is decided by nice value. 1435a5423ea3SJeff Roberson * 1436a5423ea3SJeff Roberson * The nice value of the process has a linear effect on the calculated 1437a5423ea3SJeff Roberson * score. Negative nice values make it easier for a thread to be 1438a5423ea3SJeff Roberson * considered interactive. 
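 *
 * As a concrete rendering of the score, assuming the SCHED_INTERACT_MAX
 * of 100 implied by the [0, 100] range noted above (so half the scale is
 * 50): scores below 50 correspond to threads that sleep more than they
 * run, scores above 50 to threads that run more than they sleep.  A
 * standalone sketch, with the early-out for the clearly non-interactive
 * case dropped for brevity and runtime/slptime assumed non-negative:
 *
 *	#define	INTERACT_MAX	100
 *	#define	INTERACT_HALF	(INTERACT_MAX / 2)
 *
 *	static int
 *	interact_score(int runtime, int slptime)
 *	{
 *		int div;
 *
 *		if (runtime > slptime) {
 *			div = runtime / INTERACT_HALF;
 *			if (div == 0)
 *				div = 1;
 *			return (INTERACT_HALF +
 *			    (INTERACT_HALF - slptime / div));
 *		}
 *		if (slptime > runtime) {
 *			div = slptime / INTERACT_HALF;
 *			if (div == 0)
 *				div = 1;
 *			return (runtime / div);
 *		}
 *		return (runtime ? INTERACT_HALF : 0);
 *	}
 *
 * With large counters, a thread that sleeps twice as much as it runs
 * scores around 25, and one that runs twice as much as it sleeps scores
 * around 75.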
1439e7d50326SJeff Roberson */ 1440e270652bSJeff Roberson score = imax(0, sched_interact_score(td) - td->td_proc->p_nice); 1441e7d50326SJeff Roberson if (score < sched_interact) { 1442e7d50326SJeff Roberson pri = PRI_MIN_REALTIME; 1443e7d50326SJeff Roberson pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1444e7d50326SJeff Roberson * score; 1445e7d50326SJeff Roberson KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 14469a93305aSJeff Roberson ("sched_priority: invalid interactive priority %d score %d", 14479a93305aSJeff Roberson pri, score)); 1448e7d50326SJeff Roberson } else { 1449e7d50326SJeff Roberson pri = SCHED_PRI_MIN; 1450e7d50326SJeff Roberson if (td->td_sched->ts_ticks) 1451e7d50326SJeff Roberson pri += SCHED_PRI_TICKS(td->td_sched); 1452e7d50326SJeff Roberson pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1453ae7a6b38SJeff Roberson KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE, 1454ae7a6b38SJeff Roberson ("sched_priority: invalid priority %d: nice %d, " 1455ae7a6b38SJeff Roberson "ticks %d ftick %d ltick %d tick pri %d", 1456ae7a6b38SJeff Roberson pri, td->td_proc->p_nice, td->td_sched->ts_ticks, 1457ae7a6b38SJeff Roberson td->td_sched->ts_ftick, td->td_sched->ts_ltick, 1458ae7a6b38SJeff Roberson SCHED_PRI_TICKS(td->td_sched))); 1459e7d50326SJeff Roberson } 14608460a577SJohn Birrell sched_user_prio(td, pri); 146135e6168fSJeff Roberson 146215dc847eSJeff Roberson return; 146335e6168fSJeff Roberson } 146435e6168fSJeff Roberson 146535e6168fSJeff Roberson /* 1466d322132cSJeff Roberson * This routine enforces a maximum limit on the amount of scheduling history 1467ae7a6b38SJeff Roberson * kept. It is called after either the slptime or runtime is adjusted. This 1468ae7a6b38SJeff Roberson * function is ugly due to integer math. 1469d322132cSJeff Roberson */ 14704b60e324SJeff Roberson static void 14718460a577SJohn Birrell sched_interact_update(struct thread *td) 14724b60e324SJeff Roberson { 1473155b6ca1SJeff Roberson struct td_sched *ts; 14749a93305aSJeff Roberson u_int sum; 14753f741ca1SJeff Roberson 1476155b6ca1SJeff Roberson ts = td->td_sched; 1477ae7a6b38SJeff Roberson sum = ts->ts_runtime + ts->ts_slptime; 1478d322132cSJeff Roberson if (sum < SCHED_SLP_RUN_MAX) 1479d322132cSJeff Roberson return; 1480d322132cSJeff Roberson /* 1481155b6ca1SJeff Roberson * This only happens from two places: 1482155b6ca1SJeff Roberson * 1) We have added an unusual amount of run time from fork_exit. 1483155b6ca1SJeff Roberson * 2) We have added an unusual amount of sleep time from sched_sleep(). 1484155b6ca1SJeff Roberson */ 1485155b6ca1SJeff Roberson if (sum > SCHED_SLP_RUN_MAX * 2) { 1486ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1487ae7a6b38SJeff Roberson ts->ts_runtime = SCHED_SLP_RUN_MAX; 1488ae7a6b38SJeff Roberson ts->ts_slptime = 1; 1489155b6ca1SJeff Roberson } else { 1490ae7a6b38SJeff Roberson ts->ts_slptime = SCHED_SLP_RUN_MAX; 1491ae7a6b38SJeff Roberson ts->ts_runtime = 1; 1492155b6ca1SJeff Roberson } 1493155b6ca1SJeff Roberson return; 1494155b6ca1SJeff Roberson } 1495155b6ca1SJeff Roberson /* 1496d322132cSJeff Roberson * If we have exceeded by more than 1/5th then the algorithm below 1497d322132cSJeff Roberson * will not bring us back into range. 
Dividing by two here forces 14982454aaf5SJeff Roberson * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1499d322132cSJeff Roberson */ 150037a35e4aSJeff Roberson if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1501ae7a6b38SJeff Roberson ts->ts_runtime /= 2; 1502ae7a6b38SJeff Roberson ts->ts_slptime /= 2; 1503d322132cSJeff Roberson return; 1504d322132cSJeff Roberson } 1505ae7a6b38SJeff Roberson ts->ts_runtime = (ts->ts_runtime / 5) * 4; 1506ae7a6b38SJeff Roberson ts->ts_slptime = (ts->ts_slptime / 5) * 4; 1507d322132cSJeff Roberson } 1508d322132cSJeff Roberson 1509ae7a6b38SJeff Roberson /* 1510ae7a6b38SJeff Roberson * Scale back the interactivity history when a child thread is created. The 1511ae7a6b38SJeff Roberson * history is inherited from the parent but the thread may behave totally 1512ae7a6b38SJeff Roberson * differently. For example, a shell spawning a compiler process. We want 1513ae7a6b38SJeff Roberson * to learn that the compiler is behaving badly very quickly. 1514ae7a6b38SJeff Roberson */ 1515d322132cSJeff Roberson static void 15168460a577SJohn Birrell sched_interact_fork(struct thread *td) 1517d322132cSJeff Roberson { 1518d322132cSJeff Roberson int ratio; 1519d322132cSJeff Roberson int sum; 1520d322132cSJeff Roberson 1521ae7a6b38SJeff Roberson sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime; 1522d322132cSJeff Roberson if (sum > SCHED_SLP_RUN_FORK) { 1523d322132cSJeff Roberson ratio = sum / SCHED_SLP_RUN_FORK; 1524ae7a6b38SJeff Roberson td->td_sched->ts_runtime /= ratio; 1525ae7a6b38SJeff Roberson td->td_sched->ts_slptime /= ratio; 15264b60e324SJeff Roberson } 15274b60e324SJeff Roberson } 15284b60e324SJeff Roberson 152915dc847eSJeff Roberson /* 1530ae7a6b38SJeff Roberson * Called from proc0_init() to setup the scheduler fields. 1531ed062c8dSJulian Elischer */ 1532ed062c8dSJulian Elischer void 1533ed062c8dSJulian Elischer schedinit(void) 1534ed062c8dSJulian Elischer { 1535e7d50326SJeff Roberson 1536ed062c8dSJulian Elischer /* 1537ed062c8dSJulian Elischer * Set up the scheduler specific parts of proc0. 1538ed062c8dSJulian Elischer */ 1539ed062c8dSJulian Elischer proc0.p_sched = NULL; /* XXX */ 1540ad1e7d28SJulian Elischer thread0.td_sched = &td_sched0; 1541e7d50326SJeff Roberson td_sched0.ts_ltick = ticks; 15428ab80cf0SJeff Roberson td_sched0.ts_ftick = ticks; 1543ad1e7d28SJulian Elischer td_sched0.ts_thread = &thread0; 1544ed062c8dSJulian Elischer } 1545ed062c8dSJulian Elischer 1546ed062c8dSJulian Elischer /* 154715dc847eSJeff Roberson * This is only somewhat accurate since given many processes of the same 154815dc847eSJeff Roberson * priority they will switch when their slices run out, which will be 1549e7d50326SJeff Roberson * at most sched_slice stathz ticks. 155015dc847eSJeff Roberson */ 155135e6168fSJeff Roberson int 155235e6168fSJeff Roberson sched_rr_interval(void) 155335e6168fSJeff Roberson { 1554e7d50326SJeff Roberson 1555e7d50326SJeff Roberson /* Convert sched_slice to hz */ 1556e7d50326SJeff Roberson return (hz/(realstathz/sched_slice)); 155735e6168fSJeff Roberson } 155835e6168fSJeff Roberson 1559ae7a6b38SJeff Roberson /* 1560ae7a6b38SJeff Roberson * Update the percent cpu tracking information when it is requested or 1561ae7a6b38SJeff Roberson * the total history exceeds the maximum. We keep a sliding history of 1562ae7a6b38SJeff Roberson * tick counts that slowly decays. This is less precise than the 4BSD 1563ae7a6b38SJeff Roberson * mechanism since it happens with less regular and frequent events. 
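 *
 * A compact sketch of the clamping performed by sched_interact_update()
 * above; run_max is a stand-in for SCHED_SLP_RUN_MAX and the two counters
 * are passed by pointer so the scaling is visible:
 *
 *	static void
 *	interact_clamp(unsigned int *runtime, unsigned int *slptime,
 *	    unsigned int run_max)
 *	{
 *		unsigned int sum;
 *
 *		sum = *runtime + *slptime;
 *		if (sum < run_max)
 *			return;
 *		if (sum > run_max * 2) {
 *			if (*runtime > *slptime) {
 *				*runtime = run_max;
 *				*slptime = 1;
 *			} else {
 *				*slptime = run_max;
 *				*runtime = 1;
 *			}
 *			return;
 *		}
 *		if (sum > (run_max / 5) * 6) {
 *			*runtime /= 2;
 *			*slptime /= 2;
 *			return;
 *		}
 *		*runtime = (*runtime / 5) * 4;
 *		*slptime = (*slptime / 5) * 4;
 *	}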
1564ae7a6b38SJeff Roberson */ 156522bf7d9aSJeff Roberson static void 1566ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts) 156735e6168fSJeff Roberson { 1568e7d50326SJeff Roberson 1569e7d50326SJeff Roberson if (ts->ts_ticks == 0) 1570e7d50326SJeff Roberson return; 15718ab80cf0SJeff Roberson if (ticks - (hz / 10) < ts->ts_ltick && 15728ab80cf0SJeff Roberson SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 15738ab80cf0SJeff Roberson return; 157435e6168fSJeff Roberson /* 157535e6168fSJeff Roberson * Adjust counters and watermark for pctcpu calc. 1576210491d3SJeff Roberson */ 1577e7d50326SJeff Roberson if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1578ad1e7d28SJulian Elischer ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1579e7d50326SJeff Roberson SCHED_TICK_TARG; 1580e7d50326SJeff Roberson else 1581ad1e7d28SJulian Elischer ts->ts_ticks = 0; 1582ad1e7d28SJulian Elischer ts->ts_ltick = ticks; 1583e7d50326SJeff Roberson ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 158435e6168fSJeff Roberson } 158535e6168fSJeff Roberson 1586ae7a6b38SJeff Roberson /* 1587ae7a6b38SJeff Roberson * Adjust the priority of a thread. Move it to the appropriate run-queue 1588ae7a6b38SJeff Roberson * if necessary. This is the back-end for several priority related 1589ae7a6b38SJeff Roberson * functions. 1590ae7a6b38SJeff Roberson */ 1591e7d50326SJeff Roberson static void 1592f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio) 159335e6168fSJeff Roberson { 1594ad1e7d28SJulian Elischer struct td_sched *ts; 159535e6168fSJeff Roberson 159681d47d3fSJeff Roberson CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 159781d47d3fSJeff Roberson td, td->td_proc->p_comm, td->td_priority, prio, curthread, 159881d47d3fSJeff Roberson curthread->td_proc->p_comm); 1599ad1e7d28SJulian Elischer ts = td->td_sched; 16007b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1601f5c157d9SJohn Baldwin if (td->td_priority == prio) 1602f5c157d9SJohn Baldwin return; 1603e7d50326SJeff Roberson 16043f872f85SJeff Roberson if (TD_ON_RUNQ(td) && prio < td->td_priority) { 16053f741ca1SJeff Roberson /* 16063f741ca1SJeff Roberson * If the priority has been elevated due to priority 16073f741ca1SJeff Roberson * propagation, we may have to move ourselves to a new 1608e7d50326SJeff Roberson * queue. This could be optimized to not re-add in some 1609e7d50326SJeff Roberson * cases. 1610f2b74cbfSJeff Roberson */ 1611e7d50326SJeff Roberson sched_rem(td); 1612e7d50326SJeff Roberson td->td_priority = prio; 1613ae7a6b38SJeff Roberson sched_add(td, SRQ_BORROWING); 1614ae7a6b38SJeff Roberson } else { 1615ae7a6b38SJeff Roberson #ifdef SMP 1616ae7a6b38SJeff Roberson struct tdq *tdq; 1617ae7a6b38SJeff Roberson 1618ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 1619ae7a6b38SJeff Roberson if (prio < tdq->tdq_lowpri) 1620ae7a6b38SJeff Roberson tdq->tdq_lowpri = prio; 1621ae7a6b38SJeff Roberson #endif 16223f741ca1SJeff Roberson td->td_priority = prio; 162335e6168fSJeff Roberson } 1624ae7a6b38SJeff Roberson } 162535e6168fSJeff Roberson 1626f5c157d9SJohn Baldwin /* 1627f5c157d9SJohn Baldwin * Update a thread's priority when it is lent another thread's 1628f5c157d9SJohn Baldwin * priority. 
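 *
 * The rescaling step in sched_pctcpu_update() above keeps roughly
 * SCHED_TICK_TARG ticks of history: if the last accounted tick is still
 * inside the window the count is scaled to the target width, otherwise it
 * is discarded.  A sketch with the early-out checks of the real function
 * omitted and now > *ftick assumed (targ stands in for SCHED_TICK_TARG):
 *
 *	static void
 *	pctcpu_rescale(int *tcnt, int *ftick, int *ltick, int now, int targ)
 *	{
 *		if (*ltick > now - targ)
 *			*tcnt = (*tcnt / (now - *ftick)) * targ;
 *		else
 *			*tcnt = 0;
 *		*ltick = now;
 *		*ftick = now - targ;
 *	}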
1629f5c157d9SJohn Baldwin */
1630f5c157d9SJohn Baldwin void
1631f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1632f5c157d9SJohn Baldwin {
1633f5c157d9SJohn Baldwin 
1634f5c157d9SJohn Baldwin td->td_flags |= TDF_BORROWING;
1635f5c157d9SJohn Baldwin sched_thread_priority(td, prio);
1636f5c157d9SJohn Baldwin }
1637f5c157d9SJohn Baldwin 
1638f5c157d9SJohn Baldwin /*
1639f5c157d9SJohn Baldwin * Restore a thread's priority when priority propagation is
1640f5c157d9SJohn Baldwin * over. The prio argument is the minimum priority the thread
1641f5c157d9SJohn Baldwin * needs to have to satisfy other possible priority lending
1642f5c157d9SJohn Baldwin * requests. If the thread's regular priority is less
1643f5c157d9SJohn Baldwin * important than prio, the thread will keep a priority boost
1644f5c157d9SJohn Baldwin * of prio.
1645f5c157d9SJohn Baldwin */
1646f5c157d9SJohn Baldwin void
1647f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1648f5c157d9SJohn Baldwin {
1649f5c157d9SJohn Baldwin u_char base_pri;
1650f5c157d9SJohn Baldwin 
1651f5c157d9SJohn Baldwin if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1652f5c157d9SJohn Baldwin td->td_base_pri <= PRI_MAX_TIMESHARE)
16538460a577SJohn Birrell base_pri = td->td_user_pri;
1654f5c157d9SJohn Baldwin else
1655f5c157d9SJohn Baldwin base_pri = td->td_base_pri;
1656f5c157d9SJohn Baldwin if (prio >= base_pri) {
1657f5c157d9SJohn Baldwin td->td_flags &= ~TDF_BORROWING;
1658f5c157d9SJohn Baldwin sched_thread_priority(td, base_pri);
1659f5c157d9SJohn Baldwin } else
1660f5c157d9SJohn Baldwin sched_lend_prio(td, prio);
1661f5c157d9SJohn Baldwin }
1662f5c157d9SJohn Baldwin 
1663ae7a6b38SJeff Roberson /*
1664ae7a6b38SJeff Roberson * Standard entry for setting the priority to an absolute value.
1665ae7a6b38SJeff Roberson */
1666f5c157d9SJohn Baldwin void
1667f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1668f5c157d9SJohn Baldwin {
1669f5c157d9SJohn Baldwin u_char oldprio;
1670f5c157d9SJohn Baldwin 
1671f5c157d9SJohn Baldwin /* First, update the base priority. */
1672f5c157d9SJohn Baldwin td->td_base_pri = prio;
1673f5c157d9SJohn Baldwin 
1674f5c157d9SJohn Baldwin /*
167550aaa791SJohn Baldwin * If the thread is borrowing another thread's priority, don't
1676f5c157d9SJohn Baldwin * ever lower the priority.
1677f5c157d9SJohn Baldwin */
1678f5c157d9SJohn Baldwin if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1679f5c157d9SJohn Baldwin return;
1680f5c157d9SJohn Baldwin 
1681f5c157d9SJohn Baldwin /* Change the real priority. */
1682f5c157d9SJohn Baldwin oldprio = td->td_priority;
1683f5c157d9SJohn Baldwin sched_thread_priority(td, prio);
1684f5c157d9SJohn Baldwin 
1685f5c157d9SJohn Baldwin /*
1686f5c157d9SJohn Baldwin * If the thread is on a turnstile, then let the turnstile update
1687f5c157d9SJohn Baldwin * its state.
1688f5c157d9SJohn Baldwin */
1689f5c157d9SJohn Baldwin if (TD_ON_LOCK(td) && oldprio != prio)
1690f5c157d9SJohn Baldwin turnstile_adjust(td, oldprio);
1691f5c157d9SJohn Baldwin }
1692f5c157d9SJohn Baldwin 
1693ae7a6b38SJeff Roberson /*
1694ae7a6b38SJeff Roberson * Set the base user priority; it does not affect the current running priority.
1695ae7a6b38SJeff Roberson */ 169635e6168fSJeff Roberson void 16978460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio) 16983db720fdSDavid Xu { 16993db720fdSDavid Xu u_char oldprio; 17003db720fdSDavid Xu 17018460a577SJohn Birrell td->td_base_user_pri = prio; 1702fc6c30f6SJulian Elischer if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1703fc6c30f6SJulian Elischer return; 17048460a577SJohn Birrell oldprio = td->td_user_pri; 17058460a577SJohn Birrell td->td_user_pri = prio; 17063db720fdSDavid Xu 17073db720fdSDavid Xu if (TD_ON_UPILOCK(td) && oldprio != prio) 17083db720fdSDavid Xu umtx_pi_adjust(td, oldprio); 17093db720fdSDavid Xu } 17103db720fdSDavid Xu 17113db720fdSDavid Xu void 17123db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio) 17133db720fdSDavid Xu { 17143db720fdSDavid Xu u_char oldprio; 17153db720fdSDavid Xu 17163db720fdSDavid Xu td->td_flags |= TDF_UBORROWING; 17173db720fdSDavid Xu 1718f645b5daSMaxim Konovalov oldprio = td->td_user_pri; 17198460a577SJohn Birrell td->td_user_pri = prio; 17203db720fdSDavid Xu 17213db720fdSDavid Xu if (TD_ON_UPILOCK(td) && oldprio != prio) 17223db720fdSDavid Xu umtx_pi_adjust(td, oldprio); 17233db720fdSDavid Xu } 17243db720fdSDavid Xu 17253db720fdSDavid Xu void 17263db720fdSDavid Xu sched_unlend_user_prio(struct thread *td, u_char prio) 17273db720fdSDavid Xu { 17283db720fdSDavid Xu u_char base_pri; 17293db720fdSDavid Xu 17308460a577SJohn Birrell base_pri = td->td_base_user_pri; 17313db720fdSDavid Xu if (prio >= base_pri) { 17323db720fdSDavid Xu td->td_flags &= ~TDF_UBORROWING; 17338460a577SJohn Birrell sched_user_prio(td, base_pri); 17343db720fdSDavid Xu } else 17353db720fdSDavid Xu sched_lend_user_prio(td, prio); 17363db720fdSDavid Xu } 17373db720fdSDavid Xu 1738ae7a6b38SJeff Roberson /* 173908c9a16cSJeff Roberson * Add the thread passed as 'newtd' to the run queue before selecting 174008c9a16cSJeff Roberson * the next thread to run. This is only used for KSE. 174108c9a16cSJeff Roberson */ 174208c9a16cSJeff Roberson static void 174308c9a16cSJeff Roberson sched_switchin(struct tdq *tdq, struct thread *td) 174408c9a16cSJeff Roberson { 174508c9a16cSJeff Roberson #ifdef SMP 174608c9a16cSJeff Roberson spinlock_enter(); 174708c9a16cSJeff Roberson TDQ_UNLOCK(tdq); 174808c9a16cSJeff Roberson thread_lock(td); 174908c9a16cSJeff Roberson spinlock_exit(); 175008c9a16cSJeff Roberson sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING); 175108c9a16cSJeff Roberson #else 175208c9a16cSJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 175308c9a16cSJeff Roberson #endif 175408c9a16cSJeff Roberson tdq_add(tdq, td, SRQ_YIELDING); 175508c9a16cSJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 175608c9a16cSJeff Roberson } 175708c9a16cSJeff Roberson 175808c9a16cSJeff Roberson /* 1759c47f202bSJeff Roberson * Handle migration from sched_switch(). This happens only for 1760c47f202bSJeff Roberson * cpu binding. 1761c47f202bSJeff Roberson */ 1762c47f202bSJeff Roberson static struct mtx * 1763c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1764c47f202bSJeff Roberson { 1765c47f202bSJeff Roberson struct tdq *tdn; 1766c47f202bSJeff Roberson 1767c47f202bSJeff Roberson tdn = TDQ_CPU(td->td_sched->ts_cpu); 1768c47f202bSJeff Roberson #ifdef SMP 1769c47f202bSJeff Roberson /* 1770c47f202bSJeff Roberson * Do the lock dance required to avoid LOR. 
We grab an extra 1771c47f202bSJeff Roberson * spinlock nesting to prevent preemption while we're 1772c47f202bSJeff Roberson * not holding either run-queue lock. 1773c47f202bSJeff Roberson */ 1774c47f202bSJeff Roberson spinlock_enter(); 1775c47f202bSJeff Roberson thread_block_switch(td); /* This releases the lock on tdq. */ 1776c47f202bSJeff Roberson TDQ_LOCK(tdn); 1777c47f202bSJeff Roberson tdq_add(tdn, td, flags); 1778c47f202bSJeff Roberson tdq_notify(td->td_sched); 1779c47f202bSJeff Roberson /* 1780c47f202bSJeff Roberson * After we unlock tdn the new cpu still can't switch into this 1781c47f202bSJeff Roberson * thread until we've unblocked it in cpu_switch(). The lock 1782c47f202bSJeff Roberson * pointers may match in the case of HTT cores. Don't unlock here 1783c47f202bSJeff Roberson * or we can deadlock when the other CPU runs the IPI handler. 1784c47f202bSJeff Roberson */ 1785c47f202bSJeff Roberson if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1786c47f202bSJeff Roberson TDQ_UNLOCK(tdn); 1787c47f202bSJeff Roberson TDQ_LOCK(tdq); 1788c47f202bSJeff Roberson } 1789c47f202bSJeff Roberson spinlock_exit(); 1790c47f202bSJeff Roberson #endif 1791c47f202bSJeff Roberson return (TDQ_LOCKPTR(tdn)); 1792c47f202bSJeff Roberson } 1793c47f202bSJeff Roberson 1794c47f202bSJeff Roberson /* 1795ae7a6b38SJeff Roberson * Block a thread for switching. Similar to thread_block() but does not 1796ae7a6b38SJeff Roberson * bump the spin count. 1797ae7a6b38SJeff Roberson */ 1798ae7a6b38SJeff Roberson static inline struct mtx * 1799ae7a6b38SJeff Roberson thread_block_switch(struct thread *td) 1800ae7a6b38SJeff Roberson { 1801ae7a6b38SJeff Roberson struct mtx *lock; 1802ae7a6b38SJeff Roberson 1803ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1804ae7a6b38SJeff Roberson lock = td->td_lock; 1805ae7a6b38SJeff Roberson td->td_lock = &blocked_lock; 1806ae7a6b38SJeff Roberson mtx_unlock_spin(lock); 1807ae7a6b38SJeff Roberson 1808ae7a6b38SJeff Roberson return (lock); 1809ae7a6b38SJeff Roberson } 1810ae7a6b38SJeff Roberson 1811ae7a6b38SJeff Roberson /* 1812ae7a6b38SJeff Roberson * Release a thread that was blocked with thread_block_switch(). 1813ae7a6b38SJeff Roberson */ 1814ae7a6b38SJeff Roberson static inline void 1815ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx) 1816ae7a6b38SJeff Roberson { 1817ae7a6b38SJeff Roberson atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1818ae7a6b38SJeff Roberson (uintptr_t)mtx); 1819ae7a6b38SJeff Roberson } 1820ae7a6b38SJeff Roberson 1821ae7a6b38SJeff Roberson /* 1822ae7a6b38SJeff Roberson * Switch threads. This function has to handle threads coming in while 1823ae7a6b38SJeff Roberson * blocked for some reason, running, or idle. It also must deal with 1824ae7a6b38SJeff Roberson * migrating a thread from one queue to another as running threads may 1825ae7a6b38SJeff Roberson * be assigned elsewhere via binding. 
1826ae7a6b38SJeff Roberson */ 18273db720fdSDavid Xu void 18283389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags) 182935e6168fSJeff Roberson { 1830c02bbb43SJeff Roberson struct tdq *tdq; 1831ad1e7d28SJulian Elischer struct td_sched *ts; 1832ae7a6b38SJeff Roberson struct mtx *mtx; 1833c47f202bSJeff Roberson int srqflag; 1834ae7a6b38SJeff Roberson int cpuid; 183535e6168fSJeff Roberson 18367b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 183735e6168fSJeff Roberson 1838ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1839ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1840e7d50326SJeff Roberson ts = td->td_sched; 1841c47f202bSJeff Roberson mtx = td->td_lock; 1842ae7a6b38SJeff Roberson #ifdef SMP 1843ae7a6b38SJeff Roberson ts->ts_rltick = ticks; 1844ae7a6b38SJeff Roberson if (newtd && newtd->td_priority < tdq->tdq_lowpri) 1845ae7a6b38SJeff Roberson tdq->tdq_lowpri = newtd->td_priority; 1846ae7a6b38SJeff Roberson #endif 1847060563ecSJulian Elischer td->td_lastcpu = td->td_oncpu; 1848060563ecSJulian Elischer td->td_oncpu = NOCPU; 184952eb8464SJohn Baldwin td->td_flags &= ~TDF_NEEDRESCHED; 185077918643SStephan Uphoff td->td_owepreempt = 0; 1851b11fdad0SJeff Roberson /* 1852ae7a6b38SJeff Roberson * The lock pointer in an idle thread should never change. Reset it 1853ae7a6b38SJeff Roberson * to CAN_RUN as well. 1854b11fdad0SJeff Roberson */ 1855486a9414SJulian Elischer if (TD_IS_IDLETHREAD(td)) { 1856ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1857bf0acc27SJohn Baldwin TD_SET_CAN_RUN(td); 18587b20fb19SJeff Roberson } else if (TD_IS_RUNNING(td)) { 1859ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 18607b20fb19SJeff Roberson tdq_load_rem(tdq, ts); 1861c47f202bSJeff Roberson srqflag = (flags & SW_PREEMPT) ? 1862598b368dSJeff Roberson SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1863c47f202bSJeff Roberson SRQ_OURSELF|SRQ_YIELDING; 1864c47f202bSJeff Roberson if (ts->ts_cpu == cpuid) 1865c47f202bSJeff Roberson tdq_add(tdq, td, srqflag); 1866c47f202bSJeff Roberson else 1867c47f202bSJeff Roberson mtx = sched_switch_migrate(tdq, td, srqflag); 1868ae7a6b38SJeff Roberson } else { 1869ae7a6b38SJeff Roberson /* This thread must be going to sleep. */ 1870ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1871ae7a6b38SJeff Roberson mtx = thread_block_switch(td); 1872ae7a6b38SJeff Roberson tdq_load_rem(tdq, ts); 1873ae7a6b38SJeff Roberson } 1874ae7a6b38SJeff Roberson /* 1875ae7a6b38SJeff Roberson * We enter here with the thread blocked and assigned to the 1876ae7a6b38SJeff Roberson * appropriate cpu run-queue or sleep-queue and with the current 1877ae7a6b38SJeff Roberson * thread-queue locked. 1878ae7a6b38SJeff Roberson */ 1879ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1880ae7a6b38SJeff Roberson /* 188108c9a16cSJeff Roberson * If KSE assigned a new thread just add it here and let choosethread 188208c9a16cSJeff Roberson * select the best one. 1883ae7a6b38SJeff Roberson */ 188408c9a16cSJeff Roberson if (newtd != NULL) 188508c9a16cSJeff Roberson sched_switchin(tdq, newtd); 18862454aaf5SJeff Roberson newtd = choosethread(); 1887ae7a6b38SJeff Roberson /* 1888ae7a6b38SJeff Roberson * Call the MD code to switch contexts if necessary. 
1889ae7a6b38SJeff Roberson */ 1890ebccf1e3SJoseph Koshy if (td != newtd) { 1891ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1892ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1893ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1894ebccf1e3SJoseph Koshy #endif 189559c68134SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1896ae7a6b38SJeff Roberson cpu_switch(td, newtd, mtx); 1897ae7a6b38SJeff Roberson /* 1898ae7a6b38SJeff Roberson * We may return from cpu_switch on a different cpu. However, 1899ae7a6b38SJeff Roberson * we always return with td_lock pointing to the current cpu's 1900ae7a6b38SJeff Roberson * run queue lock. 1901ae7a6b38SJeff Roberson */ 1902ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1903ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1904ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1905ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1906ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1907ebccf1e3SJoseph Koshy #endif 1908ae7a6b38SJeff Roberson } else 1909ae7a6b38SJeff Roberson thread_unblock_switch(td, mtx); 1910ae7a6b38SJeff Roberson /* 1911ae7a6b38SJeff Roberson * Assert that all went well and return. 1912ae7a6b38SJeff Roberson */ 1913ae7a6b38SJeff Roberson #ifdef SMP 1914ae7a6b38SJeff Roberson /* We should always get here with the lowest priority td possible */ 1915ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority; 1916ae7a6b38SJeff Roberson #endif 1917ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1918ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1919ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 192035e6168fSJeff Roberson } 192135e6168fSJeff Roberson 1922ae7a6b38SJeff Roberson /* 1923ae7a6b38SJeff Roberson * Adjust thread priorities as a result of a nice request. 1924ae7a6b38SJeff Roberson */ 192535e6168fSJeff Roberson void 1926fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 192735e6168fSJeff Roberson { 192835e6168fSJeff Roberson struct thread *td; 192935e6168fSJeff Roberson 1930fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 19317b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED); 1932e7d50326SJeff Roberson 1933fa885116SJulian Elischer p->p_nice = nice; 19348460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 19357b20fb19SJeff Roberson thread_lock(td); 19368460a577SJohn Birrell sched_priority(td); 1937e7d50326SJeff Roberson sched_prio(td, td->td_base_user_pri); 19387b20fb19SJeff Roberson thread_unlock(td); 193935e6168fSJeff Roberson } 1940fa885116SJulian Elischer } 194135e6168fSJeff Roberson 1942ae7a6b38SJeff Roberson /* 1943ae7a6b38SJeff Roberson * Record the sleep time for the interactivity scorer. 1944ae7a6b38SJeff Roberson */ 194535e6168fSJeff Roberson void 194644f3b092SJohn Baldwin sched_sleep(struct thread *td) 194735e6168fSJeff Roberson { 1948e7d50326SJeff Roberson 19497b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 195035e6168fSJeff Roberson 195154b0e65fSJeff Roberson td->td_slptick = ticks; 195235e6168fSJeff Roberson } 195335e6168fSJeff Roberson 1954ae7a6b38SJeff Roberson /* 1955ae7a6b38SJeff Roberson * Schedule a thread to resume execution and record how long it voluntarily 1956ae7a6b38SJeff Roberson * slept. We also update the pctcpu, interactivity, and priority. 
1957ae7a6b38SJeff Roberson */ 195835e6168fSJeff Roberson void 195935e6168fSJeff Roberson sched_wakeup(struct thread *td) 196035e6168fSJeff Roberson { 196114618990SJeff Roberson struct td_sched *ts; 1962ae7a6b38SJeff Roberson int slptick; 1963e7d50326SJeff Roberson 19647b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 196514618990SJeff Roberson ts = td->td_sched; 196635e6168fSJeff Roberson /* 1967e7d50326SJeff Roberson * If we slept for more than a tick update our interactivity and 1968e7d50326SJeff Roberson * priority. 196935e6168fSJeff Roberson */ 197054b0e65fSJeff Roberson slptick = td->td_slptick; 197154b0e65fSJeff Roberson td->td_slptick = 0; 1972ae7a6b38SJeff Roberson if (slptick && slptick != ticks) { 19739a93305aSJeff Roberson u_int hzticks; 1974f1e8dc4aSJeff Roberson 1975ae7a6b38SJeff Roberson hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1976ae7a6b38SJeff Roberson ts->ts_slptime += hzticks; 19778460a577SJohn Birrell sched_interact_update(td); 197814618990SJeff Roberson sched_pctcpu_update(ts); 19798460a577SJohn Birrell sched_priority(td); 1980f1e8dc4aSJeff Roberson } 198114618990SJeff Roberson /* Reset the slice value after we sleep. */ 198214618990SJeff Roberson ts->ts_slice = sched_slice; 19837a5e5e2aSJeff Roberson sched_add(td, SRQ_BORING); 198435e6168fSJeff Roberson } 198535e6168fSJeff Roberson 198635e6168fSJeff Roberson /* 198735e6168fSJeff Roberson * Penalize the parent for creating a new child and initialize the child's 198835e6168fSJeff Roberson * priority. 198935e6168fSJeff Roberson */ 199035e6168fSJeff Roberson void 19918460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child) 199215dc847eSJeff Roberson { 19937b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1994ad1e7d28SJulian Elischer sched_fork_thread(td, child); 1995e7d50326SJeff Roberson /* 1996e7d50326SJeff Roberson * Penalize the parent and child for forking. 1997e7d50326SJeff Roberson */ 1998e7d50326SJeff Roberson sched_interact_fork(child); 1999e7d50326SJeff Roberson sched_priority(child); 2000ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr; 2001e7d50326SJeff Roberson sched_interact_update(td); 2002e7d50326SJeff Roberson sched_priority(td); 2003ad1e7d28SJulian Elischer } 2004ad1e7d28SJulian Elischer 2005ae7a6b38SJeff Roberson /* 2006ae7a6b38SJeff Roberson * Fork a new thread, may be within the same process. 2007ae7a6b38SJeff Roberson */ 2008ad1e7d28SJulian Elischer void 2009ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child) 2010ad1e7d28SJulian Elischer { 2011ad1e7d28SJulian Elischer struct td_sched *ts; 2012ad1e7d28SJulian Elischer struct td_sched *ts2; 20138460a577SJohn Birrell 2014e7d50326SJeff Roberson /* 2015e7d50326SJeff Roberson * Initialize child. 2016e7d50326SJeff Roberson */ 20177b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2018ed062c8dSJulian Elischer sched_newthread(child); 2019ae7a6b38SJeff Roberson child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 2020ad1e7d28SJulian Elischer ts = td->td_sched; 2021ad1e7d28SJulian Elischer ts2 = child->td_sched; 2022ad1e7d28SJulian Elischer ts2->ts_cpu = ts->ts_cpu; 2023ad1e7d28SJulian Elischer ts2->ts_runq = NULL; 2024e7d50326SJeff Roberson /* 2025e7d50326SJeff Roberson * Grab our parents cpu estimation information and priority. 
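 *
 * The sleep credit added by sched_wakeup() above is just the number of hz
 * ticks spent asleep, shifted into the same fixed-point scale the other
 * interactivity counters use.  A small sketch, with shift standing in for
 * SCHED_TICK_SHIFT:
 *
 *	static unsigned int
 *	wakeup_sleep_credit(int slptick, int now, int shift)
 *	{
 *		if (slptick == 0 || slptick == now)
 *			return (0);
 *		return ((unsigned int)(now - slptick) << shift);
 *	}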
2026e7d50326SJeff Roberson */ 2027ad1e7d28SJulian Elischer ts2->ts_ticks = ts->ts_ticks; 2028ad1e7d28SJulian Elischer ts2->ts_ltick = ts->ts_ltick; 2029ad1e7d28SJulian Elischer ts2->ts_ftick = ts->ts_ftick; 2030e7d50326SJeff Roberson child->td_user_pri = td->td_user_pri; 2031e7d50326SJeff Roberson child->td_base_user_pri = td->td_base_user_pri; 2032e7d50326SJeff Roberson /* 2033e7d50326SJeff Roberson * And update interactivity score. 2034e7d50326SJeff Roberson */ 2035ae7a6b38SJeff Roberson ts2->ts_slptime = ts->ts_slptime; 2036ae7a6b38SJeff Roberson ts2->ts_runtime = ts->ts_runtime; 2037e7d50326SJeff Roberson ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 203815dc847eSJeff Roberson } 203915dc847eSJeff Roberson 2040ae7a6b38SJeff Roberson /* 2041ae7a6b38SJeff Roberson * Adjust the priority class of a thread. 2042ae7a6b38SJeff Roberson */ 204315dc847eSJeff Roberson void 20448460a577SJohn Birrell sched_class(struct thread *td, int class) 204515dc847eSJeff Roberson { 204615dc847eSJeff Roberson 20477b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 20488460a577SJohn Birrell if (td->td_pri_class == class) 204915dc847eSJeff Roberson return; 205015dc847eSJeff Roberson 2051ef1134c9SJeff Roberson #ifdef SMP 2052155b9987SJeff Roberson /* 2053155b9987SJeff Roberson * On SMP if we're on the RUNQ we must adjust the transferable 2054155b9987SJeff Roberson * count because could be changing to or from an interrupt 2055155b9987SJeff Roberson * class. 2056155b9987SJeff Roberson */ 20577a5e5e2aSJeff Roberson if (TD_ON_RUNQ(td)) { 20581e516cf5SJeff Roberson struct tdq *tdq; 20591e516cf5SJeff Roberson 20601e516cf5SJeff Roberson tdq = TDQ_CPU(td->td_sched->ts_cpu); 20611e516cf5SJeff Roberson if (THREAD_CAN_MIGRATE(td)) { 2062d2ad694cSJeff Roberson tdq->tdq_transferable--; 2063d2ad694cSJeff Roberson tdq->tdq_group->tdg_transferable--; 206480f86c9fSJeff Roberson } 20651e516cf5SJeff Roberson td->td_pri_class = class; 20661e516cf5SJeff Roberson if (THREAD_CAN_MIGRATE(td)) { 2067d2ad694cSJeff Roberson tdq->tdq_transferable++; 2068d2ad694cSJeff Roberson tdq->tdq_group->tdg_transferable++; 206980f86c9fSJeff Roberson } 2070155b9987SJeff Roberson } 2071ef1134c9SJeff Roberson #endif 20728460a577SJohn Birrell td->td_pri_class = class; 207335e6168fSJeff Roberson } 207435e6168fSJeff Roberson 207535e6168fSJeff Roberson /* 207635e6168fSJeff Roberson * Return some of the child's priority and interactivity to the parent. 207735e6168fSJeff Roberson */ 207835e6168fSJeff Roberson void 2079fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child) 208035e6168fSJeff Roberson { 2081e7d50326SJeff Roberson struct thread *td; 2082141ad61cSJeff Roberson 20838460a577SJohn Birrell CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 2084fc6c30f6SJulian Elischer child, child->td_proc->p_comm, child->td_priority); 20858460a577SJohn Birrell 20867b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED); 2087e7d50326SJeff Roberson td = FIRST_THREAD_IN_PROC(p); 2088e7d50326SJeff Roberson sched_exit_thread(td, child); 2089ad1e7d28SJulian Elischer } 2090ad1e7d28SJulian Elischer 2091ae7a6b38SJeff Roberson /* 2092ae7a6b38SJeff Roberson * Penalize another thread for the time spent on this one. This helps to 2093ae7a6b38SJeff Roberson * worsen the priority and interactivity of processes which schedule batch 2094ae7a6b38SJeff Roberson * jobs such as make. This has little effect on the make process itself but 2095ae7a6b38SJeff Roberson * causes new processes spawned by it to receive worse scores immediately. 
2096ae7a6b38SJeff Roberson */ 2097ad1e7d28SJulian Elischer void 2098fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child) 2099ad1e7d28SJulian Elischer { 2100fc6c30f6SJulian Elischer 2101e7d50326SJeff Roberson CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 2102e7d50326SJeff Roberson child, child->td_proc->p_comm, child->td_priority); 2103e7d50326SJeff Roberson 2104e7d50326SJeff Roberson #ifdef KSE 2105e7d50326SJeff Roberson /* 2106e7d50326SJeff Roberson * KSE forks and exits so often that this penalty causes short-lived 2107e7d50326SJeff Roberson * threads to always be non-interactive. This causes mozilla to 2108e7d50326SJeff Roberson * crawl under load. 2109e7d50326SJeff Roberson */ 2110e7d50326SJeff Roberson if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc) 2111e7d50326SJeff Roberson return; 2112e7d50326SJeff Roberson #endif 2113e7d50326SJeff Roberson /* 2114e7d50326SJeff Roberson * Give the child's runtime to the parent without returning the 2115e7d50326SJeff Roberson * sleep time as a penalty to the parent. This causes shells that 2116e7d50326SJeff Roberson * launch expensive things to mark their children as expensive. 2117e7d50326SJeff Roberson */ 21187b20fb19SJeff Roberson thread_lock(td); 2119ae7a6b38SJeff Roberson td->td_sched->ts_runtime += child->td_sched->ts_runtime; 2120fc6c30f6SJulian Elischer sched_interact_update(td); 2121e7d50326SJeff Roberson sched_priority(td); 21227b20fb19SJeff Roberson thread_unlock(td); 2123ad1e7d28SJulian Elischer } 2124ad1e7d28SJulian Elischer 2125ae7a6b38SJeff Roberson /* 2126ae7a6b38SJeff Roberson * Fix priorities on return to user-space. Priorities may be elevated due 2127ae7a6b38SJeff Roberson * to static priorities in msleep() or similar. 2128ae7a6b38SJeff Roberson */ 2129ad1e7d28SJulian Elischer void 2130ad1e7d28SJulian Elischer sched_userret(struct thread *td) 2131ad1e7d28SJulian Elischer { 2132ad1e7d28SJulian Elischer /* 2133ad1e7d28SJulian Elischer * XXX we cheat slightly on the locking here to avoid locking in 2134ad1e7d28SJulian Elischer * the usual case. Setting td_priority here is essentially an 2135ad1e7d28SJulian Elischer * incomplete workaround for not setting it properly elsewhere. 2136ad1e7d28SJulian Elischer * Now that some interrupt handlers are threads, not setting it 2137ad1e7d28SJulian Elischer * properly elsewhere can clobber it in the window between setting 2138ad1e7d28SJulian Elischer * it here and returning to user mode, so don't waste time setting 2139ad1e7d28SJulian Elischer * it perfectly here. 2140ad1e7d28SJulian Elischer */ 2141ad1e7d28SJulian Elischer KASSERT((td->td_flags & TDF_BORROWING) == 0, 2142ad1e7d28SJulian Elischer ("thread with borrowed priority returning to userland")); 2143ad1e7d28SJulian Elischer if (td->td_priority != td->td_user_pri) { 21447b20fb19SJeff Roberson thread_lock(td); 2145ad1e7d28SJulian Elischer td->td_priority = td->td_user_pri; 2146ad1e7d28SJulian Elischer td->td_base_pri = td->td_user_pri; 21477b20fb19SJeff Roberson thread_unlock(td); 2148ad1e7d28SJulian Elischer } 214935e6168fSJeff Roberson } 215035e6168fSJeff Roberson 2151ae7a6b38SJeff Roberson /* 2152ae7a6b38SJeff Roberson * Handle a stathz tick. This is really only relevant for timeshare 2153ae7a6b38SJeff Roberson * threads. 
2154ae7a6b38SJeff Roberson */
215535e6168fSJeff Roberson void
21567cf90fb3SJeff Roberson sched_clock(struct thread *td)
215735e6168fSJeff Roberson {
2158ad1e7d28SJulian Elischer struct tdq *tdq;
2159ad1e7d28SJulian Elischer struct td_sched *ts;
216035e6168fSJeff Roberson 
2161ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED);
21623f872f85SJeff Roberson tdq = TDQ_SELF();
21637fcf154aSJeff Roberson #ifdef SMP
21647fcf154aSJeff Roberson /*
21657fcf154aSJeff Roberson * We run the long term load balancer infrequently on the first cpu.
21667fcf154aSJeff Roberson */
21677fcf154aSJeff Roberson if (balance_tdq == tdq) {
21687fcf154aSJeff Roberson if (balance_ticks && --balance_ticks == 0)
21697fcf154aSJeff Roberson sched_balance();
21707fcf154aSJeff Roberson if (balance_group_ticks && --balance_group_ticks == 0)
21717fcf154aSJeff Roberson sched_balance_groups();
21727fcf154aSJeff Roberson }
21737fcf154aSJeff Roberson #endif
21743f872f85SJeff Roberson /*
21753f872f85SJeff Roberson * Advance the insert index once for each tick to ensure that all
21763f872f85SJeff Roberson * threads get a chance to run.
21773f872f85SJeff Roberson */
21783f872f85SJeff Roberson if (tdq->tdq_idx == tdq->tdq_ridx) {
21793f872f85SJeff Roberson tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
21803f872f85SJeff Roberson if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
21813f872f85SJeff Roberson tdq->tdq_ridx = tdq->tdq_idx;
21823f872f85SJeff Roberson }
21833f872f85SJeff Roberson ts = td->td_sched;
21843f741ca1SJeff Roberson /*
21858460a577SJohn Birrell * We only do slicing code for TIMESHARE threads.
2186a8949de2SJeff Roberson */
21878460a577SJohn Birrell if (td->td_pri_class != PRI_TIMESHARE)
2188a8949de2SJeff Roberson return;
2189a8949de2SJeff Roberson /*
21903f872f85SJeff Roberson * We used a tick; charge it to the thread so that we can compute our
219115dc847eSJeff Roberson * interactivity.
219215dc847eSJeff Roberson */
2193ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr;
21948460a577SJohn Birrell sched_interact_update(td);
219535e6168fSJeff Roberson /*
219635e6168fSJeff Roberson * We used up one time slice.
219735e6168fSJeff Roberson */
2198ad1e7d28SJulian Elischer if (--ts->ts_slice > 0)
219915dc847eSJeff Roberson return;
220035e6168fSJeff Roberson /*
220115dc847eSJeff Roberson * We're out of time, recompute priorities and requeue.
220235e6168fSJeff Roberson */
22038460a577SJohn Birrell sched_priority(td);
22044a338afdSJulian Elischer td->td_flags |= TDF_NEEDRESCHED;
220535e6168fSJeff Roberson }
220635e6168fSJeff Roberson 
2207ae7a6b38SJeff Roberson /*
2208ae7a6b38SJeff Roberson * Called once per hz tick. Used for cpu utilization information. This
2209ae7a6b38SJeff Roberson * is easier than trying to scale based on stathz.
2210ae7a6b38SJeff Roberson */
2211ae7a6b38SJeff Roberson void
2212ae7a6b38SJeff Roberson sched_tick(void)
2213ae7a6b38SJeff Roberson {
2214ae7a6b38SJeff Roberson struct td_sched *ts;
2215ae7a6b38SJeff Roberson 
2216ae7a6b38SJeff Roberson ts = curthread->td_sched;
2217ae7a6b38SJeff Roberson /* Adjust ticks for pctcpu */
2218ae7a6b38SJeff Roberson ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2219ae7a6b38SJeff Roberson ts->ts_ltick = ticks;
2220ae7a6b38SJeff Roberson /*
2221ae7a6b38SJeff Roberson * Update if we've exceeded our desired tick threshold by over one
2222ae7a6b38SJeff Roberson * second.
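 *
 * The index rotation in sched_clock() above behaves like a small calendar
 * queue: the insert index steps forward each tick while it still equals
 * the removal index, and the removal index follows it once its own bucket
 * has drained.  A sketch, with nqueues standing in for RQ_NQS and the
 * emptiness test supplied by the caller:
 *
 *	static void
 *	advance_calendar(int *idx, int *ridx, int nqueues, int ridx_empty)
 *	{
 *		if (*idx != *ridx)
 *			return;
 *		*idx = (*idx + 1) % nqueues;
 *		if (ridx_empty)
 *			*ridx = *idx;
 *	}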
2223ae7a6b38SJeff Roberson */
2224ae7a6b38SJeff Roberson if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2225ae7a6b38SJeff Roberson sched_pctcpu_update(ts);
2226ae7a6b38SJeff Roberson }
2227ae7a6b38SJeff Roberson 
2228ae7a6b38SJeff Roberson /*
2229ae7a6b38SJeff Roberson * Return whether the current CPU has runnable tasks. Used for in-kernel
2230ae7a6b38SJeff Roberson * cooperative idle threads.
2231ae7a6b38SJeff Roberson */
223235e6168fSJeff Roberson int
223335e6168fSJeff Roberson sched_runnable(void)
223435e6168fSJeff Roberson {
2235ad1e7d28SJulian Elischer struct tdq *tdq;
2236b90816f1SJeff Roberson int load;
223735e6168fSJeff Roberson 
2238b90816f1SJeff Roberson load = 1;
2239b90816f1SJeff Roberson 
2240ad1e7d28SJulian Elischer tdq = TDQ_SELF();
22413f741ca1SJeff Roberson if ((curthread->td_flags & TDF_IDLETD) != 0) {
2242d2ad694cSJeff Roberson if (tdq->tdq_load > 0)
22433f741ca1SJeff Roberson goto out;
22443f741ca1SJeff Roberson } else
2245d2ad694cSJeff Roberson if (tdq->tdq_load - 1 > 0)
2246b90816f1SJeff Roberson goto out;
2247b90816f1SJeff Roberson load = 0;
2248b90816f1SJeff Roberson out:
2249b90816f1SJeff Roberson return (load);
225035e6168fSJeff Roberson }
225135e6168fSJeff Roberson 
2252ae7a6b38SJeff Roberson /*
2253ae7a6b38SJeff Roberson * Choose the highest priority thread to run. The thread is removed from
2254ae7a6b38SJeff Roberson * the run-queue while running, however the load remains. For SMP we set
2255ae7a6b38SJeff Roberson * the tdq in the global idle bitmask if it idles here.
2256ae7a6b38SJeff Roberson */
22577a5e5e2aSJeff Roberson struct thread *
2258c9f25d8fSJeff Roberson sched_choose(void)
2259c9f25d8fSJeff Roberson {
226015dc847eSJeff Roberson #ifdef SMP
2261ae7a6b38SJeff Roberson struct tdq_group *tdg;
226215dc847eSJeff Roberson #endif
2263ae7a6b38SJeff Roberson struct td_sched *ts;
2264ae7a6b38SJeff Roberson struct tdq *tdq;
2265ae7a6b38SJeff Roberson 
2266ae7a6b38SJeff Roberson tdq = TDQ_SELF();
2267ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2268ad1e7d28SJulian Elischer ts = tdq_choose(tdq);
2269ad1e7d28SJulian Elischer if (ts) {
2270ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts);
22717a5e5e2aSJeff Roberson return (ts->ts_thread);
227235e6168fSJeff Roberson }
2273c9f25d8fSJeff Roberson #ifdef SMP
2274ae7a6b38SJeff Roberson /*
2275ae7a6b38SJeff Roberson * We only set the idled bit when all of the cpus in the group are
2276ae7a6b38SJeff Roberson * idle. Otherwise we could get into a situation where a thread bounces
2277ae7a6b38SJeff Roberson * back and forth between two idle cores on separate physical CPUs.
2278ae7a6b38SJeff Roberson */
2279ae7a6b38SJeff Roberson tdg = tdq->tdq_group;
2280ae7a6b38SJeff Roberson tdg->tdg_idlemask |= PCPU_GET(cpumask);
2281ae7a6b38SJeff Roberson if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2282ae7a6b38SJeff Roberson atomic_set_int(&tdq_idle, tdg->tdg_mask);
2283ae7a6b38SJeff Roberson tdq->tdq_lowpri = PRI_MAX_IDLE;
2284c9f25d8fSJeff Roberson #endif
22857a5e5e2aSJeff Roberson return (PCPU_GET(idlethread));
22867a5e5e2aSJeff Roberson }
22877a5e5e2aSJeff Roberson 
2288ae7a6b38SJeff Roberson /*
2289ae7a6b38SJeff Roberson * Set owepreempt if necessary. Preemption never happens directly in ULE,
2290ae7a6b38SJeff Roberson * we always request it once we exit a critical section.
2291ae7a6b38SJeff Roberson */
2292ae7a6b38SJeff Roberson static inline void
2293ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
22947a5e5e2aSJeff Roberson {
22957a5e5e2aSJeff Roberson struct thread *ctd;
22967a5e5e2aSJeff Roberson int cpri;
22977a5e5e2aSJeff Roberson int pri;
22987a5e5e2aSJeff Roberson 
22997a5e5e2aSJeff Roberson ctd = curthread;
23007a5e5e2aSJeff Roberson pri = td->td_priority;
23017a5e5e2aSJeff Roberson cpri = ctd->td_priority;
2302ae7a6b38SJeff Roberson if (td->td_priority < ctd->td_priority)
2303ae7a6b38SJeff Roberson curthread->td_flags |= TDF_NEEDRESCHED;
23047a5e5e2aSJeff Roberson if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2305ae7a6b38SJeff Roberson return;
23067a5e5e2aSJeff Roberson /*
23077a5e5e2aSJeff Roberson * Always preempt IDLE threads. Otherwise only if the preempting
23087a5e5e2aSJeff Roberson * thread is an ithread.
23097a5e5e2aSJeff Roberson */
2310ae7a6b38SJeff Roberson if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2311ae7a6b38SJeff Roberson return;
23127a5e5e2aSJeff Roberson ctd->td_owepreempt = 1;
2313ae7a6b38SJeff Roberson return;
231435e6168fSJeff Roberson }
231535e6168fSJeff Roberson 
2316ae7a6b38SJeff Roberson /*
2317ae7a6b38SJeff Roberson * Add a thread to a thread queue. Initializes priority, slice, runq, and
2318ae7a6b38SJeff Roberson * adds it to the appropriate queue. This is the internal function called
2319ae7a6b38SJeff Roberson * when the tdq is predetermined.
2320ae7a6b38SJeff Roberson */
232135e6168fSJeff Roberson void
2322ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
232335e6168fSJeff Roberson {
2324ad1e7d28SJulian Elischer struct td_sched *ts;
232522bf7d9aSJeff Roberson int class;
23267b8bfa0dSJeff Roberson #ifdef SMP
23277b8bfa0dSJeff Roberson int cpumask;
23287b8bfa0dSJeff Roberson #endif
2329c9f25d8fSJeff Roberson 
2330ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED);
23317a5e5e2aSJeff Roberson KASSERT((td->td_inhibitors == 0),
23327a5e5e2aSJeff Roberson ("sched_add: trying to run inhibited thread"));
23337a5e5e2aSJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
23347a5e5e2aSJeff Roberson ("sched_add: bad thread state"));
2335b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM,
2336b61ce5b0SJeff Roberson ("sched_add: thread swapped out"));
2337ae7a6b38SJeff Roberson 
2338ae7a6b38SJeff Roberson ts = td->td_sched;
23397a5e5e2aSJeff Roberson class = PRI_BASE(td->td_pri_class);
2340ae7a6b38SJeff Roberson TD_SET_RUNQ(td);
23417a5e5e2aSJeff Roberson if (ts->ts_slice == 0)
23427a5e5e2aSJeff Roberson ts->ts_slice = sched_slice;
23432454aaf5SJeff Roberson /*
2344ae7a6b38SJeff Roberson * Pick the run queue based on priority.
23452454aaf5SJeff Roberson */
2346ae7a6b38SJeff Roberson if (td->td_priority <= PRI_MAX_REALTIME)
2347ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_realtime;
2348ae7a6b38SJeff Roberson else if (td->td_priority <= PRI_MAX_TIMESHARE)
2349ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_timeshare;
23507b8bfa0dSJeff Roberson else
2351ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_idle;
2352ae7a6b38SJeff Roberson #ifdef SMP
23537b8bfa0dSJeff Roberson cpumask = 1 << ts->ts_cpu;
235422bf7d9aSJeff Roberson /*
2355670c524fSJeff Roberson * If we had been idle, clear our bit in the group and potentially
23567b8bfa0dSJeff Roberson * the global bitmap.
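 *
 * (The bookkeeping is two levels deep: tdg_idlemask records which cpus
 * within this group are idle, and tdq_idle records groups whose cpus are
 * all idle.  As an illustration, if both cpus of a two-cpu group had been
 * idle, queueing a thread here first drops the group from the global
 * tdq_idle mask and only then clears our own cpu from tdg_idlemask,
 * which is the order the code below follows.)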
235722bf7d9aSJeff Roberson */ 2358e7d50326SJeff Roberson if ((class != PRI_IDLE && class != PRI_ITHD) && 23597b8bfa0dSJeff Roberson (tdq->tdq_group->tdg_idlemask & cpumask) != 0) { 236080f86c9fSJeff Roberson /* 236180f86c9fSJeff Roberson * Check to see if our group is unidling, and if so, remove it 236280f86c9fSJeff Roberson * from the global idle mask. 236380f86c9fSJeff Roberson */ 2364d2ad694cSJeff Roberson if (tdq->tdq_group->tdg_idlemask == 2365d2ad694cSJeff Roberson tdq->tdq_group->tdg_cpumask) 2366d2ad694cSJeff Roberson atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask); 236780f86c9fSJeff Roberson /* 236880f86c9fSJeff Roberson * Now remove ourselves from the group specific idle mask. 236980f86c9fSJeff Roberson */ 23707b8bfa0dSJeff Roberson tdq->tdq_group->tdg_idlemask &= ~cpumask; 23717b8bfa0dSJeff Roberson } 2372ae7a6b38SJeff Roberson if (td->td_priority < tdq->tdq_lowpri) 2373ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority; 237422bf7d9aSJeff Roberson #endif 2375ad1e7d28SJulian Elischer tdq_runq_add(tdq, ts, flags); 2376ad1e7d28SJulian Elischer tdq_load_add(tdq, ts); 2377ae7a6b38SJeff Roberson } 2378ae7a6b38SJeff Roberson 2379ae7a6b38SJeff Roberson /* 2380ae7a6b38SJeff Roberson * Select the target thread queue and add a thread to it. Request 2381ae7a6b38SJeff Roberson * preemption or IPI a remote processor if required. 2382ae7a6b38SJeff Roberson */ 2383ae7a6b38SJeff Roberson void 2384ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags) 2385ae7a6b38SJeff Roberson { 2386ae7a6b38SJeff Roberson struct td_sched *ts; 2387ae7a6b38SJeff Roberson struct tdq *tdq; 23887b8bfa0dSJeff Roberson #ifdef SMP 2389ae7a6b38SJeff Roberson int cpuid; 2390ae7a6b38SJeff Roberson int cpu; 2391ae7a6b38SJeff Roberson #endif 2392ae7a6b38SJeff Roberson CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 2393ae7a6b38SJeff Roberson td, td->td_proc->p_comm, td->td_priority, curthread, 2394ae7a6b38SJeff Roberson curthread->td_proc->p_comm); 2395ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2396ae7a6b38SJeff Roberson ts = td->td_sched; 2397ae7a6b38SJeff Roberson /* 2398ae7a6b38SJeff Roberson * Recalculate the priority before we select the target cpu or 2399ae7a6b38SJeff Roberson * run-queue. 2400ae7a6b38SJeff Roberson */ 2401ae7a6b38SJeff Roberson if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2402ae7a6b38SJeff Roberson sched_priority(td); 2403ae7a6b38SJeff Roberson #ifdef SMP 2404ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 2405ae7a6b38SJeff Roberson /* 2406ae7a6b38SJeff Roberson * Pick the destination cpu and if it isn't ours transfer to the 2407ae7a6b38SJeff Roberson * target cpu. 
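 *
 * (Three cases mirror the checks below: an interrupt-priority thread that
 * is allowed to migrate stays on the current cpu, a thread that may not
 * migrate goes back to its ts_cpu, and anything else is placed by
 * sched_pickcpu().  When the choice is remote, the thread is queued there
 * and the remote cpu is poked with tdq_notify() instead of doing the
 * local preemption check.)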
2408ae7a6b38SJeff Roberson */ 2409ae7a6b38SJeff Roberson if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td)) 2410ae7a6b38SJeff Roberson cpu = cpuid; 2411ae7a6b38SJeff Roberson else if (!THREAD_CAN_MIGRATE(td)) 2412ae7a6b38SJeff Roberson cpu = ts->ts_cpu; 2413ae7a6b38SJeff Roberson else 2414ae7a6b38SJeff Roberson cpu = sched_pickcpu(ts, flags); 2415ae7a6b38SJeff Roberson tdq = sched_setcpu(ts, cpu, flags); 2416ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 2417ae7a6b38SJeff Roberson if (cpu != cpuid) { 24187b8bfa0dSJeff Roberson tdq_notify(ts); 24197b8bfa0dSJeff Roberson return; 24207b8bfa0dSJeff Roberson } 2421ae7a6b38SJeff Roberson #else 2422ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2423ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 2424ae7a6b38SJeff Roberson /* 2425ae7a6b38SJeff Roberson * Now that the thread is moving to the run-queue, set the lock 2426ae7a6b38SJeff Roberson * to the scheduler's lock. 2427ae7a6b38SJeff Roberson */ 2428ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2429ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 24307b8bfa0dSJeff Roberson #endif 2431ae7a6b38SJeff Roberson if (!(flags & SRQ_YIELDING)) 2432ae7a6b38SJeff Roberson sched_setpreempt(td); 243335e6168fSJeff Roberson } 243435e6168fSJeff Roberson 2435ae7a6b38SJeff Roberson /* 2436ae7a6b38SJeff Roberson * Remove a thread from a run-queue without running it. This is used 2437ae7a6b38SJeff Roberson * when we're stealing a thread from a remote queue. Otherwise all threads 2438ae7a6b38SJeff Roberson * exit by calling sched_exit_thread() and sched_throw() themselves. 2439ae7a6b38SJeff Roberson */ 244035e6168fSJeff Roberson void 24417cf90fb3SJeff Roberson sched_rem(struct thread *td) 244235e6168fSJeff Roberson { 2443ad1e7d28SJulian Elischer struct tdq *tdq; 2444ad1e7d28SJulian Elischer struct td_sched *ts; 24457cf90fb3SJeff Roberson 244681d47d3fSJeff Roberson CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 244781d47d3fSJeff Roberson td, td->td_proc->p_comm, td->td_priority, curthread, 244881d47d3fSJeff Roberson curthread->td_proc->p_comm); 2449ad1e7d28SJulian Elischer ts = td->td_sched; 2450ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 2451ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2452ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 24537a5e5e2aSJeff Roberson KASSERT(TD_ON_RUNQ(td), 2454ad1e7d28SJulian Elischer ("sched_rem: thread not on run queue")); 2455ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts); 2456ad1e7d28SJulian Elischer tdq_load_rem(tdq, ts); 24577a5e5e2aSJeff Roberson TD_SET_CAN_RUN(td); 245835e6168fSJeff Roberson } 245935e6168fSJeff Roberson 2460ae7a6b38SJeff Roberson /* 2461ae7a6b38SJeff Roberson * Fetch cpu utilization information. Updates on demand. 2462ae7a6b38SJeff Roberson */ 246335e6168fSJeff Roberson fixpt_t 24647cf90fb3SJeff Roberson sched_pctcpu(struct thread *td) 246535e6168fSJeff Roberson { 246635e6168fSJeff Roberson fixpt_t pctcpu; 2467ad1e7d28SJulian Elischer struct td_sched *ts; 246835e6168fSJeff Roberson 246935e6168fSJeff Roberson pctcpu = 0; 2470ad1e7d28SJulian Elischer ts = td->td_sched; 2471ad1e7d28SJulian Elischer if (ts == NULL) 2472484288deSJeff Roberson return (0); 247335e6168fSJeff Roberson 24747b20fb19SJeff Roberson thread_lock(td); 2475ad1e7d28SJulian Elischer if (ts->ts_ticks) { 247635e6168fSJeff Roberson int rtick; 247735e6168fSJeff Roberson 2478ad1e7d28SJulian Elischer sched_pctcpu_update(ts); 247935e6168fSJeff Roberson /* How many rtick per second ? 
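 *
 * Illustrative arithmetic, assuming FSCALE == (1 << FSHIFT) as defined
 * in <sys/param.h>: a thread that ran for the entire averaging window
 * has rtick == hz, so the expression below reduces to
 * (FSCALE * ((FSCALE * hz) / hz)) >> FSHIFT == FSCALE, i.e. 100%,
 * while a thread that ran half of the ticks comes out near FSCALE / 2.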
*/ 2480e7d50326SJeff Roberson rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2481e7d50326SJeff Roberson pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 248235e6168fSJeff Roberson } 24837b20fb19SJeff Roberson thread_unlock(td); 248435e6168fSJeff Roberson 248535e6168fSJeff Roberson return (pctcpu); 248635e6168fSJeff Roberson } 248735e6168fSJeff Roberson 2488ae7a6b38SJeff Roberson /* 2489ae7a6b38SJeff Roberson * Bind a thread to a target cpu. 2490ae7a6b38SJeff Roberson */ 24919bacd788SJeff Roberson void 24929bacd788SJeff Roberson sched_bind(struct thread *td, int cpu) 24939bacd788SJeff Roberson { 2494ad1e7d28SJulian Elischer struct td_sched *ts; 24959bacd788SJeff Roberson 2496c47f202bSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 2497ad1e7d28SJulian Elischer ts = td->td_sched; 24986b2f763fSJeff Roberson if (ts->ts_flags & TSF_BOUND) 2499c95d2db2SJeff Roberson sched_unbind(td); 2500ad1e7d28SJulian Elischer ts->ts_flags |= TSF_BOUND; 250180f86c9fSJeff Roberson #ifdef SMP 25026b2f763fSJeff Roberson sched_pin(); 250380f86c9fSJeff Roberson if (PCPU_GET(cpuid) == cpu) 25049bacd788SJeff Roberson return; 25056b2f763fSJeff Roberson ts->ts_cpu = cpu; 25069bacd788SJeff Roberson /* When we return from mi_switch we'll be on the correct cpu. */ 2507279f949eSPoul-Henning Kamp mi_switch(SW_VOL, NULL); 25089bacd788SJeff Roberson #endif 25099bacd788SJeff Roberson } 25109bacd788SJeff Roberson 2511ae7a6b38SJeff Roberson /* 2512ae7a6b38SJeff Roberson * Release a bound thread. 2513ae7a6b38SJeff Roberson */ 25149bacd788SJeff Roberson void 25159bacd788SJeff Roberson sched_unbind(struct thread *td) 25169bacd788SJeff Roberson { 2517e7d50326SJeff Roberson struct td_sched *ts; 2518e7d50326SJeff Roberson 25197b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2520e7d50326SJeff Roberson ts = td->td_sched; 25216b2f763fSJeff Roberson if ((ts->ts_flags & TSF_BOUND) == 0) 25226b2f763fSJeff Roberson return; 2523e7d50326SJeff Roberson ts->ts_flags &= ~TSF_BOUND; 2524e7d50326SJeff Roberson #ifdef SMP 2525e7d50326SJeff Roberson sched_unpin(); 2526e7d50326SJeff Roberson #endif 25279bacd788SJeff Roberson } 25289bacd788SJeff Roberson 252935e6168fSJeff Roberson int 2530ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td) 2531ebccf1e3SJoseph Koshy { 25327b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2533ad1e7d28SJulian Elischer return (td->td_sched->ts_flags & TSF_BOUND); 2534ebccf1e3SJoseph Koshy } 2535ebccf1e3SJoseph Koshy 2536ae7a6b38SJeff Roberson /* 2537ae7a6b38SJeff Roberson * Basic yield call. 2538ae7a6b38SJeff Roberson */ 253936ec198bSDavid Xu void 254036ec198bSDavid Xu sched_relinquish(struct thread *td) 254136ec198bSDavid Xu { 25427b20fb19SJeff Roberson thread_lock(td); 25438460a577SJohn Birrell if (td->td_pri_class == PRI_TIMESHARE) 254436ec198bSDavid Xu sched_prio(td, PRI_MAX_TIMESHARE); 25457b20fb19SJeff Roberson SCHED_STAT_INC(switch_relinquish); 254636ec198bSDavid Xu mi_switch(SW_VOL, NULL); 25477b20fb19SJeff Roberson thread_unlock(td); 254836ec198bSDavid Xu } 254936ec198bSDavid Xu 2550ae7a6b38SJeff Roberson /* 2551ae7a6b38SJeff Roberson * Return the total system load. 
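 *
 * (With SMP this sums the per-group tdg_load counters across every
 * run-queue group, 0 through tdg_maxid; on uniprocessor kernels it
 * simply reports the single queue's tdq_sysload, as the body below
 * shows.)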
2552ae7a6b38SJeff Roberson */ 2553ebccf1e3SJoseph Koshy int 255433916c36SJeff Roberson sched_load(void) 255533916c36SJeff Roberson { 255633916c36SJeff Roberson #ifdef SMP 255733916c36SJeff Roberson int total; 255833916c36SJeff Roberson int i; 255933916c36SJeff Roberson 256033916c36SJeff Roberson total = 0; 2561d2ad694cSJeff Roberson for (i = 0; i <= tdg_maxid; i++) 2562d2ad694cSJeff Roberson total += TDQ_GROUP(i)->tdg_load; 256333916c36SJeff Roberson return (total); 256433916c36SJeff Roberson #else 2565d2ad694cSJeff Roberson return (TDQ_SELF()->tdq_sysload); 256633916c36SJeff Roberson #endif 256733916c36SJeff Roberson } 256833916c36SJeff Roberson 256933916c36SJeff Roberson int 257035e6168fSJeff Roberson sched_sizeof_proc(void) 257135e6168fSJeff Roberson { 257235e6168fSJeff Roberson return (sizeof(struct proc)); 257335e6168fSJeff Roberson } 257435e6168fSJeff Roberson 257535e6168fSJeff Roberson int 257635e6168fSJeff Roberson sched_sizeof_thread(void) 257735e6168fSJeff Roberson { 257835e6168fSJeff Roberson return (sizeof(struct thread) + sizeof(struct td_sched)); 257935e6168fSJeff Roberson } 2580b41f1452SDavid Xu 25817a5e5e2aSJeff Roberson /* 25827a5e5e2aSJeff Roberson * The actual idle process. 25837a5e5e2aSJeff Roberson */ 25847a5e5e2aSJeff Roberson void 25857a5e5e2aSJeff Roberson sched_idletd(void *dummy) 25867a5e5e2aSJeff Roberson { 25877a5e5e2aSJeff Roberson struct thread *td; 2588ae7a6b38SJeff Roberson struct tdq *tdq; 25897a5e5e2aSJeff Roberson 25907a5e5e2aSJeff Roberson td = curthread; 2591ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 25927a5e5e2aSJeff Roberson mtx_assert(&Giant, MA_NOTOWNED); 2593ae7a6b38SJeff Roberson /* ULE relies on preemption for idle interruption. */ 2594ae7a6b38SJeff Roberson for (;;) { 2595ae7a6b38SJeff Roberson #ifdef SMP 2596ae7a6b38SJeff Roberson if (tdq_idled(tdq)) 25977a5e5e2aSJeff Roberson cpu_idle(); 2598ae7a6b38SJeff Roberson #else 2599ae7a6b38SJeff Roberson cpu_idle(); 2600ae7a6b38SJeff Roberson #endif 2601ae7a6b38SJeff Roberson } 2602b41f1452SDavid Xu } 2603e7d50326SJeff Roberson 26047b20fb19SJeff Roberson /* 26057b20fb19SJeff Roberson * A CPU is entering for the first time or a thread is exiting. 26067b20fb19SJeff Roberson */ 26077b20fb19SJeff Roberson void 26087b20fb19SJeff Roberson sched_throw(struct thread *td) 26097b20fb19SJeff Roberson { 261059c68134SJeff Roberson struct thread *newtd; 2611ae7a6b38SJeff Roberson struct tdq *tdq; 2612ae7a6b38SJeff Roberson 2613ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 26147b20fb19SJeff Roberson if (td == NULL) { 2615ae7a6b38SJeff Roberson /* Correct spinlock nesting and acquire the correct lock. */ 2616ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 26177b20fb19SJeff Roberson spinlock_exit(); 26187b20fb19SJeff Roberson } else { 2619ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2620ae7a6b38SJeff Roberson tdq_load_rem(tdq, td->td_sched); 26217b20fb19SJeff Roberson } 26227b20fb19SJeff Roberson KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 262359c68134SJeff Roberson newtd = choosethread(); 262459c68134SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 26257b20fb19SJeff Roberson PCPU_SET(switchtime, cpu_ticks()); 26267b20fb19SJeff Roberson PCPU_SET(switchticks, ticks); 262759c68134SJeff Roberson cpu_throw(td, newtd); /* doesn't return */ 26287b20fb19SJeff Roberson } 26297b20fb19SJeff Roberson 2630ae7a6b38SJeff Roberson /* 2631ae7a6b38SJeff Roberson * This is called from fork_exit(). Just acquire the correct locks and 2632ae7a6b38SJeff Roberson * let fork do the rest of the work. 
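 *
 * (The new thread arrives here already owning this cpu's run-queue lock,
 * as the assertion at the end of the function documents, so all that is
 * left is to point an idle thread's td_lock at that lock, record
 * td_oncpu, and return with the lock still held.)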
2633ae7a6b38SJeff Roberson */ 26347b20fb19SJeff Roberson void 2635fe54587fSJeff Roberson sched_fork_exit(struct thread *td) 26367b20fb19SJeff Roberson { 2637ae7a6b38SJeff Roberson struct td_sched *ts; 2638ae7a6b38SJeff Roberson struct tdq *tdq; 2639ae7a6b38SJeff Roberson int cpuid; 26407b20fb19SJeff Roberson 26417b20fb19SJeff Roberson /* 26427b20fb19SJeff Roberson * Finish setting up thread glue so that it begins execution in a 2643ae7a6b38SJeff Roberson * non-nested critical section with the scheduler lock held. 26447b20fb19SJeff Roberson */ 2645ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 2646ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 2647ae7a6b38SJeff Roberson ts = td->td_sched; 2648ae7a6b38SJeff Roberson if (TD_IS_IDLETHREAD(td)) 2649ae7a6b38SJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 2650ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2651ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 265259c68134SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 26537b20fb19SJeff Roberson } 26547b20fb19SJeff Roberson 2655ae7a6b38SJeff Roberson static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, 2656ae7a6b38SJeff Roberson "Scheduler"); 2657ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, 2658e7d50326SJeff Roberson "Scheduler name"); 2659ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 2660ae7a6b38SJeff Roberson "Slice size for timeshare threads"); 2661ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, 2662ae7a6b38SJeff Roberson "Interactivity score threshold"); 2663ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh, 2664ae7a6b38SJeff Roberson 0,"Min priority for preemption, lower priorities have greater precedence"); 26657b8bfa0dSJeff Roberson #ifdef SMP 2666ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, 2667ae7a6b38SJeff Roberson "Pick the target cpu based on priority rather than load."); 2668ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, 2669ae7a6b38SJeff Roberson "Number of hz ticks to keep thread affinity for"); 2670ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, ""); 2671ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, 2672ae7a6b38SJeff Roberson "Enables the long-term load balancer"); 26737fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW, 26747fcf154aSJeff Roberson &balance_interval, 0, 26757fcf154aSJeff Roberson "Average frequency in stathz ticks to run the long-term balancer"); 2676ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, 2677ae7a6b38SJeff Roberson "Steals work from another hyper-threaded core on idle"); 2678ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, 2679ae7a6b38SJeff Roberson "Attempts to steal work from other cores before idling"); 268028994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, 268128994a58SJeff Roberson "Minimum load on remote cpu before we'll steal"); 2682ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0, 2683ae7a6b38SJeff Roberson "True when a topology has been specified by the MD code."); 26847b8bfa0dSJeff Roberson #endif 2685e7d50326SJeff Roberson 
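/*
 * The entries above are ordinary sysctl knobs; those marked CTLFLAG_RW
 * can be read and changed on a running system with sysctl(8).  An
 * illustrative, not prescriptive, example:
 *
 *	sysctl kern.sched.interact
 *	sysctl kern.sched.preempt_thresh=<pri>
 *
 * where <pri> is whatever cutoff the administrator prefers; per the test
 * in sched_setpreempt() above, a numerically lower value makes
 * preemption rarer.
 */
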
268654b0e65fSJeff Roberson /* ps compat. All cpu percentages from ULE are weighted. */
2687a5423ea3SJeff Roberson static int ccpu = 0;
2688e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2689e7d50326SJeff Roberson 
2690e7d50326SJeff Roberson 
2691ed062c8dSJulian Elischer #define KERN_SWITCH_INCLUDE 1
2692ed062c8dSJulian Elischer #include "kern/kern_switch.c"